//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTFwd.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprConcepts.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenCLOptions.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/SemaConcept.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include <deque>
#include <memory>
#include <string>
#include <tuple>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
enum class OverloadCandidateParamOrder : char;
enum OverloadCandidateRewriteKind : unsigned;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
  /// The first pointer declarator (of any pointer kind) in the file that does
  /// not have a corresponding nullability annotation.
  SourceLocation PointerLoc;

  /// The end location for the first pointer declarator in the file. Used for
  /// placing fix-its.
  SourceLocation PointerEndLoc;

  /// Which kind of pointer declarator we saw.
  /// Initialized to 0 so a default-constructed record (e.g. the one created
  /// by FileNullabilityMap's DenseMap lookup) does not carry an
  /// indeterminate value.
  uint8_t PointerKind = 0;

  /// Whether we saw any type nullability annotations in the given file.
  bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
  /// Per-file nullability records, indexed by file ID.
  llvm::DenseMap<FileID, FileNullability> Map;

  /// One-entry cache holding the most recently accessed file's record.
  struct {
    FileID File;
    FileNullability Nullability;
  } Cache;

public:
  FileNullability &operator[](FileID file) {
    // Fast path: the requested file is the one currently cached.
    if (file == Cache.File)
      return Cache.Nullability;

    // Write the cached entry back into the map before replacing it.
    if (!Cache.File.isInvalid())
      Map[Cache.File] = Cache.Nullability;

    // Load (or default-create) the requested file's record into the cache
    // and hand out a reference to the cached copy.
    Cache.File = file;
    Cache.Nullability = Map[file];
    return Cache.Nullability;
  }
};
// TODO SYCL Integration header approach relies on an assumption that kernel
// lambda objects created by the host compiler and any of the device compilers
// will be identical wrt to field types, order and offsets. Some verification
// mechanism should be developed to enforce that.
// TODO FIXME SYCL Support for SYCL in FE should be refactored:
// - kernel identification and generation should be made a separate pass over
// AST. RecursiveASTVisitor + VisitFunctionTemplateDecl +
// FunctionTemplateDecl::getSpecializations() mechanism could be used for that.
// - All SYCL stuff on Sema level should be encapsulated into a single Sema
// field
// - Move SYCL stuff into a separate header
// Represents contents of a SYCL integration header file produced by a SYCL
// device compiler and used by SYCL host compiler (via forced inclusion into
// compiled SYCL source):
// - SYCL kernel names
// - SYCL kernel parameters and offsets of corresponding actual arguments
class SYCLIntegrationHeader {
public:
  // Kind of kernel's parameters as captured by the compiler in the
  // kernel lambda or function object.
  enum kernel_param_kind_t {
    kind_first,
    kind_accessor = kind_first,
    kind_std_layout,
    kind_sampler,
    kind_pointer,
    kind_specialization_constants_buffer,
    kind_last = kind_specialization_constants_buffer
  };

public:
  SYCLIntegrationHeader(bool UnnamedLambdaSupport, Sema &S);

  /// Emits contents of the header into given stream.
  void emit(raw_ostream &Out);

  /// Emits contents of the header into a file with given name.
  /// Returns true/false on success/failure.
  bool emit(StringRef MainSrc);

  /// Signals that subsequent parameter descriptor additions will go to
  /// the kernel with given name. Starts new kernel invocation descriptor.
  void startKernel(StringRef KernelName, QualType KernelNameType,
                   StringRef KernelStableName, SourceLocation Loc,
                   bool IsESIMD);

  /// Adds a kernel parameter descriptor to current kernel invocation
  /// descriptor.
  void addParamDesc(kernel_param_kind_t Kind, int Info, unsigned Offset);

  /// Signals that addition of parameter descriptors to current kernel
  /// invocation descriptor has finished.
  void endKernel();

  /// Registers a specialization constant to emit info for it into the header.
  void addSpecConstant(StringRef IDName, QualType IDType);

  /// Note which free functions (this_id, this_item, etc) are called within
  /// the kernel.
  void setCallsThisId(bool B);
  void setCallsThisItem(bool B);
  void setCallsThisNDItem(bool B);
  void setCallsThisGroup(bool B);

private:
  // Kernel actual parameter descriptor.
  struct KernelParamDesc {
    // Represents a parameter kind.
    kernel_param_kind_t Kind = kind_last;
    // If Kind is kind_scalar or kind_struct, then
    // denotes parameter size in bytes (includes padding for structs)
    // If Kind is kind_accessor
    // denotes access target; possible access targets are defined in
    // access/access.hpp
    // NOTE(review): kind_scalar/kind_struct are not members of
    // kernel_param_kind_t; presumably this refers to kind_std_layout —
    // confirm and update the comment above.
    int Info = 0;
    // Offset of the captured parameter value in the lambda or function
    // object.
    unsigned Offset = 0;
    KernelParamDesc() = default;
  };

  // There are four free functions the kernel may call (this_id, this_item,
  // this_nd_item, this_group).
  struct KernelCallsSYCLFreeFunction {
    bool CallsThisId;
    bool CallsThisItem;
    bool CallsThisNDItem;
    bool CallsThisGroup;
  };

  // Kernel invocation descriptor.
  struct KernelDesc {
    /// Kernel name.
    std::string Name;
    /// Kernel name type.
    QualType NameType;
    /// Kernel name with stable lambda name mangling.
    std::string StableName;
    /// Source location of the kernel.
    SourceLocation KernelLocation;
    /// Whether this kernel is an ESIMD one.
    bool IsESIMDKernel;
    /// Descriptor of kernel actual parameters.
    SmallVector<KernelParamDesc, 8> Params;
    // Whether kernel calls any of the SYCL free functions (this_item(),
    // this_id(), etc).
    KernelCallsSYCLFreeFunction FreeFunctionCalls;
    KernelDesc() = default;
  };

  /// Returns the latest invocation descriptor started by
  /// SYCLIntegrationHeader::startKernel, or null if no kernel has been
  /// started yet.
  KernelDesc *getCurKernelDesc() {
    return KernelDescs.empty() ? nullptr : &KernelDescs.back();
  }

private:
  /// Keeps invocation descriptors for each kernel invocation started by
  /// SYCLIntegrationHeader::startKernel.
  SmallVector<KernelDesc, 4> KernelDescs;

  using SpecConstID = std::pair<QualType, std::string>;

  /// Keeps specialization constants met in the translation unit. Maps spec
  /// constant's ID type to generated unique name. Duplicates are removed at
  /// integration header emission time.
  llvm::SmallVector<SpecConstID, 4> SpecConsts;

  /// Whether header is generated with unnamed lambda support.
  bool UnnamedLambdaSupport;

  /// The Sema instance this generator is attached to (non-owning).
  Sema &S;
};
// Generates the SYCL "integration footer". Collects variable declarations
// registered via addVarDecl (stored in SpecConstants, so presumably
// specialization-constant IDs — confirm against the out-of-line emit()) and
// writes information about them on emit().
class SYCLIntegrationFooter {
public:
SYCLIntegrationFooter(Sema &S) : S(S) {}
/// Emits the footer for the given main source file name.
/// NOTE(review): presumably returns true/false on success/failure like
/// SYCLIntegrationHeader::emit — confirm against the definition.
bool emit(StringRef MainSrc);
/// Registers a variable whose information should be emitted in the footer.
void addVarDecl(const VarDecl *VD);
private:
bool emit(raw_ostream &O);
/// The Sema instance this generator is attached to (non-owning).
Sema &S;
/// Variables registered via addVarDecl.
llvm::SmallVector<const VarDecl *> SpecConstants;
/// Writes the spec-constant ID name for the given variable to the stream.
void emitSpecIDName(raw_ostream &O, const VarDecl *VD);
};
/// Tracks expected type during expression parsing, for use in code completion.
/// The type is tied to a particular token, all functions that update or consume
/// the type take a start location of the token they are looking at as a
/// parameter. This avoids updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder(bool Enabled) : Enabled(Enabled) {}
// Each enter* function below records the type expected at the token that
// starts at \p Tok for the corresponding grammatical context.
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Handles e.g. BaseType{ .D = Tok...
void enterDesignatedInitializer(SourceLocation Tok, QualType BaseType,
const Designation &D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function, as it stores a
/// function_ref, clients should make sure all calls to get() with the same
/// location happen while function_ref is alive.
///
/// The callback should also emit signature help as a side-effect, but only
/// if the completion point has been reached.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
/// Get the expected type associated with this location, if any.
///
/// If the location is a function argument, determining the expected type
/// involves considering all function overloads and the arguments so far.
/// In this case, signature help for these function overloads will be reported
/// as a side-effect (only if the completion point has been reached).
QualType get(SourceLocation Tok) const {
// Results are only valid for the exact token this builder was primed for.
if (!Enabled || Tok != ExpectedLoc)
return QualType();
// Prefer the eagerly-recorded type when one was stored.
if (!Type.isNull())
return Type;
// Otherwise fall back to the deferred computation, which may run overload
// resolution (see enterFunctionArgument).
if (ComputeType)
return ComputeType();
return QualType();
}
private:
bool Enabled;
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
/// Sema - This implements semantic analysis and AST building for C.
class Sema final {
// Non-copyable: a Sema instance holds per-translation-unit state.
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
///Source of additional semantic information.
ExternalSemaSource *ExternalSource;
///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
// NOTE(review): presumably a linkage predicate used by redeclaration
// checking — confirm against the out-of-line definition.
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
// Slow path of the declaration-visibility check; presumably invoked by an
// inline isVisible() wrapper declared elsewhere in this class — confirm.
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
                                  const NamedDecl *New) {
  // A visible previous declaration always links with the new one.
  if (isVisible(Old))
    return true;
  // The old declaration is hidden; linking is still correct when the new
  // declaration is externally declarable. See comment in below overload for
  // why it's safe to compute the linkage of the new declaration here.
  if (!New->isExternallyDeclarable())
    return false;
  assert(Old->isExternallyDeclarable() &&
         "should not have found a non-externally-declarable previous decl");
  return true;
}
// Overload taking a lookup-result set as the previous declaration(s).
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
// Sets up the type of an implicitly-declared special member function from
// its result type and parameter types.
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
/// The maximum alignment, same as in llvm::Value. We duplicate them here
/// because that allows us not to duplicate the constants in clang code,
/// which we must to since we can't directly use the llvm constants.
/// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp
///
/// This is the greatest alignment value supported by load, store, and alloca
/// instructions, and global values.
static const unsigned MaxAlignmentExponent = 29;
static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent;
// Opaque handle types exchanged with the parser.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
// Currently enabled OpenCL features and floating-point options.
OpenCLOptions OpenCLFeatures;
FPOptions CurFPFeatures;
// Held by reference and not owned here: the core objects Sema reads from
// and reports through.
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// Holds TypoExprs that are created from `createDelayedTypo`. This is used by
/// `TransformTypos` in order to keep track of any TypoExprs that are created
/// recursively during typo correction and wipe them away if the correction
/// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs;
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4,
PCSK_Relro = 5
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangRelroSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
// #pragma pack and align.
class AlignPackInfo {
public:
  // `Native` represents default align mode, which may vary based on the
  // platform.
  enum Mode : unsigned char { Native, Natural, Packed, Mac68k };

  // #pragma pack info constructor
  AlignPackInfo(AlignPackInfo::Mode M, unsigned Num, bool IsXL)
      : PackAttr(true), AlignMode(M), PackNumber(Num), XLStack(IsXL) {
    assert(Num == PackNumber && "The pack number has been truncated.");
  }

  // #pragma align info constructor
  AlignPackInfo(AlignPackInfo::Mode M, bool IsXL)
      : PackAttr(false), AlignMode(M),
        PackNumber(M == Packed ? 1 : UninitPackVal), XLStack(IsXL) {}

  explicit AlignPackInfo(bool IsXL) : AlignPackInfo(Native, IsXL) {}

  AlignPackInfo() : AlignPackInfo(Native, false) {}

  // When an AlignPackInfo itself cannot be used, this returns a 32-bit
  // integer encoding for it. This should only be passed to
  // AlignPackInfo::getFromRawEncoding, it should not be inspected directly.
  //
  // Layout (see the masks below): bit 0 = XL-stack flag, bits 1-2 = align
  // mode, bit 3 = pack-attribute flag, bits 4-8 = pack number. Only pack
  // numbers that fit in those five bits (<= 31) round-trip through this
  // encoding.
  static uint32_t getRawEncoding(const AlignPackInfo &Info) {
    std::uint32_t Encoding{};
    if (Info.IsXLStack())
      Encoding |= IsXLMask;
    Encoding |= static_cast<uint32_t>(Info.getAlignMode()) << 1;
    if (Info.IsPackAttr())
      Encoding |= PackAttrMask;
    Encoding |= static_cast<uint32_t>(Info.getPackNumber()) << 4;
    return Encoding;
  }

  static AlignPackInfo getFromRawEncoding(unsigned Encoding) {
    bool IsXL = static_cast<bool>(Encoding & IsXLMask);
    AlignPackInfo::Mode M =
        static_cast<AlignPackInfo::Mode>((Encoding & AlignModeMask) >> 1);
    int PackNumber = (Encoding & PackNumMask) >> 4;
    // The pack-attr bit decides which constructor mirrors the original:
    // for align attributes the pack number is recomputed from the mode.
    if (Encoding & PackAttrMask)
      return AlignPackInfo(M, PackNumber, IsXL);
    return AlignPackInfo(M, IsXL);
  }

  bool IsPackAttr() const { return PackAttr; }
  bool IsAlignAttr() const { return !PackAttr; }
  Mode getAlignMode() const { return AlignMode; }
  unsigned getPackNumber() const { return PackNumber; }

  bool IsPackSet() const {
    // #pragma align, #pragma pack(), and #pragma pack(0) do not set the pack
    // attribute on a decl.
    return PackNumber != UninitPackVal && PackNumber != 0;
  }

  bool IsXLStack() const { return XLStack; }

  bool operator==(const AlignPackInfo &Info) const {
    return std::tie(AlignMode, PackNumber, PackAttr, XLStack) ==
           std::tie(Info.AlignMode, Info.PackNumber, Info.PackAttr,
                    Info.XLStack);
  }

  bool operator!=(const AlignPackInfo &Info) const {
    return !(*this == Info);
  }

private:
  /// \brief True if this is a pragma pack attribute,
  ///        not a pragma align attribute.
  bool PackAttr;
  /// \brief The alignment mode that is in effect.
  Mode AlignMode;
  /// \brief The pack number of the stack.
  unsigned char PackNumber;
  /// \brief True if it is a XL #pragma align/pack stack.
  bool XLStack;
  /// \brief Uninitialized pack value.
  static constexpr unsigned char UninitPackVal = -1;

  // Masks to encode and decode an AlignPackInfo.
  static constexpr uint32_t IsXLMask{0x0000'0001};
  static constexpr uint32_t AlignModeMask{0x0000'0006};
  // Digit grouping fixed: was written "0x00000'0008"; the value (bit 3) is
  // unchanged.
  static constexpr uint32_t PackAttrMask{0x0000'0008};
  static constexpr uint32_t PackNumMask{0x0000'01F0};
};
// Generic value stack backing the MS-style pragmas (pack, vtordisp, code_seg,
// ...). Tracks a current value plus a stack of saved values that push/pop
// directives manipulate.
template<typename ValueType>
struct PragmaStack {
// One saved entry: the (possibly empty) user label, the value that was
// current when the push happened, and the relevant pragma locations.
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
// Applies one pragma directive to the stack:
// - PSK_Reset restores the default value.
// - PSK_Push saves the current value under StackSlotLabel (may be empty).
// - PSK_Pop restores the newest entry or, if a label is given, the newest
//   entry with that label (a no-op when the label is not found).
// - PSK_Set (possibly combined with push/pop) installs Value as current.
void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel, ValueType Value) {
if (Action == PSK_Reset) {
CurrentValue = DefaultValue;
CurrentPragmaLocation = PragmaLocation;
return;
}
if (Action & PSK_Push)
Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation,
PragmaLocation);
else if (Action & PSK_Pop) {
if (!StackSlotLabel.empty()) {
// If we've got a label, try to find it and jump there.
auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) {
return x.StackSlotLabel == StackSlotLabel;
});
// If we found the label so pop from there.
if (I != Stack.rend()) {
CurrentValue = I->Value;
CurrentPragmaLocation = I->PragmaLocation;
// Discard the labeled entry and everything pushed after it.
Stack.erase(std::prev(I.base()), Stack.end());
}
} else if (!Stack.empty()) {
// We do not have a label, just pop the last entry.
CurrentValue = Stack.back().Value;
CurrentPragmaLocation = Stack.back().PragmaLocation;
Stack.pop_back();
}
}
if (Action & PSK_Set) {
CurrentValue = Value;
CurrentPragmaLocation = PragmaLocation;
}
}
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
// True when some pragma changed the current value away from the default
// (and it has not been reset since).
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispMode> VtorDispStack;
/// Current #pragma pack / #pragma align state.
PragmaStack<AlignPackInfo> AlignPackStack;
// The current #pragma align/pack values and locations at each #include.
struct AlignPackIncludeState {
AlignPackInfo CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<AlignPackIncludeState, 8> AlignPackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// This stack tracks the current state of Sema.CurFPFeatures.
PragmaStack<FPOptionsOverride> FpPragmaStack;
FPOptionsOverride CurFPFeatureOverrides() {
  // An unchanged pragma stack contributes no overrides; otherwise the
  // stack's current value is the active override set.
  return FpPragmaStack.hasValue() ? FpPragmaStack.CurrentValue
                                  : FPOptionsOverride();
}
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
// Constructor presumably pushes a sentinel labeled SlotLabel when ShouldAct
// is true, and the destructor pops it again (definitions are out of line —
// confirm).
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
/// Location of the #pragma init_seg that set CurInitSeg.
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
/// Stack of active #pragma clang attribute push groups.
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>,
llvm::SmallPtrSet<Expr *, 4>>;
MaybeODRUseExprSet MaybeODRUseExprs;
// NOTE(review): presumably a spare FunctionScopeInfo kept for reuse across
// function bodies — confirm against PushFunctionScope's definition.
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
/// The index of the first FunctionScope that corresponds to the current
/// context.
unsigned FunctionScopesStart = 0;
// Returns only the function scopes belonging to the current context, i.e.
// the tail of FunctionScopes starting at FunctionScopesStart.
ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const {
return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart,
FunctionScopes.end());
}
/// Stack containing information needed when in C++2a an 'auto' is encountered
/// in a function declaration parameter type specifier in order to invent a
/// corresponding template parameter in the enclosing abbreviated function
/// template. This information is also present in LambdaScopeInfo, stored in
/// the FunctionScopes stack.
SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos;
/// The index of the first InventedParameterInfo that refers to the current
/// context.
unsigned InventedParameterInfosStart = 0;
// Returns only the invented-parameter records belonging to the current
// context, i.e. the tail starting at InventedParameterInfosStart.
ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const {
return llvm::makeArrayRef(InventedParameterInfos.begin() +
InventedParameterInfosStart,
InventedParameterInfos.end());
}
  /// Lazily-deserialized vector type: entries can be streamed in on demand
  /// from an ExternalSemaSource via ReadExtVectorDecls.
  typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadExtVectorDecls, 2, 2>
    ExtVectorDeclsType;

  /// ExtVectorDecls - This is a list of all the extended vector types. This
  /// allows us to associate a raw vector type with one of the ext_vector type
  /// names. This is only necessary for issuing pretty diagnostics.
  ExtVectorDeclsType ExtVectorDecls;

  /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
  std::unique_ptr<CXXFieldCollector> FieldCollector;

  /// Insertion-ordered, deduplicated set of named declarations.
  typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;

  /// Set containing all declared private fields that are not used.
  NamedDeclSetType UnusedPrivateFields;

  /// Set containing all typedefs that are likely unused.
  llvm::SmallSetVector<const TypedefNameDecl *, 4>
      UnusedLocalTypedefNameCandidates;

  /// Delete-expressions to be analyzed at the end of the translation unit.
  ///
  /// Maps each class member to the locations of delete-expressions for which
  /// we could not prove whether they mismatch the new-expression used in the
  /// initializer of that field.
  typedef std::pair<SourceLocation, bool> DeleteExprLoc;
  typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
  llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;

  typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;

  /// PureVirtualClassDiagSet - a set of class declarations which we have
  /// emitted a list of pure virtual functions. Used to prevent emitting the
  /// same list more than once.
  std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;

  /// ParsingInitForAutoVars - a set of declarations with auto types for which
  /// we are currently parsing the initializer.
  llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
  /// Look for a locally scoped extern "C" declaration by the given name.
  NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);

  /// Lazily-deserialized vector type: entries can be streamed in from an
  /// ExternalSemaSource via ReadTentativeDefinitions.
  typedef LazyVector<VarDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
    TentativeDefinitionsType;

  /// All the tentative definitions encountered in the TU.
  TentativeDefinitionsType TentativeDefinitions;

  /// All the external declarations encountered and used in the TU.
  SmallVector<VarDecl *, 4> ExternalDeclarations;

  typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
    UnusedFileScopedDeclsType;

  /// The set of file scoped decls seen so far that have not been used
  /// and must warn if not used. Only contains the first declaration.
  UnusedFileScopedDeclsType UnusedFileScopedDecls;

  typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
                     &ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
    DelegatingCtorDeclsType;

  /// All the delegating constructors seen so far in the file, used for
  /// cycle detection at the end of the TU.
  DelegatingCtorDeclsType DelegatingCtorDecls;

  /// All the overriding functions seen during a class definition
  /// that had their exception spec checks delayed, plus the overridden
  /// function.
  SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
    DelayedOverridingExceptionSpecChecks;

  /// All the function redeclarations seen during a class definition that had
  /// their exception spec checks delayed, plus the prior declaration they
  /// should be checked against. Except during error recovery, the new decl
  /// should always be a friend declaration, as that's the only valid way to
  /// redeclare a special member before its class is complete.
  SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
    DelayedEquivalentExceptionSpecChecks;

  /// Maps a function declaration to its still-unparsed (late-parsed)
  /// template body.
  typedef llvm::MapVector<const FunctionDecl *,
                          std::unique_ptr<LateParsedTemplate>>
      LateParsedTemplateMapT;
  LateParsedTemplateMapT LateParsedTemplateMap;

  /// Callback to the parser to parse templated functions when needed.
  typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
  /// Companion cleanup callback, invoked with the same opaque pointer.
  typedef void LateTemplateParserCleanupCB(void *P);
  LateTemplateParserCB *LateTemplateParser;
  LateTemplateParserCleanupCB *LateTemplateParserCleanup;
  /// Opaque state (presumably the Parser instance — set via
  /// SetLateTemplateParser) handed back as the callbacks' first argument.
  void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
  class DelayedDiagnostics;

  /// Opaque saved state for the delayed-diagnostics machinery: remembers the
  /// pool that was active before a push. Only DelayedDiagnostics itself may
  /// read or write the saved pool.
  class DelayedDiagnosticsState {
    sema::DelayedDiagnosticPool *SavedPool;
    friend class Sema::DelayedDiagnostics;
  };

  /// Saved delayed-diagnostics state captured when a declaration starts
  /// being parsed.
  typedef DelayedDiagnosticsState ParsingDeclState;
  /// Saved delayed-diagnostics state captured when a processing context
  /// (e.g. a declaration context; see ContextRAII) is entered.
  typedef DelayedDiagnosticsState ProcessingContextState;
  /// A class which encapsulates the logic for delaying diagnostics
  /// during parsing and other processing.
  class DelayedDiagnostics {
    /// The current pool of diagnostics into which delayed
    /// diagnostics should go. Null when diagnostics are not being delayed.
    sema::DelayedDiagnosticPool *CurPool;

  public:
    DelayedDiagnostics() : CurPool(nullptr) {}

    /// Adds a delayed diagnostic.
    void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h

    /// Determines whether diagnostics should be delayed.
    bool shouldDelayDiagnostics() { return CurPool != nullptr; }

    /// Returns the current delayed-diagnostics pool, or null when
    /// diagnostics are not being delayed.
    sema::DelayedDiagnosticPool *getCurrentPool() const {
      return CurPool;
    }

    /// Enter a new scope. Access and deprecation diagnostics will be
    /// collected in this pool. Returns the state to hand back to the
    /// matching popWithoutEmitting().
    DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = &pool;
      return state;
    }

    /// Leave a delayed-diagnostic state that was previously pushed.
    /// Do not emit any of the diagnostics. This is performed as part
    /// of the bookkeeping of popping a pool "properly".
    void popWithoutEmitting(DelayedDiagnosticsState state) {
      CurPool = state.SavedPool;
    }

    /// Enter a new scope where access and deprecation diagnostics are
    /// not delayed.
    DelayedDiagnosticsState pushUndelayed() {
      DelayedDiagnosticsState state;
      state.SavedPool = CurPool;
      CurPool = nullptr;
      return state;
    }

    /// Undo a previous pushUndelayed(). Asserts that no pool was pushed in
    /// between, i.e. pushes/pops are properly nested.
    void popUndelayed(DelayedDiagnosticsState state) {
      assert(CurPool == nullptr);
      CurPool = state.SavedPool;
    }
  } DelayedDiagnostics;
  /// A RAII object to temporarily push a declaration context.
  class ContextRAII {
  private:
    Sema &S;
    /// The context that was active before the push; nulled out by pop() so
    /// the destructor does not restore twice.
    DeclContext *SavedContext;
    /// Undelayed-diagnostics state saved via pushUndelayed().
    ProcessingContextState SavedContextState;
    QualType SavedCXXThisTypeOverride;
    unsigned SavedFunctionScopesStart;
    unsigned SavedInventedParameterInfosStart;

  public:
    ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
      : S(S), SavedContext(S.CurContext),
        SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
        SavedCXXThisTypeOverride(S.CXXThisTypeOverride),
        SavedFunctionScopesStart(S.FunctionScopesStart),
        SavedInventedParameterInfosStart(S.InventedParameterInfosStart)
    {
      assert(ContextToPush && "pushing null context");
      S.CurContext = ContextToPush;
      if (NewThisContext)
        S.CXXThisTypeOverride = QualType();
      // Any saved FunctionScopes do not refer to this context.
      S.FunctionScopesStart = S.FunctionScopes.size();
      S.InventedParameterInfosStart = S.InventedParameterInfos.size();
    }

    /// Restore the previous context and associated state. Idempotent:
    /// subsequent calls (including the destructor's) are no-ops.
    void pop() {
      if (!SavedContext) return;
      S.CurContext = SavedContext;
      S.DelayedDiagnostics.popUndelayed(SavedContextState);
      S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
      S.FunctionScopesStart = SavedFunctionScopesStart;
      S.InventedParameterInfosStart = SavedInventedParameterInfosStart;
      SavedContext = nullptr;
    }

    ~ContextRAII() {
      pop();
    }
  };
  /// Whether the AST is currently being rebuilt to correct immediate
  /// invocations. Immediate invocation candidates and references to consteval
  /// functions aren't tracked when this is set.
  bool RebuildingImmediateInvocation = false;

  /// Used to change context to isConstantEvaluated without pushing a heavy
  /// ExpressionEvaluationContextRecord object.
  /// NOTE(review): not initialized at its declaration — presumably set in
  /// Sema's constructor; confirm before relying on its initial value.
  bool isConstantEvaluatedOverride;

  /// True if the innermost expression evaluation context is
  /// constant-evaluated, or if the override flag above is set.
  /// Requires ExprEvalContexts to be non-empty.
  bool isConstantEvaluated() {
    return ExprEvalContexts.back().isConstantEvaluated() ||
           isConstantEvaluatedOverride;
  }
  /// RAII object to handle the state changes required to synthesize
  /// a function body.
  class SynthesizedFunctionScope {
    Sema &S;
    Sema::ContextRAII SavedContext;
    bool PushedCodeSynthesisContext = false;

  public:
    /// \param DC the context of the synthesized body: must be a FunctionDecl
    ///        or an ObjCMethodDecl (asserted below).
    SynthesizedFunctionScope(Sema &S, DeclContext *DC)
        : S(S), SavedContext(S, DC) {
      // Synthesized bodies get a fresh function scope and are treated as
      // potentially evaluated.
      S.PushFunctionScope();
      S.PushExpressionEvaluationContext(
          Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
      if (auto *FD = dyn_cast<FunctionDecl>(DC))
        FD->setWillHaveBody(true);
      else
        assert(isa<ObjCMethodDecl>(DC));
    }

    /// Push a DefiningSynthesizedFunction code-synthesis context anchored at
    /// UseLoc; it is popped by the destructor. Must be called at most once
    /// per scope (asserted).
    void addContextNote(SourceLocation UseLoc) {
      assert(!PushedCodeSynthesisContext);

      Sema::CodeSynthesisContext Ctx;
      Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
      Ctx.PointOfInstantiation = UseLoc;
      Ctx.Entity = cast<Decl>(S.CurContext);
      S.pushCodeSynthesisContext(Ctx);

      PushedCodeSynthesisContext = true;
    }

    ~SynthesizedFunctionScope() {
      // Unwind in reverse order of construction.
      if (PushedCodeSynthesisContext)
        S.popCodeSynthesisContext();
      if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
        FD->setWillHaveBody(false);
      S.PopExpressionEvaluationContext();
      S.PopFunctionScopeInfo();
    }
  };
  /// WeakUndeclaredIdentifiers - Identifiers contained in a
  /// \#pragma weak before being declared. Rare; they may alias another
  /// identifier, declared or undeclared.
  llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;

  /// ExtnameUndeclaredIdentifiers - Identifiers contained in
  /// \#pragma redefine_extname before declared. Used in Solaris system headers
  /// to define functions that occur in multiple standards to call the version
  /// in the currently selected standard.
  llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;

  /// Load weak undeclared identifiers from the external source.
  void LoadExternalWeakUndeclaredIdentifiers();

  /// WeakTopLevelDecl - Translation-unit scoped declarations generated by
  /// \#pragma weak during processing of other Decls.
  /// I couldn't figure out a clean way to generate these in-line, so
  /// we store them here and handle separately -- which is a hack.
  /// It would be best to refactor this.
  SmallVector<Decl*,2> WeakTopLevelDecl;

  /// Tracks which declarations are currently bound to each identifier,
  /// for use during name lookup while parsing.
  IdentifierResolver IdResolver;

  /// Translation Unit Scope - useful to Objective-C actions that need
  /// to lookup file scope declarations in the "ordinary" C decl namespace.
  /// For example, user-defined classes, built-in "id" type, etc.
  Scope *TUScope;

  /// The C++ "std" namespace, where the standard library resides.
  LazyDeclPtr StdNamespace;

  /// The C++ "std::bad_alloc" class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdBadAlloc;

  /// The C++ "std::align_val_t" enum class, which is defined by the C++
  /// standard library.
  LazyDeclPtr StdAlignValT;

  /// The C++ "std::experimental" namespace, where the experimental parts
  /// of the standard library resides.
  NamespaceDecl *StdExperimentalNamespaceCache;

  /// The C++ "std::initializer_list" template, which is defined in
  /// \<initializer_list>.
  ClassTemplateDecl *StdInitializerList;

  /// The C++ "std::coroutine_traits" template, which is defined in
  /// \<coroutine_traits>
  ClassTemplateDecl *StdCoroutineTraitsCache;

  /// The C++ "type_info" declaration, which is defined in \<typeinfo>.
  RecordDecl *CXXTypeInfoDecl;

  /// The MSVC "_GUID" struct, which is defined in MSVC header files.
  RecordDecl *MSVCGuidDecl;

  /// Caches identifiers/selectors for NSFoundation APIs.
  std::unique_ptr<NSAPI> NSAPIObj;

  /// The declaration of the Objective-C NSNumber class.
  ObjCInterfaceDecl *NSNumberDecl;

  /// The declaration of the Objective-C NSValue class.
  ObjCInterfaceDecl *NSValueDecl;

  /// Pointer to NSNumber type (NSNumber *).
  QualType NSNumberPointer;

  /// Pointer to NSValue type (NSValue *).
  QualType NSValuePointer;

  /// The Objective-C NSNumber methods used to create NSNumber literals.
  ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];

  /// The declaration of the Objective-C NSString class.
  ObjCInterfaceDecl *NSStringDecl;

  /// Pointer to NSString type (NSString *).
  QualType NSStringPointer;

  /// The declaration of the stringWithUTF8String: method.
  ObjCMethodDecl *StringWithUTF8StringMethod;

  /// The declaration of the valueWithBytes:objCType: method.
  ObjCMethodDecl *ValueWithBytesObjCTypeMethod;

  /// The declaration of the Objective-C NSArray class.
  ObjCInterfaceDecl *NSArrayDecl;

  /// The declaration of the arrayWithObjects:count: method.
  ObjCMethodDecl *ArrayWithObjectsMethod;

  /// The declaration of the Objective-C NSDictionary class.
  ObjCInterfaceDecl *NSDictionaryDecl;

  /// The declaration of the dictionaryWithObjects:forKeys:count: method.
  ObjCMethodDecl *DictionaryWithObjectsMethod;

  /// id<NSCopying> type.
  QualType QIDNSCopying;

  /// will hold 'respondsToSelector:'
  Selector RespondsToSelectorSel;

  /// A flag to remember whether the implicit forms of operator new and delete
  /// have been declared.
  bool GlobalNewDeleteDeclared;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
  /// A candidate immediate invocation: the ConstantExpr plus one bit of
  /// bookkeeping in the pointer's low bits.
  using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>;

  /// Data structure used to record current or nested
  /// expression evaluation contexts.
  struct ExpressionEvaluationContextRecord {
    /// The expression evaluation context.
    ExpressionEvaluationContext Context;

    /// Whether the enclosing context needed a cleanup.
    CleanupInfo ParentCleanup;

    /// The number of active cleanup objects when we entered
    /// this expression evaluation context.
    unsigned NumCleanupObjects;

    /// The number of typos encountered during this expression evaluation
    /// context (i.e. the number of TypoExprs created).
    unsigned NumTypos;

    /// Expressions whose ODR-use status was deferred, saved from the
    /// enclosing context when this record was pushed.
    MaybeODRUseExprSet SavedMaybeODRUseExprs;

    /// The lambdas that are present within this context, if it
    /// is indeed an unevaluated context.
    SmallVector<LambdaExpr *, 2> Lambdas;

    /// The declaration that provides context for lambda expressions
    /// and block literals if the normal declaration context does not
    /// suffice, e.g., in a default function argument.
    Decl *ManglingContextDecl;

    /// If we are processing a decltype type, a set of call expressions
    /// for which we have deferred checking the completeness of the return type.
    SmallVector<CallExpr *, 8> DelayedDecltypeCalls;

    /// If we are processing a decltype type, a set of temporary binding
    /// expressions for which we have deferred checking the destructor.
    SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;

    /// Expressions recorded as possible dereferences of noderef pointers in
    /// this context; see WarnOnPendingNoDerefs.
    llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;

    /// Expressions appearing as the LHS of a volatile assignment in this
    /// context. We produce a warning for these when popping the context if
    /// they are not discarded-value expressions nor unevaluated operands.
    SmallVector<Expr*, 2> VolatileAssignmentLHSs;

    /// Set of candidates for starting an immediate invocation.
    llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates;

    /// Set of DeclRefExprs referencing a consteval function when used in a
    /// context not already known to be immediately invoked.
    llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval;

    /// \brief Describes whether we are in an expression context which we have
    /// to handle differently.
    enum ExpressionKind {
      EK_Decltype, EK_TemplateArgument, EK_Other
    } ExprContext;

    ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
                                      unsigned NumCleanupObjects,
                                      CleanupInfo ParentCleanup,
                                      Decl *ManglingContextDecl,
                                      ExpressionKind ExprContext)
        : Context(Context), ParentCleanup(ParentCleanup),
          NumCleanupObjects(NumCleanupObjects), NumTypos(0),
          ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {}

    /// True for any of the unevaluated-operand contexts (but NOT for
    /// DiscardedStatement).
    bool isUnevaluated() const {
      return Context == ExpressionEvaluationContext::Unevaluated ||
             Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
             Context == ExpressionEvaluationContext::UnevaluatedList;
    }

    /// True only for the ConstantEvaluated context.
    bool isConstantEvaluated() const {
      return Context == ExpressionEvaluationContext::ConstantEvaluated;
    }
  };
  /// A stack of expression evaluation contexts.
  /// NOTE(review): code such as isConstantEvaluated() calls back()
  /// unconditionally, so this stack is presumably never empty once Sema is
  /// initialized — confirm.
  SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;

  /// Emit a warning for all pending noderef expressions that we recorded.
  void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);

  /// Compute the mangling number context for a lambda expression or
  /// block literal. Also return the extra mangling decl if any.
  ///
  /// \param DC - The DeclContext containing the lambda expression or
  /// block literal.
  std::tuple<MangleNumberingContext *, Decl *>
  getCurrentMangleNumberContext(const DeclContext *DC);
  /// SpecialMemberOverloadResult - The overloading result for a special member
  /// function.
  ///
  /// This is basically a wrapper around PointerIntPair. The lowest bits of the
  /// integer are used to determine whether overload resolution succeeded.
  class SpecialMemberOverloadResult {
  public:
    enum Kind {
      NoMemberOrDeleted,
      Ambiguous,
      Success
    };

  private:
    llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

  public:
    SpecialMemberOverloadResult() : Pair() {}
    /// A deleted method is recorded as NoMemberOrDeleted, not Success.
    SpecialMemberOverloadResult(CXXMethodDecl *MD)
        : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}

    CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
    void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
    Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
    void setKind(Kind K) { Pair.setInt(K); }
  };

  /// FoldingSet node wrapping a SpecialMemberOverloadResult so results can
  /// be cached keyed by a FoldingSetNodeID.
  class SpecialMemberOverloadResultEntry
      : public llvm::FastFoldingSetNode,
        public SpecialMemberOverloadResult {
  public:
    SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
      : FastFoldingSetNode(ID)
    {}
  };
  /// A cache of special member function overload resolution results
  /// for C++ records.
  llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;

  /// A cache of the flags available in enumerations with the flag_bits
  /// attribute.
  mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;

  /// The kind of translation unit we are processing.
  ///
  /// When we're processing a complete translation unit, Sema will perform
  /// end-of-translation-unit semantic tasks (such as creating
  /// initializers for tentative definitions in C) once parsing has
  /// completed. Modules and precompiled headers perform different kinds of
  /// checks.
  TranslationUnitKind TUKind;

  /// Bump allocator owned by Sema.
  llvm::BumpPtrAllocator BumpAlloc;

  /// The number of SFINAE diagnostics that have been trapped.
  unsigned NumSFINAEErrors;

  typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
    UnparsedDefaultArgInstantiationsMap;

  /// A mapping from parameters with unparsed default arguments to the
  /// set of instantiations of each parameter.
  ///
  /// This mapping is a temporary data structure used when parsing
  /// nested class templates or nested classes of class templates,
  /// where we might end up instantiating an inner class before the
  /// default arguments of its methods have been parsed.
  UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

  // Contains the locations of the beginning of unparsed default
  // argument locations.
  llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

  /// UndefinedInternals - all the used, undefined objects which require a
  /// definition in this translation unit.
  llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;

  /// Determine if VD, which must be a variable or function, is an external
  /// symbol that nonetheless can't be referenced from outside this translation
  /// unit because its type has no linkage and it's not extern "C".
  bool isExternalWithNoLinkageType(ValueDecl *VD);

  /// Obtain a sorted list of functions that are undefined but ODR-used.
  void getUndefinedButUsed(
      SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

  /// Retrieves list of suspicious delete-expressions that will be checked at
  /// the end of translation unit.
  const llvm::MapVector<FieldDecl *, DeleteLocs> &
  getMismatchingDeleteExpressions() const;

  /// A pair of method lists kept per selector.
  /// NOTE(review): presumably (instance methods, factory methods) — confirm.
  typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
  typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

  /// Method Pool - allows efficient lookup when typechecking messages to "id".
  /// We need to maintain a list, since selectors can have differing signatures
  /// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
  /// of selectors are "overloaded").
  /// At the head of the list it is recorded whether there were 0, 1, or >= 2
  /// methods inside categories with a particular selector.
  GlobalMethodPool MethodPool;

  /// Method selectors used in a \@selector expression. Used for implementation
  /// of -Wselector.
  llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;

  /// List of SourceLocations where 'self' is implicitly retained inside a
  /// block.
  llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
      ImplicitlyRetainedSelfLocs;
  /// Kinds of C++ special members.
  /// CXXInvalid is a sentinel; all values must fit in the 3 integer bits of
  /// SpecialMemberDecl below.
  enum CXXSpecialMember {
    CXXDefaultConstructor,
    CXXCopyConstructor,
    CXXMoveConstructor,
    CXXCopyAssignment,
    CXXMoveAssignment,
    CXXDestructor,
    CXXInvalid
  };

  /// A (record, special-member-kind) pair packed into a pointer-sized value.
  typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
      SpecialMemberDecl;

  /// The C++ special members which we are currently in the process of
  /// declaring. If this process recursively triggers the declaration of the
  /// same special member, we should act as if it is not yet declared.
  llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
  /// Kinds of defaulted comparison operator functions.
  enum class DefaultedComparisonKind : unsigned char {
    /// This is not a defaultable comparison operator.
    None,
    /// This is an operator== that should be implemented as a series of
    /// subobject comparisons.
    Equal,
    /// This is an operator<=> that should be implemented as a series of
    /// subobject comparisons.
    ThreeWay,
    /// This is an operator!= that should be implemented as a rewrite in terms
    /// of a == comparison.
    NotEqual,
    /// This is an <, <=, >, or >= that should be implemented as a rewrite in
    /// terms of a <=> comparison.
    Relational,
  };

  /// The function definitions which were renamed as part of typo-correction
  /// to match their respective declarations. We want to keep track of them
  /// to ensure that we don't emit a "redefinition" error if we encounter a
  /// correctly named definition after the renamed definition.
  llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;

  /// Stack of types that correspond to the parameter entities that are
  /// currently being copy-initialized. Can be empty.
  llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;

  /// Populate the global method pool with methods for \p Sel
  /// (presumably streamed in from the external source — confirm).
  void ReadMethodPool(Selector Sel);
  void updateOutOfDateSelector(Selector Sel);

  /// Private Helper predicate to check for 'self'.
  bool isSelfExpr(Expr *RExpr);
  bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

  /// Cause the active diagnostic on the DiagnosticsEngine to be
  /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
  /// should not be used elsewhere.
  void EmitCurrentDiagnostic(unsigned DiagID);
  /// Records and restores the CurFPFeatures state on entry/exit of compound
  /// statements. Also snapshots and restores the current value of the FP
  /// pragma stack.
  class FPFeaturesStateRAII {
  public:
    FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) {
      OldOverrides = S.FpPragmaStack.CurrentValue;
    }
    ~FPFeaturesStateRAII() {
      S.CurFPFeatures = OldFPFeaturesState;
      S.FpPragmaStack.CurrentValue = OldOverrides;
    }
    /// The pragma overrides that were in effect when this RAII was entered.
    FPOptionsOverride getOverrides() { return OldOverrides; }

  private:
    Sema& S;
    FPOptions OldFPFeaturesState;
    FPOptionsOverride OldOverrides;
  };
  /// Add an implicit typedef with the given name and type.
  void addImplicitTypedef(StringRef Name, QualType T);

  /// Whether the stack-exhaustion warning has been issued (presumably so
  /// warnStackExhausted only fires once — confirm).
  bool WarnedStackExhausted = false;

public:
  Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
       TranslationUnitKind TUKind = TU_Complete,
       CodeCompleteConsumer *CompletionConsumer = nullptr);
  ~Sema();

  /// Perform initialization that occurs after the parser has been
  /// initialized but before it parses anything.
  void Initialize();

  /// This virtual key function only exists to limit the emission of debug info
  /// describing the Sema class. GCC and Clang only emit debug info for a class
  /// with a vtable when the vtable is emitted. Sema is final and not
  /// polymorphic, but the debug info size savings are so significant that it is
  /// worth adding a vtable just to take advantage of this optimization.
  virtual void anchor();
  // Simple accessors for the major objects Sema collaborates with.
  const LangOptions &getLangOpts() const { return LangOpts; }
  OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
  FPOptions &getCurFPFeatures() { return CurFPFeatures; }

  DiagnosticsEngine &getDiagnostics() const { return Diags; }
  SourceManager &getSourceManager() const { return SourceMgr; }
  Preprocessor &getPreprocessor() const { return PP; }
  ASTContext &getASTContext() const { return Context; }
  ASTConsumer &getASTConsumer() const { return Consumer; }
  ASTMutationListener *getASTMutationListener() const;
  ExternalSemaSource* getExternalSource() const { return ExternalSource; }

  /// Registers an external source. If an external source already exists,
  /// creates a multiplex external source and appends to it.
  ///
  ///\param[in] E - A non-null external sema source.
  ///
  void addExternalSource(ExternalSemaSource *E);

  void PrintStats() const;

  /// Warn that the stack is nearly exhausted.
  void warnStackExhausted(SourceLocation Loc);

  /// Run some code with "sufficient" stack space. (Currently, at least 256K is
  /// guaranteed). Produces a warning if we're low on stack space and allocates
  /// more in that case. Use this in code that may recurse deeply (for example,
  /// in template instantiation) to avoid stack overflow.
  void runWithSufficientStackSpace(SourceLocation Loc,
                                   llvm::function_ref<void()> Fn);
  /// Helper class that creates diagnostics with optional
  /// template instantiation stacks.
  ///
  /// This class provides a wrapper around the basic DiagnosticBuilder
  /// class that emits diagnostics. ImmediateDiagBuilder is
  /// responsible for emitting the diagnostic (as DiagnosticBuilder
  /// does) and, if the diagnostic comes from inside a template
  /// instantiation, printing the template instantiation stack as
  /// well.
  class ImmediateDiagBuilder : public DiagnosticBuilder {
    Sema &SemaRef;
    unsigned DiagID;

  public:
    ImmediateDiagBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}
    ImmediateDiagBuilder(DiagnosticBuilder &&DB, Sema &SemaRef, unsigned DiagID)
        : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) {}

    // This is a cunning lie. DiagnosticBuilder actually performs move
    // construction in its copy constructor (but due to varied uses, it's not
    // possible to conveniently express this as actual move construction). So
    // the default copy ctor here is fine, because the base class disables the
    // source anyway, so the user-defined ~ImmediateDiagBuilder is a safe no-op
    // in that case anyway.
    ImmediateDiagBuilder(const ImmediateDiagBuilder &) = default;

    ~ImmediateDiagBuilder() {
      // If we aren't active, there is nothing to do.
      if (!isActive()) return;

      // Otherwise, we need to emit the diagnostic. First clear the diagnostic
      // builder itself so it won't emit the diagnostic in its own destructor.
      //
      // This seems wasteful, in that as written the DiagnosticBuilder dtor will
      // do its own needless checks to see if the diagnostic needs to be
      // emitted. However, because we take care to ensure that the builder
      // objects never escape, a sufficiently smart compiler will be able to
      // eliminate that code.
      Clear();

      // Dispatch to Sema to emit the diagnostic.
      SemaRef.EmitCurrentDiagnostic(DiagID);
    }

    /// Teach operator<< to produce an object of the correct type.
    template <typename T>
    friend const ImmediateDiagBuilder &
    operator<<(const ImmediateDiagBuilder &Diag, const T &Value) {
      const DiagnosticBuilder &BaseDiag = Diag;
      BaseDiag << Value;
      return Diag;
    }

    // It is necessary to limit this to rvalue reference to avoid calling this
    // function with a bitfield lvalue argument since non-const reference to
    // bitfield is not allowed.
    template <typename T, typename = typename std::enable_if<
                              !std::is_lvalue_reference<T>::value>::type>
    const ImmediateDiagBuilder &operator<<(T &&V) const {
      const DiagnosticBuilder &BaseDiag = *this;
      BaseDiag << std::move(V);
      return *this;
    }
  };
  /// Bitmask to contain the list of reasons a single diagnostic should be
  /// emitted, based on its language. This permits multiple offload systems
  /// to coexist in the same translation unit.
  enum class DeviceDiagnosticReason {
    /// Diagnostic doesn't apply to anything. Included for completeness, but
    /// should make this a no-op.
    None = 0,
    /// OpenMP device-side diagnostic.
    OmpDevice = 1 << 0,
    /// OpenMP host-side diagnostic.
    OmpHost = 1 << 1,
    /// Any OpenMP diagnostic (device or host).
    OmpAll = OmpDevice | OmpHost,
    /// CUDA device-side diagnostic.
    CudaDevice = 1 << 2,
    /// CUDA host-side diagnostic.
    CudaHost = 1 << 3,
    /// Any CUDA diagnostic (device or host).
    CudaAll = CudaDevice | CudaHost,
    /// SYCL specific diagnostic.
    Sycl = 1 << 4,
    /// ESIMD specific diagnostic.
    Esimd = 1 << 5,
    /// A flag representing 'all'. This can be used to avoid the check
    /// all-together and make this behave as it did before the
    /// DiagnosticReason was added (that is, unconditionally emit).
    /// Note: This needs to be updated if any flags above are added.
    All = OmpAll | CudaAll | Sycl | Esimd,
    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/All)
  };
private:
  // A collection of pairs of undefined functions and their callers known
  // to be reachable from a routine on the device (kernel or device function).
  // Each entry is (callee, caller); see addFDToReachableFromSyclDevice.
  typedef std::pair<const FunctionDecl *, const FunctionDecl *> CallPair;
  llvm::SmallVector<CallPair> UndefinedReachableFromSyclDevice;

public:
// Helper routine to add a pair of Callee-Caller pair of FunctionDecl *
// to UndefinedReachableFromSyclDevice.
void addFDToReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
UndefinedReachableFromSyclDevice.push_back(std::make_pair(Callee, Caller));
}
// Helper routine to check if a pair of Callee-Caller FunctionDecl *
// is in UndefinedReachableFromSyclDevice.
bool isFDReachableFromSyclDevice(const FunctionDecl *Callee,
const FunctionDecl *Caller) {
return llvm::any_of(UndefinedReachableFromSyclDevice,
[Callee, Caller](const CallPair &P) {
return P.first == Callee && P.second == Caller;
});
}
/// A generic diagnostic builder for errors which may or may not be deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class SemaDiagnosticBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
SemaDiagnosticBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S, DeviceDiagnosticReason R);
SemaDiagnosticBuilder(SemaDiagnosticBuilder &&D);
SemaDiagnosticBuilder(const SemaDiagnosticBuilder &) = default;
~SemaDiagnosticBuilder();
bool isImmediate() const { return ImmediateDiag.hasValue(); }
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (SemaDiagnosticBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a SemaDiagnosticBuilder yourself.
operator bool() const { return isImmediate(); }
template <typename T>
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second
<< Value;
return Diag;
}
// It is necessary to limit this to rvalue reference to avoid calling this
// function with a bitfield lvalue argument since non-const reference to
// bitfield is not allowed.
template <typename T, typename = typename std::enable_if<
!std::is_lvalue_reference<T>::value>::type>
const SemaDiagnosticBuilder &operator<<(T &&V) const {
if (ImmediateDiag.hasValue())
*ImmediateDiag << std::move(V);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second
<< std::move(V);
return *this;
}
friend const SemaDiagnosticBuilder &
operator<<(const SemaDiagnosticBuilder &Diag, const PartialDiagnostic &PD) {
if (Diag.ImmediateDiag.hasValue())
PD.Emit(*Diag.ImmediateDiag);
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId]
.getDiag()
.second = PD;
return Diag;
}
void AddFixItHint(const FixItHint &Hint) const {
if (ImmediateDiag.hasValue())
ImmediateDiag->AddFixItHint(Hint);
else if (PartialDiagId.hasValue())
S.DeviceDeferredDiags[Fn][*PartialDiagId].getDiag().second.AddFixItHint(
Hint);
}
friend ExprResult ExprError(const SemaDiagnosticBuilder &) {
return ExprError();
}
friend StmtResult StmtError(const SemaDiagnosticBuilder &) {
return StmtError();
}
operator ExprResult() const { return ExprError(); }
operator StmtResult() const { return StmtError(); }
operator TypeResult() const { return TypeError(); }
operator DeclResult() const { return DeclResult(true); }
operator MemInitResult() const { return MemInitResult(true); }
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<ImmediateDiagBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Is the last error level diagnostic immediate. This is used to determined
/// whether the next info diagnostic should be immediate.
bool IsLastErrorImmediate = true;
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID,
bool DeferHint = false);
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic &PD,
bool DeferHint = false);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
/// Whether uncompilable error has occurred. This includes error happens
/// in deferred diagnostics.
bool hasUncompilableErrorOccurred() const;
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
/// Invent a new identifier for parameters of abbreviated templates.
IdentifierInfo *
InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName,
unsigned Index);
void emitAndClearUnusedLocalTypedefWarnings();
private:
/// Function or variable declarations to be checked for whether the deferred
/// diagnostics should be emitted.
SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags;
public:
// Emit all deferred diagnostics.
void emitDeferredDiags();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during Parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD, CapturedRegionKind K,
unsigned OpenMPCaptureLevel = 0);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void setFunctionHasMustTail();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Get the innermost lambda enclosing the current location, if any. This
/// looks through intervening non-lambda scopes such as local functions and
/// blocks.
sema::LambdaScopeInfo *getEnclosingLambda() const;
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
/// Called before parsing a function declarator belonging to a function
/// declaration.
void ActOnStartFunctionDeclarationDeclarator(Declarator &D,
unsigned TemplateParameterDepth);
/// Called after parsing a function declarator belonging to a function
/// declaration.
void ActOnFinishFunctionDeclarationDeclarator(Declarator &D);
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
SourceRange Brackets, DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
SYCLIntelFPGAIVDepAttr *
BuildSYCLIntelFPGAIVDepAttr(const AttributeCommonInfo &CI, Expr *Expr1,
Expr *Expr2);
template <typename FPGALoopAttrT>
FPGALoopAttrT *BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E = nullptr);
LoopUnrollHintAttr *BuildLoopUnrollHintAttr(const AttributeCommonInfo &A,
Expr *E);
OpenCLUnrollHintAttr *
BuildOpenCLLoopUnrollHintAttr(const AttributeCommonInfo &A, Expr *E);
SYCLIntelFPGALoopCountAttr *
BuildSYCLIntelFPGALoopCount(const AttributeCommonInfo &CI, Expr *E);
bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Stmt *E);
/// Determine whether the callee of a particular function call can throw.
/// E, D and Loc are all optional.
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D,
SourceLocation Loc = SourceLocation());
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
protected:
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
std::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, std::index_sequence_for<Ts...>());
DB << T;
}
};
/// Do a check to make sure \p Name looks like a legal argument for the
/// swift_name attribute applied to decl \p D. Raise a diagnostic if the name
/// is invalid for the given declaration.
///
/// \p AL is used to provide caret diagnostics in case of a malformed name.
///
/// \returns true if the name is a valid swift name for \p D, false otherwise.
bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation Loc,
const ParsedAttr &AL, bool IsAsync);
/// A derivative of BoundTypeDiagnoser for which the diagnostic's type
/// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless.
/// For example, a diagnostic with no other parameters would generally have
/// the form "...%select{incomplete|sizeless}0 type %1...".
template <typename... Ts>
class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> {
public:
SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args)
: BoundTypeDiagnoser<Ts...>(DiagID, Args...) {}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID);
this->emit(DB, std::index_sequence_for<Ts...>());
DB << T->isSizelessType() << T;
}
};
enum class CompleteTypeKind {
/// Apply the normal rules for complete types. In particular,
/// treat all sizeless types as incomplete.
Normal,
/// Relax the normal rules for complete types so that they include
/// sizeless built-in types.
AcceptSizeless,
// FIXME: Eventually we should flip the default to Normal and opt in
// to AcceptSizeless rather than opt out of it.
Default = AcceptSizeless
};
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking if the address
/// of them are eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(const Decl *Entity) {
return Entity->getOwningModule();
}
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
// When loading a non-modular PCH files, this is used to restore module
// visibility.
void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) {
VisibleModules.setVisible(Mod, ImportLoc);
}
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return D->isUnconditionallyVisible() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind = CompleteTypeKind::Default) {
return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
CompleteTypeKind Kind, unsigned DiagID);
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser);
}
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) {
return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID);
}
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, CompleteTypeKind::Normal, Diagnoser);
}
/// Get the type of expression E, triggering instantiation to complete the
/// type if necessary -- that is, if the expression refers to a templated
/// static data member of incomplete array type.
///
/// May still return an incomplete type if instantiation was not possible or
/// if the type is incomplete for a different reason. Use
/// RequireCompleteExprType instead if a diagnostic is expected for an
/// incomplete expression type.
QualType getCompletedType(Expr *E);
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind,
TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser);
}
template <typename... Ts>
bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID,
const Ts &... Args) {
SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, CompleteTypeKind::Normal, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType getDecltypeForParenthesizedExpr(Expr *E);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
/// This name is not a type or template in this context, but might be
/// something else.
NC_Unknown,
/// Classification failed; an error has been produced.
NC_Error,
/// The name has been typo-corrected to a keyword.
NC_Keyword,
/// The name was classified as a type.
NC_Type,
/// The name was classified as a specific non-type, non-template
/// declaration. ActOnNameClassifiedAsNonType should be called to
/// convert the declaration to an expression.
NC_NonType,
/// The name was classified as an ADL-only function name.
/// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the
/// result to an expression.
NC_UndeclaredNonType,
/// The name denotes a member of a dependent type that could not be
/// resolved. ActOnNameClassifiedAsDependentNonType should be called to
/// convert the result to an expression.
NC_DependentNonType,
/// The name was classified as an overload set, and an expression
/// representing that overload set has been formed.
/// ActOnNameClassifiedAsOverloadSet should be called to form a suitable
/// expression referencing the overload set.
NC_OverloadSet,
/// The name was classified as a template whose specializations are types.
NC_TypeTemplate,
/// The name was classified as a variable template name.
NC_VarTemplate,
/// The name was classified as a function template name.
NC_FunctionTemplate,
/// The name was classified as an ADL-only function template name.
NC_UndeclaredTemplate,
/// The name was classified as a concept name.
NC_Concept,
};
class NameClassification {
NameClassificationKind Kind;
union {
ExprResult Expr;
NamedDecl *NonTypeDecl;
TemplateName Template;
ParsedType Type;
};
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification OverloadSet(ExprResult E) {
NameClassification Result(NC_OverloadSet);
Result.Expr = E;
return Result;
}
static NameClassification NonType(NamedDecl *D) {
NameClassification Result(NC_NonType);
Result.NonTypeDecl = D;
return Result;
}
static NameClassification UndeclaredNonType() {
return NameClassification(NC_UndeclaredNonType);
}
static NameClassification DependentNonType() {
return NameClassification(NC_DependentNonType);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification Concept(TemplateName Name) {
NameClassification Result(NC_Concept);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ExprResult getExpression() const {
assert(Kind == NC_OverloadSet);
return Expr;
}
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
NamedDecl *getNonTypeDecl() const {
assert(Kind == NC_NonType);
return NonTypeDecl;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_Concept ||
Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_Concept:
return TNK_Concept_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
CorrectionCandidateCallback *CCC = nullptr);
/// Act on the result of classifying a name as an undeclared (ADL-only)
/// non-type declaration.
ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name,
SourceLocation NameLoc);
/// Act on the result of classifying a name as an undeclared member of a
/// dependent base class.
ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsAddressOfOperand);
/// Act on the result of classifying a name as a specific non-type
/// declaration.
ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS,
NamedDecl *Found,
SourceLocation NameLoc,
const Token &NextToken);
/// Act on the result of classifying a name as an overload set.
ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet);
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Heuristically determine whether it's plausible that \p E was intended to
/// name a template: it is a (possibly dependent) declaration or member
/// reference that carries no explicit template arguments.
///
/// \param Dependent Set to true when the answer was derived from a
/// type-dependent expression form; left untouched on the early-out paths.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
  if (E.isInvalid() || !getLangOpts().CPlusPlus)
    return false;
  Expr *Ex = E.get();
  Dependent = false;
  // Non-dependent reference forms.
  if (auto *Ref = dyn_cast<DeclRefExpr>(Ex))
    return !Ref->hasExplicitTemplateArgs();
  if (auto *Member = dyn_cast<MemberExpr>(Ex))
    return !Member->hasExplicitTemplateArgs();
  // Everything matched below is a dependent reference form.
  Dependent = true;
  if (auto *DepRef = dyn_cast<DependentScopeDeclRefExpr>(Ex))
    return !DepRef->hasExplicitTemplateArgs();
  if (auto *DepMember = dyn_cast<CXXDependentScopeMemberExpr>(Ex))
    return !DepMember->hasExplicitTemplateArgs();
  // Any additional cases recognized here should also be handled by
  // diagnoseExprIntendedAsTemplateName.
  return false;
}
/// Diagnose an expression appearing before a '<'...'>' pair that was likely
/// intended to be a template-name.
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
bool tryToFixVariablyModifiedVarType(TypeSourceInfo *&TInfo,
QualType &T, SourceLocation Loc,
unsigned FailedFoldDiagID);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
/// Diagnose qualifiers that are meaningless in the given position; each
/// qualifier's own location is used if provided, otherwise \p FallbackLoc.
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
/// Return the declaration shadowed by \p D given lookup results \p R, or
/// null if nothing relevant is shadowed (overloads by declaration kind).
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
NamedDecl *getShadowedDeclaration(const BindingDecl *D,
const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
/// How CheckConstexprFunctionDefinition should report problems.
enum class CheckConstexprKind {
/// Diagnose issues that are non-constant or that are extensions.
Diagnose,
/// Identify whether this function satisfies the formal rules for constexpr
/// functions in the current language mode (with no extensions).
CheckValid
};
bool CheckConstexprFunctionDefinition(const FunctionDecl *FD,
CheckConstexprKind Kind);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
Decl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
// Default-argument handling for function parameters: parsed, unparsed
// (delayed), and erroneous default arguments respectively.
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
ExprResult ConvertParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit);
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D);
ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr);
ExprResult ActOnRequiresClause(ExprResult ConstraintExpr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
/// Whether \p D is a (non-null) Objective-C method declaration.
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
/// The kind of module-declaration being acted upon.
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
/// How a tag name is being used at the point of reference.
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
/// Whether the "trivial_abi" attribute should be taken into account when
/// computing a special member's triviality.
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
/// For a defaulted function, the kind of defaulted function that it is.
///
/// Holds either a special-member kind or a defaulted-comparison kind; at
/// most one of the two is set to a non-"none" value at a time.
class DefaultedFunctionKind {
CXXSpecialMember SpecialMember : 8;
DefaultedComparisonKind Comparison : 8;
public:
DefaultedFunctionKind()
: SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) {
}
DefaultedFunctionKind(CXXSpecialMember CSM)
: SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {}
DefaultedFunctionKind(DefaultedComparisonKind Comp)
: SpecialMember(CXXInvalid), Comparison(Comp) {}
bool isSpecialMember() const { return SpecialMember != CXXInvalid; }
bool isComparison() const {
return Comparison != DefaultedComparisonKind::None;
}
explicit operator bool() const {
return isSpecialMember() || isComparison();
}
CXXSpecialMember asSpecialMember() const { return SpecialMember; }
DefaultedComparisonKind asComparison() const { return Comparison; }
/// Get the index of this function kind for use in diagnostics.
unsigned getDiagnosticIndex() const {
static_assert(CXXInvalid > CXXDestructor,
"invalid should have highest index");
static_assert((unsigned)DefaultedComparisonKind::None == 0,
"none should be equal to zero");
// Only one of the two fields is non-"none", so the sum yields a single
// flat index space: special members first, then comparison kinds.
return SpecialMember + (unsigned)Comparison;
}
};
DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD);
/// Convenience wrapper: the special-member kind of \p MD (CXXInvalid if it
/// is not a defaulted special member).
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) {
return getDefaultedFunctionKind(MD).asSpecialMember();
}
/// Convenience wrapper: the defaulted-comparison kind of \p FD (None if it
/// is not a defaulted comparison).
DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) {
return getDefaultedFunctionKind(FD).asComparison();
}
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
/// Opaque token returned when entering a skipped tag definition, to be
/// handed back to ActOnTagFinishSkippedDefinition.
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Enter a template parameter scope, after it's been associated with a
/// particular DeclContext. Causes lookup within the scope to chain through
/// enclosing contexts in the correct order.
void EnterTemplatedContext(Scope *S, DeclContext *DC);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
/// Attribute merging methods. Each returns the merged attribute (note the
/// signatures return a pointer, not bool; presumably nullptr means no new
/// attribute was added — confirm against the definitions).
AvailabilityAttr *
mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated,
VersionTuple Obsoleted, bool IsUnavailable,
StringRef Message, bool IsStrict, StringRef Replacement,
AvailabilityMergeKind AMK, int Priority);
TypeVisibilityAttr *
mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
TypeVisibilityAttr::VisibilityType Vis);
VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI,
VisibilityAttr::VisibilityType Vis);
UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef UuidAsWritten, MSGuidDecl *GuidDecl);
DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI);
DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI);
MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D,
const AttributeCommonInfo &CI,
bool BestCase,
MSInheritanceModel Model);
FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI,
IdentifierInfo *Format, int FormatIdx,
int FirstArg);
SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Name);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D,
const AttributeCommonInfo &CI,
const IdentifierInfo *Ident);
MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI);
SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const SwiftNameAttr &SNA,
StringRef Name);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D,
const AttributeCommonInfo &CI);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
WebAssemblyImportNameAttr *mergeImportNameAttr(
Decl *D, const WebAssemblyImportNameAttr &AL);
WebAssemblyImportModuleAttr *mergeImportModuleAttr(
Decl *D, const WebAssemblyImportModuleAttr &AL);
EnforceTCBAttr *mergeEnforceTCBAttr(Decl *D, const EnforceTCBAttr &AL);
EnforceTCBLeafAttr *mergeEnforceTCBLeafAttr(Decl *D,
const EnforceTCBLeafAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true,
bool ConsiderRequiresClauses = true);
/// Which kinds of explicit functions an implicit conversion is allowed
/// to consider.
enum class AllowedExplicit {
/// Allow no explicit functions to be used.
None,
/// Allow explicit conversion functions but not explicit constructors.
Conversions,
/// Allow both explicit conversion functions and explicit constructors.
All
};
/// Attempt to form an implicit conversion sequence from \p From to
/// \p ToType under the given constraints, without emitting diagnostics.
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
AllowedExplicit AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
/// Predicates classifying a (FromType -> ToType) conversion as a
/// particular standard conversion; each returns true when the conversion
/// is of that kind.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
/// Pointer conversions: on success, \p ConvertedType receives the
/// adjusted type; \p IncompatibleObjC flags ObjC-incompatible cases.
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
/// Compare the parameter lists of two function prototypes for equality;
/// on mismatch, \p ArgPos (if non-null) receives the first differing
/// parameter index.
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
/// Populate \p PDiag with details about a function-type mismatch.
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
/// Block/ObjC cast preparation helpers.
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
/// Check (and diagnose, unless \p Diagnose is false) a pointer
/// conversion, computing the cast kind and base path; returns true on
/// error.
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
/// Member-pointer conversion classification and checking.
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
/// Qualification and function-type conversions.
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
/// Diagnose the case where more than one user-defined conversion applies.
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
/// Initialize \p Entity from \p Value, preferring a move when NRVO-style
/// treatment of \p NRVOCandidate permits it.
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
/// Query whether aggregate initialization would succeed, for use during
/// overload resolution (no diagnostics).
bool CanPerformAggregateInitializationForOverloadResolution(
const InitializedEntity &Entity, InitListExpr *From);
/// Whether \p Init is a string literal that can initialize array \p AT.
bool IsStringInit(Expr *Init, const ArrayType *AT);
/// Query / perform copy-initialization of \p Entity from \p Init.
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
/// Convert \p From into a suitable implicit object argument for a call
/// to \p Method.
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
/// Contextual conversions required by the language (e.g. conditions).
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_ArrayBound, ///< Array bound in array declarator or new-expression.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
/// Check that \p From is a converted constant expression of type \p T in
/// context \p CCE, returning the converted expression and its evaluated
/// value. The APValue overload supports non-integral values and can
/// record \p Dest as the declaration the value refers to.
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE,
NamedDecl *Dest = nullptr);
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
///
/// Subclasses implement match() to accept destination types and the
/// diagnose*/note* hooks to customize the diagnostics emitted by
/// PerformContextualImplicitConversion().
class ContextualImplicitConverter {
public:
/// If true, suppress all diagnostics from the conversion.
bool Suppress;
/// If true, do not actually apply a found conversion (diagnose only).
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
/// Converter that matches integral and (optionally scoped) enumeration
/// destination types; subclasses only supply the "not an integer"
/// diagnostic via diagnoseNotInt().
class ICEConvertDiagnoser : public ContextualImplicitConverter {
/// Whether scoped enumerations are acceptable destination types.
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
// Forwards the generic "no match" diagnostic to the more specific
// diagnoseNotInt() hook.
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
/// The kind of Objective-C subscripting expression a base expression
/// supports (array-style, dictionary-style, or neither).
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
/// Classify the kind of ObjC subscripting \p FromE can be used for.
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
/// Classify \p FromE as one of the ObjC literal kinds above.
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
/// Convert the object argument \p From as needed to access \p Member
/// found via \p FoundDecl (possibly through base classes).
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this is a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
/// Whether a call expression's callee was found via ADL.
using ADLCallKind = CallExpr::ADLCallKind;
/// Add \p Function as a candidate for overload resolution against
/// \p Args, recording the result in \p CandidateSet.
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
/// Add every function (and function template) in \p Functions as a
/// candidate.
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
/// Add a method candidate, with the implicit object argument described
/// by \p ObjectType / \p ObjectClassification. The first overload
/// dispatches on what \p FoundDecl refers to; the second takes the
/// method and acting context explicitly.
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false,
OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None,
OverloadCandidateParamOrder PO = {});
/// Add a member function template as a candidate (template arguments
/// deduced against \p Args and \p ExplicitTemplateArgs).
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
OverloadCandidateParamOrder PO = {});
/// Add a (non-member) function template as a candidate.
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
OverloadCandidateParamOrder PO = {});
/// Check the argument conversions that do not depend on deduced template
/// parameters, filling \p Conversions; returns true on failure.
bool CheckNonDependentConversions(
FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions, bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
Expr::Classification ObjectClassification = {},
OverloadCandidateParamOrder PO = {});
/// Add a conversion function (or conversion function template) as a
/// candidate for converting \p From to \p ToType.
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
/// Add a surrogate candidate: a conversion to function pointer/reference
/// followed by a call through the resulting \p Proto.
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
/// Candidate collection for overloaded operators.
void AddNonMemberOperatorCandidates(
const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
OverloadCandidateParamOrder PO = {});
/// Add a single built-in operator candidate with the given parameter
/// types.
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
/// Add all applicable built-in operator candidates for \p Op.
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
/// Add candidates found by argument-dependent lookup of \p Name.
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(
NamedDecl *Found, FunctionDecl *Fn,
OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(),
QualType DestType = QualType(), bool TakingAddress = false);
// Emit as a series of 'note's all template and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc,
ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
/// Resolve the address of an overloaded function against a target type,
/// returning the selected function or null. \p pHadMultipleCandidates,
/// when non-null, reports whether more than one candidate was viable.
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
/// Resolve an overload set to its single viable candidate without a
/// target type; the "AndFix" variant also rewrites the expression.
FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfSingleOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
/// Resolve an overload set to a single function template specialization.
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
// NOTE(review): "Converion" below is a long-standing spelling typo in the
// parameter name; renaming requires touching the definition too.
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConverion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
/// Rewrite a reference to an overloaded function into a reference to the
/// specific function \p Fn that overload resolution selected.
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
/// Collect call candidates from an unresolved callee expression or a
/// lookup result.
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
void AddOverloadedCallCandidates(
LookupResult &R, TemplateArgumentListInfo *ExplicitTemplateArgs,
ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success, ///< The begin/end calls were built successfully.
FRS_NoViableFunction, ///< Lookup found no viable begin/end function.
FRS_DiagnosticIssued ///< A diagnostic has already been emitted.
};
/// Build the begin() or end() call for a range-based for loop over
/// \p Range, storing the resulting call in \p CallExpr.
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
/// Build a call where the callee is an overloaded function name,
/// performing overload resolution (with optional typo correction).
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
/// Populate \p CandidateSet for an overloaded call; returns true if a
/// result (success or error) was already produced in \p Result.
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
/// Create an UnresolvedLookupExpr naming the functions in \p Fns.
ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
NestedNameSpecifierLoc NNSLoc,
DeclarationNameInfo DNI,
const UnresolvedSetImpl &Fns,
bool PerformADL = true);
/// Build a possibly-overloaded unary operator expression.
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
/// Perform lookup (including ADL when requested) for an overloaded
/// binary operator, filling \p CandidateSet.
void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
OverloadedOperatorKind Op,
const UnresolvedSetImpl &Fns,
ArrayRef<Expr *> Args, bool RequiresADL = true);
/// Build a possibly-overloaded binary operator expression; C++20
/// rewritten candidates (e.g. from operator<=>) may be considered.
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true,
bool AllowRewrittenCandidates = true,
FunctionDecl *DefaultedFn = nullptr);
/// Build the synthesized (reversed-argument) three-way comparison used
/// for defaulted comparisons.
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
FunctionDecl *DefaultedFn);
/// Build a possibly-overloaded array subscript expression.
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
/// Build a call whose callee is a (possibly overloaded) member function.
ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
bool AllowRecovery = false);
/// Build a call on an object of class type (operator() or surrogate).
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
/// Build a use of overloaded operator-> on \p Base; \p NoArrowOperatorFound
/// (when non-null) suppresses the diagnostic and reports that condition.
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
/// Return the innermost enclosing scope that can contain non-field
/// declarations.
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria is specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up a name following ~ in a destructor name. This is an ordinary
/// lookup, but prefers tags to typedefs.
LookupDestructorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
/// Choose the redeclaration-lookup mode appropriate for CurContext.
///
/// A declaration whose linkage is owned by a module can never link
/// against anything that is not visible, so restrict lookup to visible
/// redeclarations; otherwise non-visible external-linkage results may
/// also be found.
RedeclarationKind forRedeclarationInCurContext() {
// We don't need to check linkage here; if the context has internal
// linkage, redeclaration lookup won't find things from other TUs, and
// we can't safely compute linkage yet in general.
const bool OwnedForLinkage =
cast<Decl>(CurContext)->getOwningModuleForLinkage(
/*IgnoreLinkage*/ true) != nullptr;
return OwnedForLinkage ? ForVisibleRedeclaration
: ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplatePack,
};
/// Look up the special member function of \p D described by \p SM with
/// the given qualifiers on the argument and on 'this'.
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
/// Callback invoked to emit the diagnostic for a delayed typo.
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
/// Callback invoked to build the recovery expression for a delayed typo.
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
/// Perform C++ unqualified name lookup in the given scope.
bool CppLookupName(LookupResult &R, Scope *S);
/// Bookkeeping for a single delayed TypoExpr: the consumer that
/// enumerates candidate corrections plus the diagnostic and recovery
/// callbacks to invoke once a correction is chosen. Move-only.
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, SourceLocation TypoLoc);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
/// Retrieve the recorded state for the given delayed TypoExpr.
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloaded.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
/// Look up a builtin with the name in \p R; returns true if one was found.
bool LookupBuiltin(LookupResult &R);
/// Ensure the types needed by builtin \p ID are available in this scope.
void LookupNecessaryTypesForBuiltin(Scope *S, unsigned ID);
/// Perform unqualified name lookup starting at scope \p S; returns true
/// if anything was found.
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
/// Perform qualified name lookup into the given declaration context.
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
/// Perform lookup for a parsed (possibly scope-qualified) name.
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
/// Look up an Objective-C protocol declaration by name.
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
/// Perform lookup in the superclass(es) of \p Class (ObjC-style 'super').
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
/// Find all non-member functions named 'operator Op' visible from \p S.
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
UnresolvedSetImpl &Functions);
/// Find the label named \p II, creating it if it does not exist yet.
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
/// Lookups for special member functions of a class.
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
/// Check the validity of a literal-operator-id; returns true on error.
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
/// Look up the literal operator for a user-defined literal, restricted
/// to the forms permitted by the Allow* flags.
LiteralOperatorLookupResult
LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys,
bool AllowRaw, bool AllowTemplate,
bool AllowStringTemplate, bool DiagnoseMissing,
StringLiteral *StringLit = nullptr);
/// Whether \p name refers to any known declaration.
bool isKnownName(StringRef name);
/// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs.
enum class FunctionEmissionStatus {
Emitted,
CUDADiscarded, // Discarded due to CUDA/HIP hostness
OMPDiscarded, // Discarded due to OpenMP hostness
TemplateDiscarded, // Discarded due to uninstantiated templates
Unknown,
};
/// Compute whether \p Decl will be emitted for the current target;
/// \p Final indicates whether the answer can still change later.
FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl,
bool Final = false);
DeviceDiagnosticReason getEmissionReason(const FunctionDecl *Decl);
// Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check.
bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee);
/// Perform argument-dependent lookup of \p Name for the given call
/// arguments, collecting the found functions in \p Functions.
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
/// Report every declaration visible from scope \p S (or context \p Ctx)
/// to \p Consumer; used for code completion and typo correction.
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non error recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
/// Try to "fix" a misspelled name \p Typo, returning the best correction
/// found (which may be empty). \p CCC filters acceptable candidates.
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
/// Like CorrectTypo, but defers the correction: returns a TypoExpr
/// placeholder whose diagnostic (\p TDG) and recovery (\p TRC) callbacks
/// run later, once the surrounding expression is complete.
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected and ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param RecoverUncorrectedTypos If true, when typo correction fails, it
/// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult CorrectDelayedTyposInExpr(
Expr *E, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
/// Convenience overload of CorrectDelayedTyposInExpr taking an
/// ExprResult: invalid results are passed through untouched; valid ones
/// are forwarded to the Expr* overload with the same arguments.
ExprResult CorrectDelayedTyposInExpr(
ExprResult ER, VarDecl *InitDecl = nullptr,
bool RecoverUncorrectedTypos = false,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
if (ER.isInvalid())
return ER;
return CorrectDelayedTyposInExpr(ER.get(), InitDecl,
RecoverUncorrectedTypos, Filter);
}
/// Diagnose the typo described by \p Correction, using \p TypoDiag as the
/// primary diagnostic.
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
/// Variant that additionally emits \p PrevNote (presumably pointing at the
/// declaration being corrected to — confirm against the definition).
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
/// Compute the associated namespaces and classes of the argument
/// expressions \p Args for argument-dependent lookup.
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
/// Attempts to produce a RecoveryExpr after some AST node cannot be created.
ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End,
ArrayRef<Expr *> SubExprs,
QualType T = QualType());
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
FunctionDecl *CreateBuiltin(IdentifierInfo *II, QualType Type, unsigned ID,
SourceLocation Loc);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
FunctionDecl *FD);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Handles semantic checking for features that are common to all attributes,
/// such as checking whether a parameter was properly specified, or the
/// correct number of arguments were passed, etc. Returns true if the
/// attribute has been diagnosed.
bool checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A);
bool checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
/// Check that the argument \p ArgNum of \p Attr is a string literal,
/// returning its value in \p Str (and its location in \p ArgLocation when
/// that out-parameter is provided).
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
llvm::Error isValidSectionSpecifier(StringRef Str);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Process the attributes before creating an attributed statement. Returns
/// the semantic attributes that have been processed.
void ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesWithRange &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs);
/// Warn when an implementation's method type conflicts with its declaration.
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which
/// backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjCPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
/// Diagnose ivars in \p ID that duplicate ivars of superclass \p SID.
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
/// How strictly two method signatures are compared by
/// MatchTwoMethodDeclarations.
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks if two methods' type match and returns
/// true, or false, accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in interface
/// or protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// category matches with those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in global method pool for
/// given selector. It checks the desired kind first; if none is found and
/// parameter \p CheckTheOther is set, it then checks the other kind. If no
/// such method or only one method is found, function returns false;
/// otherwise, it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Emit a diagnostic when several global-pool methods in \p Methods match.
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
///
/// \param RecordFailure when false, nothing is recorded; only the empty
/// correction is returned.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (!RecordFailure)
    return TypoCorrection();
  TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
  AddMethodToGlobalPool(Method, impl, /*instance=*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory, to the
/// global pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the instance method matching
/// \p Sel and warns if there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the factory method matching
/// \p Sel and warns if there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass=false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance=*/false);
}
/// Find a method whose selector could be a typo-correction candidate for
/// \p Sel. NOTE(review): semantics inferred from the name — confirm against
/// the definition.
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// construction or destruction.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
/// Holds an expression wrapped up as a "full expression" (see
/// Sema::MakeFullExpr), possibly null.
class FullExprArg {
public:
  FullExprArg() = default;
  FullExprArg(Sema &actions) {}

  ExprResult release() { return E; }
  Expr *get() const { return E; }
  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E = nullptr;
};
/// Wrap \p Arg as a full expression, using its own location as the
/// context location.
FullExprArg MakeFullExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  return MakeFullExpr(Arg, Loc);
}

/// Wrap \p Arg as a full expression whose value is used.
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  ExprResult FE = ActOnFinishFullExpr(Arg, CC, /*DiscardedValue=*/false);
  return FullExprArg(FE.get());
}

/// Wrap \p Arg as a full expression whose value is discarded.
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  SourceLocation Loc = Arg ? Arg->getExprLoc() : SourceLocation();
  ExprResult FE = ActOnFinishFullExpr(Arg, Loc, /*DiscardedValue=*/true);
  return FullExprArg(FE.get());
}
// Parser callbacks for expression statements, null statements, and
// compound statements.
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnAfterCompoundStatementLeadingPragmas();
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr);
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
};
/// An RAII helper that pops a function scope on exit, unless disable() was
/// called first.
struct FunctionScopeRAII {
  Sema &S;
  bool Active = true;

  FunctionScopeRAII(Sema &S) : S(S) {}

  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }

  /// Prevent the destructor from popping the scope.
  void disable() { Active = false; }
};
// Parser callbacks that build statement AST nodes (see SemaStmt.cpp).
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
/// Build a case statement; \p DotDotDotLoc and \p RHS cover the GNU
/// case-range extension (case LHS ... RHS).
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult BuildAttributedStmt(SourceLocation AttrsLoc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(const ParsedAttributesWithRange &AttrList,
Stmt *SubStmt);
bool CheckRebuiltAttributedStmtAttributes(ArrayRef<const Attr *> Attrs);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
SourceLocation LParenLoc, Stmt *InitStmt,
ConditionResult Cond,
SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
ConditionResult Cond, SourceLocation RParenLoc,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
/// The reason a C++ for-range statement is being built or rebuilt.
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params,
unsigned OpenMPCaptureLevel = 0);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
/// Bitmask flags controlling which entities are eligible for copy elision /
/// implicit move; the CES_ImplicitlyMovable* values combine the flags used
/// for the respective language modes.
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_AllowRValueReferenceType = 8,
CES_ImplicitlyMovableCXX11CXX14CXX17 =
(CES_AllowParameters | CES_AllowDifferentTypes),
CES_ImplicitlyMovableCXX20 =
(CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables | CES_AllowRValueReferenceType),
};
/// Determine the variable, if any, that is a candidate for copy elision in
/// the given return expression \p E.
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
// Inline-assembly callbacks: GCC-style (ActOnGCCAsmStmt) and MS-style
// (ActOnMSAsmStmt) asm statements.
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
// Exception-handling callbacks: ObjC \@try/\@catch/\@finally, C++
// try/catch, and MS __try/__except/__finally.
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
/// Determine whether \p D is a file-scoped declaration that should be warned
/// about if it is never used.
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null
/// statement as a \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
/// Push a new pool of delayed parsing diagnostics, returning the state
/// needed to pop it again later.
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;

/// Enter a class-parsing context; diagnostics are no longer delayed while
/// it is active.
ParsingClassState PushParsingClass() {
  ++ParsingClassDepth;
  return DelayedDiagnostics.pushUndelayed();
}

/// Leave a class-parsing context entered with PushParsingClass().
void PopParsingClass(ParsingClassState state) {
  --ParsingClassDepth;
  DelayedDiagnostics.popUndelayed(state);
}
/// Move delayed diagnostics into the given pool.
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
/// Diagnose the availability (deprecated/unavailable/partial) of \p D at the
/// use sites \p Locs.
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
/// Determine whether \p D may be used (e.g. is not unavailable).
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
// NOTE(review): the parameter name "ClassReciever" is misspelled; renaming
// it requires touching the out-of-line definition as well.
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReciever = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
/// The kind of capture requested for tryCaptureVariable.
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Try to convert an expression \p E to type \p Ty. Returns the result of the
/// conversion.
ExprResult tryConvertExprToType(Expr *E, QualType Ty);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
/// Split an UnqualifiedId into its name and any template arguments,
/// storing the arguments in \p Buffer.
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool DiagnoseDependentMemberLookup(LookupResult &R);
/// Diagnose a lookup that found no results; may apply typo correction,
/// returning the correction via \p Out when requested.
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S,
IdentifierInfo *II);
ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
DeclRefExpr *
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(
const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs, const Scope *S,
UnresolvedLookupExpr *AsULE = nullptr);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand);
ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, ParsedType Ty);
ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc,
SourceLocation LParen,
SourceLocation RParen, Expr *E);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
// sizeof/alignof/vec_step and friends, with either a type or an
// expression operand.
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
/// Parser entry point; \p TyOrEx is a ParsedType when \p IsType is true,
/// otherwise an Expr*.
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
/// Handle `sizeof...(pack)`.
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx,
Expr *ColumnIdx,
SourceLocation RBLoc);
// OpenMP array sections/shaping: base[lower:length:stride] and
// ([d1][d2]...)base.
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound,
SourceLocation ColonLocFirst,
SourceLocation ColonLocSecond,
Expr *Length, Expr *Stride,
SourceLocation RBLoc);
ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc,
SourceLocation RParenLoc,
ArrayRef<Expr *> Dims,
ArrayRef<SourceRange> Brackets);
/// Data structure for iterator expression.
struct OMPIteratorData {
/// Name of the declared iterator variable (null if absent).
IdentifierInfo *DeclIdent = nullptr;
SourceLocation DeclIdentLoc;
/// Declared type of the iterator variable.
ParsedType Type;
/// Begin/end/step expressions of the iterator range.
OMPIteratorExpr::IteratorRange Range;
SourceLocation AssignLoc;
SourceLocation ColonLoc;
SourceLocation SecColonLoc;
};
ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc,
SourceLocation LLoc, SourceLocation RLoc,
ArrayRef<OMPIteratorData> Data);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
/// Build a member reference given an unresolved name; performs the member
/// lookup itself before delegating to the LookupResult overload below.
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
/// Build a member reference from an already-performed lookup \p R.
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
/// Parser callback for `base.member` / `base->member` access.
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
/// Convert the call arguments to match \p Proto; performs default-argument
/// filling and per-argument conversions.
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false,
bool AllowRecovery = false);
/// Whether BuildAtomicExpr receives arguments in the user-facing builtin
/// order (API) or the internal AST order.
enum class AtomicArgumentOrder { API, AST };
ExprResult
BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange,
SourceLocation RParenLoc, MultiExprArg Args,
AtomicExpr::AtomicOp Op,
AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
/// Handle a CUDA kernel launch configuration: `kernel<<<...>>>(...)`.
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
// Compound literals: `(T){ ... }`.
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult BuildInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
/// Handle a C99 designated initializer (or the GNU `field:` syntax when
/// \p GNUSyntax is true).
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation EqualOrColonLoc,
bool GNUSyntax,
ExprResult Init);
private:
/// Map a parsed operator token to the corresponding BinaryOperatorKind.
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc,
UnresolvedSetImpl &Functions);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
// GNU statement expressions: ({ ... }).
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc);
ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc, unsigned TemplateDepth);
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
// Microsoft __if_exists / __if_not_exists support.
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
ExprResult BuildAsTypeExpr(Expr *E, QualType DestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
// Cached lookups of well-known standard-library entities.
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing if we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Why a comparison category type was needed.
enum class ComparisonCategoryUsage {
/// The '<=>' operator was used in an expression and a builtin operator
/// was selected.
OperatorInExpression,
/// A defaulted 'operator<=>' needed the comparison category. This
/// typically only applies to 'std::strong_ordering', due to the implicit
/// fallback return value.
DefaultedOperator,
};
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc,
ComparisonCategoryUsage Usage);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
// Using-directives: `using namespace N;`.
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
// Namespace aliases: `namespace A = B;`.
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
/// Handle an alias-declaration: `using X = T;` (possibly templated).
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
/// Build a use of a non-static data member's in-class initializer.
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
// Set used to deduplicate the exception types gathered in Exceptions.
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
// Reset both the dedup set and the ordered exception list together so
// they never fall out of sync.
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
// Starts from the most restrictive specification for the active language
// mode: noexcept in C++11 and later, throw() otherwise.
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E) { CalledStmt(E); }
/// Integrate an invoked statement into the collected data.
void CalledStmt(Stmt *S);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
// C++11 [except.spec]p14:
//   The exception-specification is noexcept(false) if the set of
//   potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Produce notes explaining why a defaulted function was defined as deleted.
void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
/// Wrap the expression in a ConstantExpr if it is a potential immediate
/// invocation.
ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
QualType DeclInitType, MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr *> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
/// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse
/// {dynamic,static,reinterpret,const,addrspace}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee,
SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
  Sema &S;
  // Saved value of Sema::CXXThisTypeOverride, presumably restored when the
  // scope ends (ctor/dtor bodies are out of line) -- see CXXThisTypeOverride.
  QualType OldCXXThisTypeOverride;
  // Mirrors the constructor's Enabled flag: when false, this scope is a no-op.
  bool Enabled;
public:
  /// Introduce a new scope where 'this' may be allowed (when enabled),
  /// using the given declaration (which is either a class template or a
  /// class) along with the qualifiers placed on '*this'.
  CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
                   bool Enabled = true);
  ~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns 'true' if failed, 'false' if success.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
/// Passed to FindAllocationFunctions to control where 'operator new' and
/// 'operator delete' are looked up.
enum AllocationFunctionScope {
  /// Only look for allocation functions in the global scope.
  AFS_Global,
  /// Only look for allocation functions in the scope of the
  /// allocated class.
  AFS_Class,
  /// Look for allocation functions in both the global scope
  /// and in the scope of the allocated class.
  AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
/// Convenience overload of ActOnFinishFullExpr that uses the expression's
/// own location as the position of the full-expression (or an invalid
/// location when \p Expr is null).
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
  SourceLocation CC = Expr ? Expr->getExprLoc() : SourceLocation();
  return ActOnFinishFullExpr(Expr, CC, DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
struct NestedNameSpecInfo {
  /// The type of the object, if we're parsing nested-name-specifier in
  /// a member access expression.
  ParsedType ObjectType;

  /// The identifier preceding the '::'.
  IdentifierInfo *Identifier;

  /// The location of the identifier.
  SourceLocation IdentifierLoc;

  /// The location of the '::'.
  SourceLocation CCLoc;

  /// Creates info object for the most typical case.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc,
                     ParsedType ObjectType = ParsedType())
      : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
        CCLoc(ColonColonLoc) {}

  /// As above, but wraps a QualType object type; delegates to the
  /// ParsedType constructor.
  NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
                     SourceLocation ColonColonLoc, QualType ObjectType)
      : NestedNameSpecInfo(II, IdLoc, ColonColonLoc,
                           ParsedType::make(ObjectType)) {}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery. In this case do not emit error message.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed by this parameter is set to 'true'
/// if the identifier is treated as if it was followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class,
SourceRange IntroducerRange,
TypeSourceInfo *MethodType,
SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Expr *TrailingRequiresClause);
/// Number lambda for linkage purposes if necessary.
void handleLambdaNumbering(
CXXRecordDecl *Class, CXXMethodDecl *Method,
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions such as an lvalue-to-rvalue conversion if
/// not being used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
    SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
    IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
  // Anything other than copy-init ("[x = e]") is direct-initialization.
  bool DirectInit = InitKind != LambdaCaptureInitKind::CopyInit;
  QualType CaptureType = buildLambdaInitCaptureInitialization(
      Loc, ByRef, EllipsisLoc, None, Id, DirectInit, Init);
  return ParsedType::make(CaptureType);
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the declcontext of the lambda's
/// call operator, for name lookup purposes for a lambda init capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc,
ExprResult RequiresClause);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType,
CallingConv CC);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
bool *PossibleNonPrimary = nullptr,
bool IsTrailingRequiresClause = false);
private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *>
NormalizationCache;
llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
SatisfactionCache;
public:
const NormalizedConstraint *
getNormalizedAssociatedConstraints(
NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);
/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives the result of true if D1 is
/// at least as constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
NamedDecl *D2, ArrayRef<const Expr *> AC2,
bool &Result);
/// If D1 was not at least as constrained as D2, but would've been if a pair
/// of atomic constraints involved had been declared in a concept and not
/// repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(NamedDecl *D1,
ArrayRef<const Expr *> AC1, NamedDecl *D2, ArrayRef<const Expr *> AC2);
/// \brief Check whether the given list of constraint expressions are
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if true is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
bool CheckConstraintSatisfaction(
const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange, ConstraintSatisfaction &Satisfaction);
/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction);
/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true if
/// an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
ConstraintSatisfaction &Satisfaction,
SourceLocation UsageLoc = SourceLocation());
/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be checked
/// for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange TemplateIDRange);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void
DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
bool First = true);
/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void
DiagnoseUnsatisfiedConstraint(const ASTConstraintSatisfaction &Satisfaction,
bool First = true);
// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// Mark destructors of virtual bases of this class referenced. In the Itanium
/// C++ ABI, this is done when emitting a destructor for any non-abstract
/// class. In the Microsoft C++ ABI, this is done any time a class's
/// destructor is referenced.
void MarkVirtualBaseDestructorsReferenced(
SourceLocation Location, CXXRecordDecl *ClassDecl,
llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr);
/// Do semantic checks to allow the complete destructor variant to be emitted
/// when the destructor is defined in another translation unit. In the Itanium
/// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they
/// can be emitted in separate TUs. To emit the complete variant, run a subset
/// of the checks performed when emitting a regular destructor.
void CheckCompleteDestructorVariant(SourceLocation CurrentLocation,
CXXDestructorDecl *Dtor);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator. \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);
/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);
/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);
void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);
bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();
bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl *RD,
FunctionDecl *Spaceship);
void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD,
DefaultedComparisonKind DCK);
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the exception
/// spec is a subset of base spec.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent);
/// CheckForFunctionMarkedFinal - Checks whether a virtual member function
/// overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
/// The result of a C++ access-control check.
enum AccessResult {
/// The entity is accessible.
AR_accessible,
/// The entity is not accessible.
AR_inaccessible,
/// Accessibility depends on template parameters and cannot be
/// determined until instantiation.
AR_dependent,
/// The access check has been delayed (to be resolved later).
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found, QualType ObjectType,
SourceLocation Loc,
const PartialDiagnostic &Diag);
/// Convenience overload: check member accessibility for deletion purposes
/// without supplying a source location or an extra diagnostic.
bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass,
DeclAccessPair Found,
QualType ObjectType) {
// Delegate to the full overload, using an invalid location and an empty
// partial diagnostic as defaults.
return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType,
SourceLocation(), PDiag());
}
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
/// Selects the kind of entity whose type was found to be abstract, for use
/// in abstract-type diagnostics.
/// NOTE(review): the enumerator order presumably matches the %select
/// ordering of the corresponding diagnostics — verify before reordering.
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
/// Diagnose with \p DiagID (formatted with \p Args) if \p T is an abstract
/// class type.
///
/// \returns the result of the TypeDiagnoser-based overload, which performs
/// the actual check.
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
// Bundle the diagnostic ID and its arguments into a diagnoser object so
// the non-template overload can emit the diagnostic when needed.
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
static NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
/// Tag type used to construct a RequiredTemplateKind that unconditionally
/// requires a template name.
enum TemplateNameIsRequiredTag { TemplateNameIsRequired };
/// Whether and why a template name is required in this lookup.
class RequiredTemplateKind {
public:
/// Template name is required if TemplateKWLoc is valid.
RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation())
: TemplateKW(TemplateKWLoc) {}
/// Template name is unconditionally required (TemplateKW is left empty).
RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {}
/// The location of the 'template' keyword, or an invalid location if
/// there was none (including the unconditionally-required case).
SourceLocation getTemplateKeywordLoc() const {
return TemplateKW.getValueOr(SourceLocation());
}
/// Whether an explicit 'template' keyword was written.
bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); }
/// Whether a template name is required. True when TemplateKW is empty
/// (unconditionally required) or holds a valid keyword location; false
/// only when TemplateKW holds an invalid (default) location.
bool isRequired() const { return TemplateKW != SourceLocation(); }
explicit operator bool() const { return isRequired(); }
private:
/// None if a template name is unconditionally required; otherwise the
/// (possibly invalid) location of the 'template' keyword.
llvm::Optional<SourceLocation> TemplateKW;
};
/// Describes whether (and why) a name is assumed to denote a template even
/// though lookup did not find a template.
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(
LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType,
bool EnteringContext, bool &MemberOfUnknownSpecialization,
RequiredTemplateKind RequiredTemplate = SourceLocation(),
AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization,
bool Disambiguation = false);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg, bool HasTypeConstraint);
bool ActOnTypeConstraint(const CXXScopeSpec &SS,
TemplateIdAnnotation *TypeConstraint,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(NestedNameSpecifierLoc NS,
DeclarationNameInfo NameInfo,
ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs,
TemplateTypeParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool AttachTypeConstraint(AutoTypeLoc TL,
NonTypeTemplateParmDecl *ConstrainedParameter,
SourceLocation EllipsisLoc);
bool RequireStructuralType(QualType T, SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid,
bool SuppressDiagnostic = false);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
/// Get a template argument mapping the given template parameter to itself,
/// e.g. for X in \c template<int X>, this would return an expression template
/// argument referencing X.
TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param,
SourceLocation Location);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
/// Get the specialization of the given variable template corresponding to
/// the specified argument list, or a null-but-valid result if the arguments
/// are dependent.
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
/// Form a reference to the specialization of the given variable template
/// corresponding to the specified argument list, or a null-but-valid result
/// if the arguments are dependent.
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &ConceptNameInfo,
NamedDecl *FoundDecl, ConceptDecl *NamedConcept,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
/// Specifies the context in which a particular template argument is being
/// checked (see the \c CheckTemplateArgument overloads below).
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occured, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true,
bool *ConstraintsNotSatisfied = nullptr);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
  /// Determine whether the template parameter lists \p New and \p Old are
  /// equivalent for the purpose described by \p Kind, optionally emitting
  /// diagnostics when \p Complain is true.
  bool TemplateParameterListsAreEqual(TemplateParameterList *New,
                                      TemplateParameterList *Old,
                                      bool Complain,
                                      TemplateParameterListEqualKind Kind,
                                      SourceLocation TemplateArgLoc
                                        = SourceLocation());

  /// Check whether a template may be declared in the given scope.
  bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);

  /// Called when the parser has parsed a C++ typename
  /// specifier, e.g., "typename T::type".
  ///
  /// \param S The scope in which this typename type occurs.
  /// \param TypenameLoc the location of the 'typename' keyword
  /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
  /// \param II the identifier we're retrieving (e.g., 'type' in the example).
  /// \param IdLoc the location of the identifier.
  TypeResult
  ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS, const IdentifierInfo &II,
                    SourceLocation IdLoc);

  /// Called when the parser has parsed a C++ typename
  /// specifier that ends in a template-id, e.g.,
  /// "typename MetaFun::template apply<T1, T2>".
  ///
  /// \param S The scope in which this typename type occurs.
  /// \param TypenameLoc the location of the 'typename' keyword
  /// \param SS the nested-name-specifier following the typename (e.g., 'T::').
  /// \param TemplateLoc the location of the 'template' keyword, if any.
  /// \param TemplateName The template name.
  /// \param TemplateII The identifier used to name the template.
  /// \param TemplateIILoc The location of the template name.
  /// \param LAngleLoc The location of the opening angle bracket ('<').
  /// \param TemplateArgs The template arguments.
  /// \param RAngleLoc The location of the closing angle bracket ('>').
  TypeResult
  ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
                    const CXXScopeSpec &SS,
                    SourceLocation TemplateLoc,
                    TemplateTy TemplateName,
                    IdentifierInfo *TemplateII,
                    SourceLocation TemplateIILoc,
                    SourceLocation LAngleLoc,
                    ASTTemplateArgsPtr TemplateArgs,
                    SourceLocation RAngleLoc);

  /// Build the type named by a typename specifier
  /// "Keyword QualifierLoc::II"; this overload also returns type source
  /// information through \p TSI.
  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II,
                             SourceLocation IILoc,
                             TypeSourceInfo **TSI,
                             bool DeducedTSTContext);

  /// Build the type named by a typename specifier
  /// "Keyword QualifierLoc::II".
  QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
                             SourceLocation KeywordLoc,
                             NestedNameSpecifierLoc QualifierLoc,
                             const IdentifierInfo &II,
                             SourceLocation IILoc,
                             bool DeducedTSTContext = true);

  /// Rebuild the given type source information within the context of the
  /// current instantiation.
  TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
                                                    SourceLocation Loc,
                                                    DeclarationName Name);
  /// Rebuild the given nested-name-specifier within the context of the
  /// current instantiation.
  bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
  /// Rebuild the given expression within the context of the current
  /// instantiation.
  ExprResult RebuildExprInCurrentInstantiation(Expr *E);
  /// Rebuild the given template parameter list within the context of the
  /// current instantiation.
  bool RebuildTemplateParamsInCurrentInstantiation(
      TemplateParameterList *Params);

  /// Produce a textual description of the bindings of the given template
  /// parameters to the given template arguments (for diagnostics).
  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgumentList &Args);
  std::string
  getTemplateArgumentBindingsText(const TemplateParameterList *Params,
                                  const TemplateArgument *Args,
                                  unsigned NumArgs);
  //===--------------------------------------------------------------------===//
  // C++ Concepts
  //===--------------------------------------------------------------------===//

  /// Handle a parsed concept definition,
  /// e.g. "template<typename T> concept C = ...;".
  Decl *ActOnConceptDefinition(
      Scope *S, MultiTemplateParamsArg TemplateParameterLists,
      IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);

  /// Begin processing a requires-expression, creating the body declaration
  /// that will own its local parameters and requirements.
  RequiresExprBodyDecl *
  ActOnStartRequiresExpr(SourceLocation RequiresKWLoc,
                         ArrayRef<ParmVarDecl *> LocalParameters,
                         Scope *BodyScope);
  /// Finish processing a requires-expression started with
  /// ActOnStartRequiresExpr.
  void ActOnFinishRequiresExpr();

  /// Handle a simple requirement (a bare expression) in a
  /// requires-expression.
  concepts::Requirement *ActOnSimpleRequirement(Expr *E);
  /// Handle a type requirement (e.g. "typename T::type;") in a
  /// requires-expression.
  concepts::Requirement *ActOnTypeRequirement(
      SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc,
      IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId);
  /// Handle a compound requirement without a return-type-requirement.
  concepts::Requirement *ActOnCompoundRequirement(Expr *E,
                                                  SourceLocation NoexceptLoc);
  /// Handle a compound requirement with a return-type-requirement
  /// (the trailing "-> type-constraint").
  concepts::Requirement *
  ActOnCompoundRequirement(
      Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS,
      TemplateIdAnnotation *TypeConstraint, unsigned Depth);
  /// Handle a nested requirement (e.g. "requires C<T>;") in a
  /// requires-expression.
  concepts::Requirement *ActOnNestedRequirement(Expr *Constraint);

  /// Build an expression requirement from a successfully-built expression.
  concepts::ExprRequirement *
  BuildExprRequirement(
      Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc,
      concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
  /// Build an expression requirement whose expression substitution failed;
  /// \p ExprSubstDiag records the failure.
  concepts::ExprRequirement *
  BuildExprRequirement(
      concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag,
      bool IsSatisfied, SourceLocation NoexceptLoc,
      concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement);
  /// Build a type requirement for the given type.
  concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type);
  /// Build a type requirement whose type substitution failed.
  concepts::TypeRequirement *
  BuildTypeRequirement(
      concepts::Requirement::SubstitutionDiagnostic *SubstDiag);
  /// Build a nested requirement from the given constraint expression.
  concepts::NestedRequirement *BuildNestedRequirement(Expr *E);
  /// Build a nested requirement whose constraint substitution failed.
  concepts::NestedRequirement *
  BuildNestedRequirement(
      concepts::Requirement::SubstitutionDiagnostic *SubstDiag);

  /// Handle a complete requires-expression once all of its requirements
  /// have been processed.
  ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc,
                               RequiresExprBodyDecl *Body,
                               ArrayRef<ParmVarDecl *> LocalParameters,
                               ArrayRef<concepts::Requirement *> Requirements,
                               SourceLocation ClosingBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block,
/// A type constraint.
UPPC_TypeConstraint,
// A requirement in a requires-expression.
UPPC_Requirement,
// A requires-clause.
UPPC_RequiresClause,
};
  /// Diagnose unexpanded parameter packs.
  ///
  /// \param Loc The location at which we should emit the diagnostic.
  ///
  /// \param UPPC The context in which we are diagnosing unexpanded
  /// parameter packs.
  ///
  /// \param Unexpanded the set of unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
                                        UnexpandedParameterPackContext UPPC,
                                  ArrayRef<UnexpandedParameterPack> Unexpanded);

  /// If the given type contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The source location where a diagnostic should be emitted.
  ///
  /// \param T The type that is being checked for unexpanded parameter
  /// packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given expression contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param E The expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(Expr *E,
                       UnexpandedParameterPackContext UPPC = UPPC_Expression);

  /// If the given requires-expression contains an unexpanded reference to one
  /// of its own parameter packs, diagnose the error.
  ///
  /// \param RE The requires-expression that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);

  /// If the given nested-name-specifier contains an unexpanded
  /// parameter pack, diagnose the error.
  ///
  /// \param SS The nested-name-specifier that is being checked for
  /// unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given name contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param NameInfo The name (with source location information) that
  /// is being checked for unexpanded parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given template name contains an unexpanded parameter pack,
  /// diagnose the error.
  ///
  /// \param Loc The location of the template name.
  ///
  /// \param Template The template name that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
                                       TemplateName Template,
                                       UnexpandedParameterPackContext UPPC);

  /// If the given template argument contains an unexpanded parameter
  /// pack, diagnose the error.
  ///
  /// \param Arg The template argument that is being checked for unexpanded
  /// parameter packs.
  ///
  /// \returns true if an error occurred, false otherwise.
  bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
                                       UnexpandedParameterPackContext UPPC);

  /// Collect the set of unexpanded parameter packs within the given
  /// template argument.
  ///
  /// \param Arg The template argument that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(TemplateArgument Arg,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// template argument.
  ///
  /// \param Arg The template argument that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
                    SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// type.
  ///
  /// \param T The type that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(QualType T,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// type.
  ///
  /// \param TL The type that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(TypeLoc TL,
                   SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// nested-name-specifier.
  ///
  /// \param NNS The nested-name-specifier that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
                         SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);

  /// Collect the set of unexpanded parameter packs within the given
  /// name.
  ///
  /// \param NameInfo The name that will be traversed to find
  /// unexpanded parameter packs.
  void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
                         SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
  /// Invoked when parsing a template argument followed by an
  /// ellipsis, which creates a pack expansion.
  ///
  /// \param Arg The template argument preceding the ellipsis, which
  /// may already be invalid.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
                                            SourceLocation EllipsisLoc);

  /// Invoked when parsing a type followed by an ellipsis, which
  /// creates a pack expansion.
  ///
  /// \param Type The type preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);

  /// Construct a pack expansion type from the pattern of the pack
  /// expansion.
  TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
                                     SourceLocation EllipsisLoc,
                                     Optional<unsigned> NumExpansions);

  /// Construct a pack expansion type from the pattern of the pack
  /// expansion.
  QualType CheckPackExpansion(QualType Pattern,
                              SourceRange PatternRange,
                              SourceLocation EllipsisLoc,
                              Optional<unsigned> NumExpansions);

  /// Invoked when parsing an expression followed by an ellipsis, which
  /// creates a pack expansion.
  ///
  /// \param Pattern The expression preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);

  /// Invoked when parsing an expression followed by an ellipsis, which
  /// creates a pack expansion.
  ///
  /// \param Pattern The expression preceding the ellipsis, which will become
  /// the pattern of the pack expansion.
  ///
  /// \param EllipsisLoc The location of the ellipsis.
  ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
                                Optional<unsigned> NumExpansions);

  /// Determine whether we could expand a pack expansion with the
  /// given set of parameter packs into separate arguments by repeatedly
  /// transforming the pattern.
  ///
  /// \param EllipsisLoc The location of the ellipsis that identifies the
  /// pack expansion.
  ///
  /// \param PatternRange The source range that covers the entire pattern of
  /// the pack expansion.
  ///
  /// \param Unexpanded The set of unexpanded parameter packs within the
  /// pattern.
  ///
  /// \param ShouldExpand Will be set to \c true if the transformer should
  /// expand the corresponding pack expansions into separate arguments. When
  /// set, \c NumExpansions must also be set.
  ///
  /// \param RetainExpansion Whether the caller should add an unexpanded
  /// pack expansion after all of the expanded arguments. This is used
  /// when extending explicitly-specified template argument packs per
  /// C++0x [temp.arg.explicit]p9.
  ///
  /// \param NumExpansions The number of separate arguments that will be in
  /// the expanded form of the corresponding pack expansion. This is both an
  /// input and an output parameter, which can be set by the caller if the
  /// number of expansions is known a priori (e.g., due to a prior substitution)
  /// and will be set by the callee when the number of expansions is known.
  /// The callee must set this value when \c ShouldExpand is \c true; it may
  /// set this value in other cases.
  ///
  /// \returns true if an error occurred (e.g., because the parameter packs
  /// are to be instantiated with arguments of different lengths), false
  /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
  /// must be set.
  bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
                                       SourceRange PatternRange,
                             ArrayRef<UnexpandedParameterPack> Unexpanded,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                                       bool &ShouldExpand,
                                       bool &RetainExpansion,
                                       Optional<unsigned> &NumExpansions);

  /// Determine the number of arguments in the given pack expansion
  /// type.
  ///
  /// This routine assumes that the number of arguments in the expansion is
  /// consistent across all of the unexpanded parameter packs in its pattern.
  ///
  /// Returns an empty Optional if the type can't be expanded.
  Optional<unsigned> getNumArgumentsInExpansion(QualType T,
                            const MultiLevelTemplateArgumentList &TemplateArgs);

  /// Determine whether the given declarator contains any unexpanded
  /// parameter packs.
  ///
  /// This routine is used by the parser to disambiguate function declarators
  /// with an ellipsis prior to the ')', e.g.,
  ///
  /// \code
  ///   void f(T...);
  /// \endcode
  ///
  /// To determine whether we have an (unnamed) function parameter pack or
  /// a variadic function.
  ///
  /// \returns true if the declarator contains any unexpanded parameter packs,
  /// false otherwise.
  bool containsUnexpandedParameterPacks(Declarator &D);

  /// Returns the pattern of the pack expansion for a template argument.
  ///
  /// \param OrigLoc The template argument to expand.
  ///
  /// \param Ellipsis Will be set to the location of the ellipsis.
  ///
  /// \param NumExpansions Will be set to the number of expansions that will
  /// be generated from this pack expansion, if known a priori.
  TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
      TemplateArgumentLoc OrigLoc,
      SourceLocation &Ellipsis,
      Optional<unsigned> &NumExpansions) const;

  /// Given a template argument that contains an unexpanded parameter pack, but
  /// which has already been substituted, attempt to determine the number of
  /// elements that will be produced once this argument is fully-expanded.
  ///
  /// This is intended for use when transforming 'sizeof...(Arg)' in order to
  /// avoid actually expanding the pack where possible.
  Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
  //===--------------------------------------------------------------------===//
  // C++ Template Argument Deduction (C++ [temp.deduct])
  //===--------------------------------------------------------------------===//

  /// Adjust the type \p ArgFunctionType to match the calling convention,
  /// noreturn, and optionally the exception specification of \p FunctionType.
  /// Deduction often wants to ignore these properties when matching function
  /// types.
  QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
                               bool AdjustExceptionSpec = false);

  /// Describes the result of template argument deduction.
  ///
  /// The TemplateDeductionResult enumeration describes the result of
  /// template argument deduction, as returned from
  /// DeduceTemplateArguments(). The separate TemplateDeductionInfo
  /// structure provides additional information about the results of
  /// template argument deduction, e.g., the deduced template argument
  /// list (if successful) or the specific template parameters or
  /// deduced arguments that were involved in the failure.
  enum TemplateDeductionResult {
    /// Template argument deduction was successful.
    TDK_Success = 0,
    /// The declaration was invalid; do nothing.
    TDK_Invalid,
    /// Template argument deduction exceeded the maximum template
    /// instantiation depth (which has already been diagnosed).
    TDK_InstantiationDepth,
    /// Template argument deduction did not deduce a value
    /// for every template parameter.
    TDK_Incomplete,
    /// Template argument deduction did not deduce a value for every
    /// expansion of an expanded template parameter pack.
    TDK_IncompletePack,
    /// Template argument deduction produced inconsistent
    /// deduced values for the given template parameter.
    TDK_Inconsistent,
    /// Template argument deduction failed due to inconsistent
    /// cv-qualifiers on a template parameter type that would
    /// otherwise be deduced, e.g., we tried to deduce T in "const T"
    /// but were given a non-const "X".
    TDK_Underqualified,
    /// Substitution of the deduced template argument values
    /// resulted in an error.
    TDK_SubstitutionFailure,
    /// After substituting deduced template arguments, a dependent
    /// parameter type did not match the corresponding argument.
    TDK_DeducedMismatch,
    /// After substituting deduced template arguments, an element of
    /// a dependent parameter type did not match the corresponding element
    /// of the corresponding argument (when deducing from an initializer list).
    TDK_DeducedMismatchNested,
    /// A non-dependent component of the parameter did not match the
    /// corresponding component of the argument.
    TDK_NonDeducedMismatch,
    /// When performing template argument deduction for a function
    /// template, there were too many call arguments.
    TDK_TooManyArguments,
    /// When performing template argument deduction for a function
    /// template, there were too few call arguments.
    TDK_TooFewArguments,
    /// The explicitly-specified template arguments were not valid
    /// template arguments for the given template.
    TDK_InvalidExplicitArguments,
    /// Checking non-dependent argument conversions failed.
    TDK_NonDependentConversionFailure,
    /// The deduced arguments did not satisfy the constraints associated
    /// with the template.
    TDK_ConstraintsNotSatisfied,
    /// Deduction failed; that's all we know.
    TDK_MiscellaneousDeductionFailure,
    /// CUDA Target attributes do not match.
    TDK_CUDATargetMismatch
  };
  /// Perform template argument deduction to determine whether the given
  /// class template partial specialization matches \p TemplateArgs.
  TemplateDeductionResult
  DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  /// Perform template argument deduction to determine whether the given
  /// variable template partial specialization matches \p TemplateArgs.
  TemplateDeductionResult
  DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
                          const TemplateArgumentList &TemplateArgs,
                          sema::TemplateDeductionInfo &Info);

  /// Substitute the explicitly-specified template arguments
  /// \p ExplicitTemplateArgs into the given function template, producing
  /// the partially-deduced argument list in \p Deduced and the substituted
  /// parameter types in \p ParamTypes (and, if \p FunctionType is non-null,
  /// the substituted function type).
  TemplateDeductionResult SubstituteExplicitTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo &ExplicitTemplateArgs,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
      sema::TemplateDeductionInfo &Info);

  /// \brief A function argument from which we performed template argument
  /// deduction for a call.
  struct OriginalCallArg {
    OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
                    unsigned ArgIdx, QualType OriginalArgType)
        : OriginalParamType(OriginalParamType),
          DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
          OriginalArgType(OriginalArgType) {}

    /// The parameter type from which deduction was performed.
    QualType OriginalParamType;
    /// Whether the parameter was decomposed for deduction purposes
    /// (NOTE(review): presumably from an initializer list — confirm).
    bool DecomposedParam;
    /// The index of the corresponding call argument.
    unsigned ArgIdx;
    /// The type of the call argument.
    QualType OriginalArgType;
  };

  /// Finish template argument deduction for a function template, checking
  /// the deduced arguments (and the original call arguments, if provided)
  /// and producing the resulting specialization in \p Specialization.
  TemplateDeductionResult FinishTemplateArgumentDeduction(
      FunctionTemplateDecl *FunctionTemplate,
      SmallVectorImpl<DeducedTemplateArgument> &Deduced,
      unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
      sema::TemplateDeductionInfo &Info,
      SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
      bool PartialOverloading = false,
      llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
  /// Perform template argument deduction for a function template from a
  /// set of call arguments \p Args, producing the resulting specialization
  /// in \p Specialization.
  TemplateDeductionResult DeduceTemplateArguments(
      FunctionTemplateDecl *FunctionTemplate,
      TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
      FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
      bool PartialOverloading,
      llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);

  /// Perform template argument deduction for a function template against
  /// the target function type \p ArgFunctionType (e.g., when taking the
  /// address of a function template).
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          QualType ArgFunctionType,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  /// Perform template argument deduction for a templated conversion
  /// function converting to the type \p ToType.
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          QualType ToType,
                          CXXConversionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info);

  /// Perform template argument deduction for a function template when
  /// only explicit template arguments are available.
  TemplateDeductionResult
  DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
                          TemplateArgumentListInfo *ExplicitTemplateArgs,
                          FunctionDecl *&Specialization,
                          sema::TemplateDeductionInfo &Info,
                          bool IsAddressOfFunction = false);

  /// Substitute Replacement for \p auto in \p TypeWithAuto
  QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
  /// Substitute Replacement for auto in TypeWithAuto
  TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                          QualType Replacement);

  /// Completely replace the \c auto in \p TypeWithAuto by
  /// \p Replacement. This does not retain any \c auto type sugar.
  QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
  TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
                                            QualType Replacement);

  /// Result type of DeduceAutoType.
  enum DeduceAutoResult {
    /// Deduction succeeded.
    DAR_Succeeded,
    /// Deduction failed.
    DAR_Failed,
    /// Deduction failed and a diagnostic has already been emitted.
    DAR_FailedAlreadyDiagnosed
  };

  /// Deduce the type represented by an 'auto' (or similar deduced)
  /// placeholder in \p AutoType from the initializer \p Initializer,
  /// storing the deduced type in \p Result.
  DeduceAutoResult
  DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None,
                 bool IgnoreConstraints = false);
  DeduceAutoResult
  DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
                 Optional<unsigned> DependentDeductionDepth = None,
                 bool IgnoreConstraints = false);
  /// Diagnose a failure to deduce the type of the given variable from its
  /// initializer.
  void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);

  /// Deduce the return type of the given function \p FD if it has not
  /// been deduced yet.
  bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
                        bool Diagnose = true);

  /// Declare implicit deduction guides for a class template if we've
  /// not already done so.
  void DeclareImplicitDeductionGuides(TemplateDecl *Template,
                                      SourceLocation Loc);

  /// Deduce a class template specialization from an initializer (class
  /// template argument deduction).
  QualType DeduceTemplateSpecializationFromInitializer(
      TypeSourceInfo *TInfo, const InitializedEntity &Entity,
      const InitializationKind &Kind, MultiExprArg Init);

  /// Deduce the type of a variable \p VDecl from its initializer \p Init,
  /// diagnosing failures using \p Name and \p Range.
  QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
                                        QualType Type, TypeSourceInfo *TSI,
                                        SourceRange Range, bool DirectInit,
                                        Expr *Init);

  /// Retrieve the location information for the return type of \p FD.
  TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;

  /// Deduce the deduced return type (\p AT) of function \p FD from the
  /// expression of a return statement.
  bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
                                        SourceLocation ReturnLoc,
                                        Expr *&RetExpr, AutoType *AT);

  /// Determine which of two function templates is more specialized
  /// under the partial ordering rules.
  FunctionTemplateDecl *getMoreSpecializedTemplate(
      FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
      TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
      unsigned NumCallArguments2, bool Reversed = false);

  /// Retrieve the most specialized declaration from the set
  /// [SBegin, SEnd), diagnosing with the given diagnostics on failure
  /// when \p Complain is true.
  UnresolvedSetIterator
  getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
                     TemplateSpecCandidateSet &FailedCandidates,
                     SourceLocation Loc,
                     const PartialDiagnostic &NoneDiag,
                     const PartialDiagnostic &AmbigDiag,
                     const PartialDiagnostic &CandidateDiag,
                     bool Complain = true, QualType TargetType = QualType());

  /// Determine which of two class template partial specializations is
  /// more specialized.
  ClassTemplatePartialSpecializationDecl *
  getMoreSpecializedPartialSpecialization(
      ClassTemplatePartialSpecializationDecl *PS1,
      ClassTemplatePartialSpecializationDecl *PS2,
      SourceLocation Loc);
  /// Determine whether the given class template partial specialization
  /// is more specialized than the primary template.
  bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
                                    sema::TemplateDeductionInfo &Info);

  /// Determine which of two variable template partial specializations is
  /// more specialized.
  VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
      VarTemplatePartialSpecializationDecl *PS1,
      VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
  /// Determine whether the given variable template partial specialization
  /// is more specialized than the primary template.
  bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
                                    sema::TemplateDeductionInfo &Info);

  /// Determine whether a template-template argument \p AArg is at least
  /// as specialized as the template-template parameter described by
  /// \p PParam.
  bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
      TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc);

  /// Mark in \p Used the template parameters at depth \p Depth that occur
  /// within the given expression (only deduced contexts when
  /// \p OnlyDeduced is true).
  void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced,
                                  unsigned Depth, llvm::SmallBitVector &Used);
  /// Mark in \p Used the template parameters at depth \p Depth that occur
  /// within the given template argument list.
  void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
                                  bool OnlyDeduced,
                                  unsigned Depth,
                                  llvm::SmallBitVector &Used);
  /// Mark which template parameters are deduced by the given function
  /// template; forwards to the static overload using this Sema's
  /// ASTContext.
  void MarkDeducedTemplateParameters(
      const FunctionTemplateDecl *FunctionTemplate,
      llvm::SmallBitVector &Deduced) {
    return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
  }
  static void MarkDeducedTemplateParameters(ASTContext &Ctx,
                                  const FunctionTemplateDecl *FunctionTemplate,
                                            llvm::SmallBitVector &Deduced);
  //===--------------------------------------------------------------------===//
  // C++ Template Instantiation
  //

  /// Retrieve the template argument lists needed to instantiate the
  /// declaration \p D, optionally with \p Innermost as the innermost list.
  MultiLevelTemplateArgumentList
  getTemplateInstantiationArgs(NamedDecl *D,
                               const TemplateArgumentList *Innermost = nullptr,
                               bool RelativeToPrimary = false,
                               const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
  /// We are instantiating a template declaration. The entity is
  /// the declaration we're instantiating (e.g., a CXXRecordDecl).
  TemplateInstantiation,
  /// We are instantiating a default argument for a template
  /// parameter. The Entity is the template parameter whose argument is
  /// being instantiated, the Template is the template, and the
  /// TemplateArgs/NumTemplateArgs provide the template arguments as
  /// specified.
  DefaultTemplateArgumentInstantiation,
  /// We are instantiating a default argument for a function.
  /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
  /// provides the template arguments as specified.
  DefaultFunctionArgumentInstantiation,
  /// We are substituting explicit template arguments provided for
  /// a function template. The entity is a FunctionTemplateDecl.
  ExplicitTemplateArgumentSubstitution,
  /// We are substituting template arguments determined as part of
  /// template argument deduction for either a class template
  /// partial specialization or a function template. The
  /// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
  /// a TemplateDecl.
  DeducedTemplateArgumentSubstitution,
  /// We are substituting prior template arguments into a new
  /// template parameter. The template parameter itself is either a
  /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
  PriorTemplateArgumentSubstitution,
  /// We are checking the validity of a default template argument that
  /// has been used when naming a template-id.
  DefaultTemplateArgumentChecking,
  /// We are computing the exception specification for a defaulted special
  /// member function.
  ExceptionSpecEvaluation,
  /// We are instantiating the exception specification for a function
  /// template which was deferred until it was needed.
  ExceptionSpecInstantiation,
  /// We are instantiating a requirement of a requires expression.
  RequirementInstantiation,
  /// We are checking the satisfaction of a nested requirement of a requires
  /// expression.
  NestedRequirementConstraintsCheck,
  /// We are declaring an implicit special member function.
  DeclaringSpecialMember,
  /// We are declaring an implicit 'operator==' for a defaulted
  /// 'operator<=>'.
  DeclaringImplicitEqualityComparison,
  /// We are defining a synthesized function (such as a defaulted special
  /// member).
  DefiningSynthesizedFunction,
  /// We are checking the constraints associated with a constrained entity or
  /// the constraint expression of a concept. This includes the checks that
  /// atomic constraints have the type 'bool' and that they can be constant
  /// evaluated.
  ConstraintsCheck,
  /// We are substituting template arguments into a constraint expression.
  ConstraintSubstitution,
  /// We are normalizing a constraint expression.
  ConstraintNormalization,
  /// We are substituting into the parameter mapping of an atomic constraint
  /// during normalization.
  ParameterMappingSubstitution,
  /// We are rewriting a comparison operator in terms of an operator<=>.
  RewritingOperatorAsSpaceship,
  /// We are initializing a structured binding.
  InitializingStructuredBinding,
  /// We are marking a class as __dllexport.
  MarkingClassDllexported,
  /// Added for Template instantiation observation.
  /// Memoization means we are _not_ instantiating a template because
  /// it is already instantiated (but we entered a context where we
  /// would have had to if it was not already instantiated).
  Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
// The union is discriminated by Kind: SpecialMember is active only for
// DeclaringSpecialMember contexts (see the assertion in
// template_arguments()); NumTemplateArgs is active otherwise.
union {
  /// The number of template arguments in TemplateArgs.
  unsigned NumTemplateArgs;
  /// The special member being declared or defined.
  CXXSpecialMember SpecialMember;
};
/// The template arguments being substituted, as a (pointer, count) view.
/// Only valid when Kind uses the NumTemplateArgs arm of the union above.
ArrayRef<TemplateArgument> template_arguments() const {
  assert(Kind != DeclaringSpecialMember);
  return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
    : Kind(TemplateInstantiation),
      SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
      Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
      DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
    TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object that temporarily overrides \c ArgumentPackSubstitutionIndex
/// on a \c Sema object, restoring the previous index on destruction.
///
/// See \c ArgumentPackSubstitutionIndex for the meaning of the index.
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int SavedSubstitutionIndex;

public:
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
      : Self(Self), SavedSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = SavedSubstitutionIndex;
  }
};
// NOTE(review): this names 'ArgumentPackSubstitutionRAII', but the RAII
// class declared above is 'ArgumentPackSubstitutionIndexRAII'. As a nested
// class it already has access to the enclosing class's members, so this
// friend declaration appears vestigial — confirm before removing.
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
    SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and the object evaluates as invalid (see
/// \c isInvalid()).
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// Note that we are instantiating a class template,
  /// function template, variable template, alias template,
  /// or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());
  /// Tag type selecting the exception-specification constructor below.
  struct ExceptionSpecification {};
  /// Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateParameter Param, TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are substituting either explicitly-specified or
  /// deduced template arguments during function template argument deduction.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        CodeSynthesisContext::SynthesisKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating as part of template
  /// argument deduction for a class template declaration.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are instantiating a default argument for a function
  /// parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);
  /// Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);
  /// Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);
  /// Tag type selecting the constraints-checking constructors below.
  struct ConstraintsCheck {};
  /// \brief Note that we are checking the constraints associated with some
  /// constrained entity (a concept declaration or a template with associated
  /// constraints).
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintsCheck, NamedDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);
  /// Tag type selecting the constraint-substitution constructor below.
  struct ConstraintSubstitution {};
  /// \brief Note that we are checking a constraint expression associated
  /// with a template declaration or as part of the satisfaction check of a
  /// concept.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintSubstitution, NamedDecl *Template,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange);
  /// Tag type selecting the constraint-normalization constructor below.
  struct ConstraintNormalization {};
  /// \brief Note that we are normalizing a constraint expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ConstraintNormalization, NamedDecl *Template,
                        SourceRange InstantiationRange);
  /// Tag type selecting the parameter-mapping-substitution constructor.
  struct ParameterMappingSubstitution {};
  /// \brief Note that we are substituting into the parameter mapping of an
  /// atomic constraint during constraint normalization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParameterMappingSubstitution, NamedDecl *Template,
                        SourceRange InstantiationRange);
  /// \brief Note that we are substituting template arguments into a part of
  /// a requirement of a requires expression.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::Requirement *Req,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());
  /// \brief Note that we are checking the satisfaction of the constraint
  /// expression inside of a nested requirement.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        concepts::NestedRequirement *Req, ConstraintsCheck,
                        SourceRange InstantiationRange = SourceRange());
  /// Note that we have finished instantiating this template.
  void Clear();
  ~InstantiatingTemplate() { Clear(); }
  /// Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }
  /// Determine whether we are already instantiating this
  /// specialization in some surrounding active instantiation.
  bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
  Sema &SemaRef;
  /// Whether pushing this context failed (e.g., the depth limit was hit).
  bool Invalid;
  /// Whether this specialization was already being instantiated when this
  /// context was pushed.
  bool AlreadyInstantiating;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);
  /// Common implementation delegated to by all public constructors.
  InstantiatingTemplate(
      Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = None,
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);
  InstantiatingTemplate(const InstantiatingTemplate&) = delete;
  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) = delete;
};
/// Push \p Ctx onto the stack of active code synthesis contexts.
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
/// Pop the most recently pushed code synthesis context.
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
  return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
/// Print the current code synthesis context stack as diagnostic notes,
/// skipping the print when the stack is empty or when it was already
/// emitted at this exact depth (avoids redundant context stacks).
void PrintContextStack() {
  const unsigned CurrentDepth = CodeSynthesisContexts.size();
  if (CurrentDepth != 0 &&
      CurrentDepth != LastEmittedCodeSynthesisContextDepth) {
    PrintInstantiationStack();
    LastEmittedCodeSynthesisContextDepth = CurrentDepth;
  }
  if (PragmaAttributeCurrentTargetDecl)
    PrintPragmaAttributeInstantiationPoint();
}
/// Emit diagnostic notes describing the active code synthesis contexts.
void PrintInstantiationStack();
/// Emit a note pointing at the declaration targeted by '#pragma clang
/// attribute', if one is active.
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  const auto &CurContext = ExprEvalContexts.back();
  return CurContext.isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
///
/// On construction this snapshots the SFINAE-related state of the given
/// \c Sema; the destructor restores that snapshot, so errors produced
/// inside the trap leave no trace afterwards.
class SFINAETrap {
  Sema &SemaRef;
  unsigned SavedNumSFINAEErrors;
  bool SavedInNonInstantiationSFINAEContext;
  bool SavedAccessCheckingSFINAE;
  bool SavedLastDiagnosticIgnored;

public:
  explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
      : SemaRef(SemaRef), SavedNumSFINAEErrors(SemaRef.NumSFINAEErrors),
        SavedInNonInstantiationSFINAEContext(
            SemaRef.InNonInstantiationSFINAEContext),
        SavedAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
        SavedLastDiagnosticIgnored(
            SemaRef.getDiagnostics().isLastDiagnosticIgnored()) {
    // If we are not already in a SFINAE context, establish a
    // non-instantiation one so substitution failures are suppressed.
    if (!SemaRef.isSFINAEContext())
      SemaRef.InNonInstantiationSFINAEContext = true;
    SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
  }

  ~SFINAETrap() {
    // Restore every piece of state captured at construction.
    SemaRef.NumSFINAEErrors = SavedNumSFINAEErrors;
    SemaRef.InNonInstantiationSFINAEContext =
        SavedInNonInstantiationSFINAEContext;
    SemaRef.AccessCheckingSFINAE = SavedAccessCheckingSFINAE;
    SemaRef.getDiagnostics().setLastDiagnosticIgnored(
        SavedLastDiagnosticIgnored);
  }

  /// Determine whether any SFINAE errors have been trapped.
  bool hasErrorOccurred() const {
    return SemaRef.NumSFINAEErrors > SavedNumSFINAEErrors;
  }
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
  Sema &SemaRef;
  // FIXME: Using a SFINAETrap for this is a hack.
  SFINAETrap Trap;
  bool SavedDisableTypoCorrection;

public:
  explicit TentativeAnalysisScope(Sema &SemaRef)
      : SemaRef(SemaRef), Trap(SemaRef, true),
        SavedDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
    SemaRef.DisableTypoCorrection = true;
  }

  ~TentativeAnalysisScope() {
    SemaRef.DisableTypoCorrection = SavedDisableTypoCorrection;
  }
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
/// Cache used by the thread-safety analysis (see threadSafety::BeforeSet).
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation);
/// however, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
/// RAII scope that, when Enabled, stashes the global pending-instantiation
/// and vtable-use queues on entry, lets the scope accumulate its own work,
/// and restores the enclosing queues on exit. When not Enabled, every
/// operation is a no-op.
class GlobalEagerInstantiationScope {
public:
  GlobalEagerInstantiationScope(Sema &S, bool Enabled)
      : S(S), Enabled(Enabled) {
    if (!Enabled) return;
    // Stash the enclosing context's pending work; this scope starts empty.
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }
  /// Flush the work accumulated inside this scope: define used vtables
  /// and perform the pending implicit instantiations.
  void perform() {
    if (Enabled) {
      S.DefineUsedVTables();
      S.PerformPendingInstantiations();
    }
  }
  ~GlobalEagerInstantiationScope() {
    if (!Enabled) return;
    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);
    // Restore the set of pending implicit instantiations.
    if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
      assert(S.PendingInstantiations.empty() &&
             "PendingInstantiations should be empty before it is discarded.");
      S.PendingInstantiations.swap(SavedPendingInstantiations);
    } else {
      // Template instantiations in the PCH may be delayed until the TU.
      // After the swap, SavedPendingInstantiations holds the instantiations
      // accumulated in this scope; append them so they are not lost.
      S.PendingInstantiations.swap(SavedPendingInstantiations);
      S.PendingInstantiations.insert(S.PendingInstantiations.end(),
                                     SavedPendingInstantiations.begin(),
                                     SavedPendingInstantiations.end());
    }
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
  bool Enabled;
};
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc. It is drained by
/// PerformPendingInstantiations(/*LocalOnly=*/true).
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
/// RAII scope that gives the current local scope its own (initially empty)
/// queue of local implicit instantiations, restoring the enclosing queue
/// on destruction.
class LocalEagerInstantiationScope {
public:
  LocalEagerInstantiationScope(Sema &S) : S(S) {
    // Move the enclosing queue aside; the scope starts with an empty one.
    S.PendingLocalImplicitInstantiations.swap(
        SavedPendingLocalImplicitInstantiations);
  }

  /// Perform the local instantiations queued up within this scope.
  void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

  ~LocalEagerInstantiationScope() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    S.PendingLocalImplicitInstantiations.swap(
        SavedPendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
      SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
  bool HasInteresting = false;

public:
  /// Set the ExtParameterInfo for the parameter at the given index.
  /// Indices must be set in strictly increasing order (enforced by the
  /// assertion); skipped slots are filled with default-constructed infos.
  void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
    assert(Infos.size() <= index);
    Infos.resize(index + 1);
    Infos[index] = info;
    if (info != FunctionProtoType::ExtParameterInfo())
      HasInteresting = true;
  }

  /// Return a pointer (suitable for setting in an ExtProtoInfo) to the
  /// ExtParameterInfo array we've built up, or null when every entry is
  /// a default ExtParameterInfo.
  const FunctionProtoType::ExtParameterInfo *
  getPointerOrNull(unsigned numParams) {
    if (!HasInteresting)
      return nullptr;
    Infos.resize(numParams);
    return Infos.data();
  }
};
/// Perform any queued implicit template instantiations; when \p LocalOnly,
/// only the function-local queue is drained.
void PerformPendingInstantiations(bool LocalOnly = false);
/// Substitute \p TemplateArgs into the type described by \p T, producing a
/// new TypeSourceInfo (or null on error).
TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity,
                          bool AllowDeducedTST = false);
/// Overload of SubstType for a bare QualType (no source information).
QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);
/// Overload of SubstType starting from a TypeLoc.
TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);
/// Substitute into a function declaration's type, supplying the 'this'
/// context and qualifiers needed for member functions.
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc,
                          DeclarationName Entity,
                          CXXRecordDecl *ThisContext,
                          Qualifiers ThisTypeQuals);
/// Substitute \p Args into \p Proto's exception specification and attach
/// the result to \p New.
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
                        const MultiLevelTemplateArgumentList &Args);
/// Substitute \p Args into the exception specification \p ESI in place;
/// returns true on error.
bool SubstExceptionSpec(SourceLocation Loc,
                        FunctionProtoType::ExceptionSpecInfo &ESI,
                        SmallVectorImpl<QualType> &ExceptionStorage,
                        const MultiLevelTemplateArgumentList &Args);
/// Instantiate a function parameter, adjusting its index by
/// \p indexAdjustment and expanding packs as requested.
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
                        const MultiLevelTemplateArgumentList &TemplateArgs,
                        int indexAdjustment,
                        Optional<unsigned> NumExpansions,
                        bool ExpectParameterPack);
/// Substitute into a list of parameters, producing the instantiated
/// parameter types (and, optionally, the instantiated declarations).
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
                    const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
                    const MultiLevelTemplateArgumentList &TemplateArgs,
                    SmallVectorImpl<QualType> &ParamTypes,
                    SmallVectorImpl<ParmVarDecl *> *OutParams,
                    ExtParameterInfoBuilder &ParamInfos);
/// Substitute \p TemplateArgs into the expression \p E.
ExprResult SubstExpr(Expr *E,
                     const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);
/// Substitute \p TemplateArgs into the statement \p S.
StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute into a template parameter list, producing a new list owned
/// by \p Owner.
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
                    const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute into a list of template arguments; returns true on error.
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       TemplateArgumentListInfo &Outputs);
/// Substitute into the declaration \p D, creating its instantiation in
/// \p Owner.
Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the name and return type of a defaulted 'operator<=>' to form
/// an implicit 'operator=='.
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD,
                                         FunctionDecl *Spaceship);
/// Substitute into an initializer expression; \p CXXDirectInit selects
/// direct-initialization semantics.
ExprResult SubstInitializer(Expr *E,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       bool CXXDirectInit);
/// Substitute into \p Pattern's base-specifiers to build the bases of
/// \p Instantiation; returns true on error.
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                    CXXRecordDecl *Pattern,
                    const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate the definition of the class \p Instantiation from the
/// pattern \p Pattern; returns true on error.
bool
InstantiateClass(SourceLocation PointOfInstantiation,
                 CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                 const MultiLevelTemplateArgumentList &TemplateArgs,
                 TemplateSpecializationKind TSK,
                 bool Complain = true);
/// Instantiate the definition of the enumeration \p Instantiation from
/// \p Pattern; returns true on error.
bool InstantiateEnum(SourceLocation PointOfInstantiation,
                     EnumDecl *Instantiation, EnumDecl *Pattern,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     TemplateSpecializationKind TSK);
/// Instantiate an in-class initializer for the field \p Instantiation
/// from \p Pattern; returns true on error.
bool InstantiateInClassInitializer(
    SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
    FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
/// Records an attribute whose instantiation was deferred: the pattern
/// attribute to instantiate (\c TmplAttr), the local instantiation scope
/// it must be instantiated within, and the declaration it will be
/// attached to.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;
  LocalInstantiationScope *Scope;
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
      : TmplAttr(A), Scope(S), NewDecl(D) {}
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
/// Instantiate the attributes of \p Pattern onto \p Inst; attributes that
/// must be deferred are appended to \p LateAttrs when provided.
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                      const Decl *Pattern, Decl *Inst,
                      LateInstantiatedAttrVec *LateAttrs = nullptr,
                      LocalInstantiationScope *OuterMostScope = nullptr);
/// Like InstantiateAttrs, but for the subset of attributes handled when
/// instantiating the declaration itself.
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
                        const Decl *Pattern, Decl *Inst,
                        LateInstantiatedAttrVec *LateAttrs = nullptr,
                        LocalInstantiationScope *OuterMostScope = nullptr);
/// Instantiate the default arguments of a defaulted default constructor.
void InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor);
/// Determine whether \p ClassTemplateSpec is based on a partial or
/// explicit specialization.
bool usesPartialOrExplicitSpecialization(
    SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
/// Instantiate the definition of the given class template
/// specialization; returns true on error.
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
                                       ClassTemplateSpecializationDecl *ClassTemplateSpec,
                                       TemplateSpecializationKind TSK,
                                       bool Complain = true);
/// Instantiate the members of the already-instantiated class
/// \p Instantiation.
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                             CXXRecordDecl *Instantiation,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                             TemplateSpecializationKind TSK);
/// Instantiate the members of the given class template specialization.
void InstantiateClassTemplateSpecializationMembers(
    SourceLocation PointOfInstantiation,
    ClassTemplateSpecializationDecl *ClassTemplateSpec,
    TemplateSpecializationKind TSK);
/// Substitute into a nested-name-specifier with source information.
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                            const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute into a declaration name (e.g., a conversion-function name).
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                         const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute into a (possibly qualified) template name.
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                  SourceLocation Loc,
                  const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute into an array of template argument locs; returns true on
/// error.
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
           TemplateArgumentListInfo &Result,
           const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate the default argument of \p Param for a call to \p FD at
/// \p CallLoc; returns true on error.
bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
                                ParmVarDecl *Param);
/// Instantiate a deferred exception specification for \p Function.
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                              FunctionDecl *Function);
/// Check the associated constraints of an instantiated function template
/// specialization, recording the result in \p Satisfaction.
bool CheckInstantiatedFunctionTemplateConstraints(
    SourceLocation PointOfInstantiation, FunctionDecl *Decl,
    ArrayRef<TemplateArgument> TemplateArgs,
    ConstraintSatisfaction &Satisfaction);
/// Instantiate (the declaration of) a function template specialization
/// for the given argument list.
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
                                             const TemplateArgumentList *Args,
                                             SourceLocation Loc);
/// Instantiate the definition of \p Function from its pattern.
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                   FunctionDecl *Function,
                                   bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);
/// Build a variable template specialization from \p FromVar with the
/// given (converted) template arguments.
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
    VarTemplateDecl *VarTemplate, VarDecl *FromVar,
    const TemplateArgumentList &TemplateArgList,
    const TemplateArgumentListInfo &TemplateArgsInfo,
    SmallVectorImpl<TemplateArgument> &Converted,
    SourceLocation PointOfInstantiation,
    LateInstantiatedAttrVec *LateAttrs = nullptr,
    LocalInstantiationScope *StartingScope = nullptr);
/// Finish a variable template specialization from its pattern declaration.
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
    VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
    const MultiLevelTemplateArgumentList &TemplateArgs);
/// Fill in the instantiated variable \p NewVar from \p OldVar.
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
                           const MultiLevelTemplateArgumentList &TemplateArgs,
                           LateInstantiatedAttrVec *LateAttrs,
                           DeclContext *Owner,
                           LocalInstantiationScope *StartingScope,
                           bool InstantiatingVarTemplate = false,
                           VarTemplateSpecializationDecl *PrevVTSD = nullptr);
/// Instantiate the initializer of \p Var from \p OldVar's initializer.
void InstantiateVariableInitializer(
    VarDecl *Var, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs);
/// Instantiate the definition of the variable \p Var from its pattern.
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                   VarDecl *Var, bool Recursive = false,
                                   bool DefinitionRequired = false,
                                   bool AtEndOfTU = false);
/// Instantiate the member initializers of constructor \p New from the
/// pattern constructor \p Tmpl.
void InstantiateMemInitializers(CXXConstructorDecl *New,
                                const CXXConstructorDecl *Tmpl,
                                const MultiLevelTemplateArgumentList &TemplateArgs);
/// Find the instantiation of \p D within the current instantiation.
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                                const MultiLevelTemplateArgumentList &TemplateArgs,
                                bool FindingInstantiatedContext = false);
/// Find the instantiation of the declaration context \p DC within the
/// current instantiation.
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                                     const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
/// The kind of Objective-C container we are currently inside, if any.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
/// Act on a single Objective-C type parameter (e.g., 'T : NSObject *').
DeclResult actOnObjCTypeParam(Scope *S,
                              ObjCTypeParamVariance variance,
                              SourceLocation varianceLoc,
                              unsigned index,
                              IdentifierInfo *paramName,
                              SourceLocation paramLoc,
                              SourceLocation colonLoc,
                              ParsedType typeBound);
/// Act on a complete Objective-C type parameter list, '<...>'.
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
                                          ArrayRef<Decl *> typeParams,
                                          SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
/// Act on the start of an \@interface declaration.
Decl *ActOnStartClassInterface(
    Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *SuperName, SourceLocation SuperLoc,
    ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);
/// Process the superclass named in a class \@interface.
void ActOnSuperClassOfClassInterface(Scope *S,
                                     SourceLocation AtInterfaceLoc,
                                     ObjCInterfaceDecl *IDecl,
                                     IdentifierInfo *ClassName,
                                     SourceLocation ClassLoc,
                                     IdentifierInfo *SuperName,
                                     SourceLocation SuperLoc,
                                     ArrayRef<ParsedType> SuperTypeArgs,
                                     SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             SmallVectorImpl<SourceLocation> &ProtocolLocs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);
/// Act on an \@compatibility_alias declaration.
Decl *ActOnCompatibilityAlias(
    SourceLocation AtCompatibilityAliasLoc,
    IdentifierInfo *AliasName, SourceLocation AliasLocation,
    IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
    IdentifierInfo *PName,
    SourceLocation &PLoc, SourceLocation PrevLoc,
    const ObjCList<ObjCProtocolDecl> &PList);
/// Act on the start of an \@protocol definition.
Decl *ActOnStartProtocolInterface(
    SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
    SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
    unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
    SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
/// Act on the start of a category \@interface.
Decl *ActOnStartCategoryInterface(
    SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
    SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
    IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
    Decl *const *ProtoRefs, unsigned NumProtoRefs,
    const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
    const ParsedAttributesView &AttrList);
/// Act on the start of an \@implementation for a class.
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
                                    IdentifierInfo *ClassName,
                                    SourceLocation ClassLoc,
                                    IdentifierInfo *SuperClassname,
                                    SourceLocation SuperClassLoc,
                                    const ParsedAttributesView &AttrList);
/// Act on the start of an \@implementation for a category.
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc,
                                       const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);
/// Act on a forward class declaration, '\@class A, B<T>;'.
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
                                            IdentifierInfo **IdentList,
                                            SourceLocation *IdentLocs,
                                            ArrayRef<ObjCTypeParamList *> TypeParamLists,
                                            unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
/// Kinds of Objective-C methods with special semantics: the alloc/new/copy
/// families and the retaining/non-retaining init families.
enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};
/// Per-argument information for an Objective-C method declaration,
/// consumed by ActOnMethodDeclaration().
struct ObjCArgInfo {
  IdentifierInfo *Name;   // Argument name.
  SourceLocation NameLoc; // Location of the argument name.
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;
  /// ArgAttrs - Attribute list for this argument.
  ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from the Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
void deduceOpenCLAddressSpace(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier; computed by getObjCMessageKind().
enum ObjCMessageKind {
  /// The message is sent to 'super'.
  ObjCSuperMessage,
  /// The message is an instance message.
  ObjCInstanceMessage,
  /// The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
/// Used by CheckObjCMethodOverrides() below.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,   // Result type is compatible with the method.
  RTC_Incompatible, // Result type is incompatible with the method.
  RTC_Unknown       // Compatibility could not be determined.
};
void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
ObjCMethodDecl *overridden);
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
/// Alignment modes for '#pragma options align', handled by
/// ActOnPragmaOptionsAlign() below.
enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
/// Why a non-default '#pragma pack'/alignment state is being diagnosed;
/// passed to DiagnoseNonDefaultPragmaAlignPack() below.
enum class PragmaAlignPackDiagnoseKind {
  NonDefaultStateAtInclude, // State was non-default when entering an include.
  ChangedStateAtExit        // State changed by the time the file was exited.
};
void DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaAlignPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispMode Value);
/// Kinds of MSVC segment pragmas
/// (\#pragma bss_seg/data_seg/const_seg/code_seg); see ActOnPragmaMSSeg().
enum PragmaSectionKind {
  PSK_DataSeg,  // #pragma data_seg
  PSK_BSSSeg,   // #pragma bss_seg
  PSK_ConstSeg, // #pragma const_seg
  PSK_CodeSeg,  // #pragma code_seg
};
bool UnifySection(StringRef SectionName, int SectionFlags,
NamedDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on #pragma clang __debug dump II
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// Are precise floating point semantics currently enabled? This is the
/// case exactly when no value-unsafe FP transformation (reassociation,
/// ignoring signed zeros, reciprocal substitution, approximate math
/// functions) is permitted by the current FP feature state.
bool isPreciseFPEnabled() {
  return !(CurFPFeatures.getAllowFPReassociate() ||
           CurFPFeatures.getNoSignedZero() ||
           CurFPFeatures.getAllowReciprocal() ||
           CurFPFeatures.getAllowApproxFunc());
}
/// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
PragmaFloatControlKind Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);
/// Called on well formed
/// \#pragma clang fp reassociate
void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);
/// ActOnPragmaFenvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);
/// Called on well formed '\#pragma clang fp' that has option 'exceptions'.
void ActOnPragmaFPExceptions(SourceLocation Loc,
LangOptions::FPExceptionModeKind);
/// Called to set constant rounding mode for floating point operations.
void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);
/// Called to set exception behavior for floating point operations.
void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
  return OptimizeOffPragmaLocation; // Invalid location <=> pragma is "on".
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
template <typename AttrType>
void AddOneConstantPowerTwoValueAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGABankBitsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr **Exprs, unsigned Size);
template <typename AttrType>
void addIntelSingleArgAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
template <typename AttrType>
void addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr, Expr *ZDimExpr);
void AddIntelReqdSubGroupSize(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelReqdSubGroupSizeAttr *
MergeIntelReqdSubGroupSizeAttr(Decl *D, const IntelReqdSubGroupSizeAttr &A);
IntelNamedSubGroupSizeAttr *
MergeIntelNamedSubGroupSizeAttr(Decl *D, const IntelNamedSubGroupSizeAttr &A);
void AddSYCLIntelNumSimdWorkItemsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNumSimdWorkItemsAttr *
MergeSYCLIntelNumSimdWorkItemsAttr(Decl *D,
const SYCLIntelNumSimdWorkItemsAttr &A);
void AddSYCLIntelESimdVectorizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelESimdVectorizeAttr *
MergeSYCLIntelESimdVectorizeAttr(Decl *D,
const SYCLIntelESimdVectorizeAttr &A);
void AddSYCLIntelSchedulerTargetFmaxMhzAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelSchedulerTargetFmaxMhzAttr *MergeSYCLIntelSchedulerTargetFmaxMhzAttr(
Decl *D, const SYCLIntelSchedulerTargetFmaxMhzAttr &A);
void AddSYCLIntelNoGlobalWorkOffsetAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelNoGlobalWorkOffsetAttr *MergeSYCLIntelNoGlobalWorkOffsetAttr(
Decl *D, const SYCLIntelNoGlobalWorkOffsetAttr &A);
void AddSYCLIntelLoopFuseAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelLoopFuseAttr *
MergeSYCLIntelLoopFuseAttr(Decl *D, const SYCLIntelLoopFuseAttr &A);
void AddIntelFPGAPrivateCopiesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
void AddIntelFPGAMaxReplicatesAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAMaxReplicatesAttr *
MergeIntelFPGAMaxReplicatesAttr(Decl *D, const IntelFPGAMaxReplicatesAttr &A);
void AddIntelFPGAForcePow2DepthAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *E);
IntelFPGAForcePow2DepthAttr *
MergeIntelFPGAForcePow2DepthAttr(Decl *D,
const IntelFPGAForcePow2DepthAttr &A);
void AddSYCLIntelFPGAInitiationIntervalAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
SYCLIntelFPGAInitiationIntervalAttr *MergeSYCLIntelFPGAInitiationIntervalAttr(
Decl *D, const SYCLIntelFPGAInitiationIntervalAttr &A);
SYCLIntelFPGAMaxConcurrencyAttr *MergeSYCLIntelFPGAMaxConcurrencyAttr(
Decl *D, const SYCLIntelFPGAMaxConcurrencyAttr &A);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
bool IsPackExpansion);
void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
Expr *OE);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *ParamExpr);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);
/// AddAnnotationAttr - Adds an annotation Annot with Args arguments to D.
void AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Annot, MutableArrayRef<Expr *> Args);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *MaxThreads, Expr *MinBlocks);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
bool InInstantiation = false);
void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
ParameterABI ABI);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *Min, Expr *Max);
/// addSYCLIntelPipeIOAttr - Adds a pipe I/O attribute to a particular
/// declaration.
void addSYCLIntelPipeIOAttr(Decl *D, const AttributeCommonInfo &CI, Expr *ID);
/// AddSYCLIntelFPGAMaxConcurrencyAttr - Adds a max_concurrency attribute to a
/// particular declaration.
void AddSYCLIntelFPGAMaxConcurrencyAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
bool checkAllowedSYCLInitializer(VarDecl *VD,
bool CheckValueDependent = false);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
/// Check that the expression co_await promise.final_suspend() shall not be
/// potentially-throwing.
bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
/// Returns the OpenCL extension currently in effect (empty if none);
/// see setCurrentOpenCLExtension() below.
llvm::StringRef getCurrentOpenCLExtension() const {
  return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD associates with any
/// extensions present in OpenCLDeclExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT associates with any
/// extensions present in OpenCLTypeExtMap and if so return the
/// extension(s) name(s).
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
/// Record \p Ext as the OpenCL extension currently in effect; it is
/// readable back via getCurrentOpenCLExtension().
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
  CurrOpenCLExtension = Ext.str();
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
SmallVector<SourceLocation, 4> DeclareTargetNesting;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Returns the number of scopes associated with the construct on the given
/// OpenMP level.
int getNumberOfConstructScopes(unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
/// Helper to keep information about the current `omp begin/end declare
/// variant` nesting.
struct OMPDeclareVariantScope {
  /// The associated OpenMP context selector.
  OMPTraitInfo *TI;
  /// The associated OpenMP context selector mangling.
  std::string NameSuffix;
  /// Construct a scope for the context selector \p TI (defined out of line).
  OMPDeclareVariantScope(OMPTraitInfo &TI);
};
/// Return the OMPTraitInfo of the innermost `omp begin/end declare
/// variant` scope, or nullptr when no such scope is open.
OMPTraitInfo *getOMPTraitInfoForSurroundingScope() {
  if (OMPDeclareVariantScopes.empty())
    return nullptr;
  return OMPDeclareVariantScopes.back().TI;
}
/// The current `omp begin/end declare variant` scopes.
SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;
/// The current `omp begin/end assumes` scopes.
SmallVector<AssumptionAttr *, 4> OMPAssumeScoped;
/// All `omp assumes` we encountered so far.
SmallVector<AssumptionAttr *, 4> OMPAssumeGlobal;
public:
/// The declarator \p D defines a function in the scope \p S which is nested
/// in an `omp begin/end declare variant` scope. In this method we create a
/// declaration for \p D and rename \p D according to the OpenMP context
/// selector of the surrounding scope. Return all base functions in \p Bases.
void ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists,
SmallVectorImpl<FunctionDecl *> &Bases);
/// Register \p D as specialization of all base functions in \p Bases in the
/// current `omp begin/end declare variant` scope.
void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
Decl *D, SmallVectorImpl<FunctionDecl *> &Bases);
/// Act on \p D, a function definition inside of an `omp [begin/end] assumes`.
void ActOnFinishedFunctionDefinitionInOpenMPAssumeScope(Decl *D);
/// True while at least one `omp begin declare variant` scope is open,
/// i.e. while a matching `omp end declare variant` may still be seen.
bool isInOpenMPDeclareVariantScope() const {
  return OMPDeclareVariantScopes.size() != 0;
}
/// Given the potential call expression \p Call, determine if there is a
/// specialization via the OpenMP declare variant mechanism available. If
/// there is, return the specialized call expression, otherwise return the
/// original \p Call.
ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
SourceLocation LParenLoc, MultiExprArg ArgExprs,
SourceLocation RParenLoc, Expr *ExecConfig);
/// Handle a `omp begin declare variant`.
void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);
/// Handle a `omp end declare variant`.
void ActOnOpenMPEndDeclareVariant();
/// Checks if the variant/multiversion functions are compatible.
bool areMultiversionVariantFunctionsCompatible(
const FunctionDecl *OldFD, const FunctionDecl *NewFD,
const PartialDiagnostic &NoProtoDiagID,
const PartialDiagnosticAt &NoteCausedDiagIDAt,
const PartialDiagnosticAt &NoSupportDiagIDAt,
const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
bool ConstexprSupported, bool CLinkageMayDiffer);
/// Function tries to capture lambda's captured variables in the OpenMP region
/// before the original lambda is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
/// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
unsigned OpenMPCaptureLevel) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// If the current region is a range loop-based region, mark the start of the
/// loop construct.
void startOpenMPCXXRangeFor();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
/// Check if the specified global variable must be captured by outer capture
/// regions.
/// \param Level Relative level of nested OpenMP construct for that
/// the check is performed.
bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned CaptureLevel) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
// OpenMP directives and clauses.
/// Called on correct id-expression from the '#pragma omp
/// threadprivate'.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp [begin] assume[s]'.
void ActOnOpenMPAssumesDirective(SourceLocation Loc,
OpenMPDirectiveKind DKind,
ArrayRef<StringRef> Assumptions,
bool SkippedClauses);
/// Return true while an `omp begin assumes` scope is active, i.e. the
/// scoped-assumption stack is non-empty.
bool isInOpenMPAssumeScope() const {
  return OMPAssumeScoped.size() != 0;
}
/// Return true if any global `omp assumes` directive has been seen, i.e. the
/// global-assumption list is non-empty.
bool hasGlobalOpenMPAssumes() const {
  return OMPAssumeGlobal.size() != 0;
}
/// Called on well-formed '#pragma omp end assumes'.
void ActOnOpenMPEndAssumesDirective();
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on Requires directive
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
const ValueDecl *getOpenMPDeclareMapperVarName() const;
/// Called on the start of target region i.e. '#pragma omp declare target'.
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of target region i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Searches for the provided declaration name for OpenMP declare target
/// directive.
NamedDecl *
lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
NamedDeclSetType &SameDirectiveDecls);
/// Called on correct id-expression from the '#pragma omp declare target'.
void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
OMPDeclareTargetDeclAttr::DevTypeTy DT);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Finishes analysis of the deferred functions calls that may be declared as
/// host/nohost during device/host compilation.
void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc);
/// Return true while inside an OpenMP `declare target` region, i.e. the
/// declare-target nesting stack is non-empty.
bool isInOpenMPDeclareTargetContext() const {
  return DeclareTargetNesting.size() != 0;
}
/// Return true inside OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// Called for syntactical loops (ForStmt or CXXForRangeStmt) associated to
/// an OpenMP loop directive.
StmtResult ActOnOpenMPCanonicalLoop(Stmt *AStmt);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '#pragma omp tile' after parsing of its clauses and
/// the associated statement.
StmtResult ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp depobj'.
StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp scan'.
StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp master taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel master taskloop simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp interop'.
StmtResult ActOnOpenMPInteropDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp dispatch' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp masked' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMaskedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type,
bool IsDeclareSimd = false);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
/// Checks '\#pragma omp declare variant' variant function and original
/// functions after parsing of the associated method/function.
/// \param DG Function declaration to which declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p DG.
/// \param TI The trait info object representing the match clause.
/// \returns None, if the function/variant function are not compatible with
/// the pragma, pair of original function/variant ref expression otherwise.
Optional<std::pair<FunctionDecl *, Expr *>>
checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
/// Called on well-formed '\#pragma omp declare variant' after parsing of
/// the associated method/function.
/// \param FD Function declaration to which declare variant directive is
/// applied.
/// \param VariantRef Expression that references the variant function, which
/// must be used instead of the original one, specified in \p FD.
/// \param TI The context traits associated with the function variant.
void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
OMPTraitInfo &TI, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-form 'sizes' clause.
OMPClause *ActOnOpenMPSizesClause(ArrayRef<Expr *> SizeExprs,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'detach' clause.
OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'order' clause.
OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acq_rel' clause.
OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'acquire' clause.
OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'release' clause.
OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'relaxed' clause.
OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'init' clause.
/// \p IsTarget / \p IsTargetSync presumably reflect the 'target' /
/// 'targetsync' interop modifiers, and \p PrefExprs the 'prefer_type'
/// preference list — confirm against the definition.
OMPClause *ActOnOpenMPInitClause(Expr *InteropVar, ArrayRef<Expr *> PrefExprs,
                                 bool IsTarget, bool IsTargetSync,
                                 SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation VarLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'use' clause.
OMPClause *ActOnOpenMPUseClause(Expr *InteropVar, SourceLocation StartLoc,
                                SourceLocation LParenLoc,
                                SourceLocation VarLoc, SourceLocation EndLoc);
/// Called on well-formed 'destroy' clause.
OMPClause *ActOnOpenMPDestroyClause(Expr *InteropVar, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation VarLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'novariants' clause.
OMPClause *ActOnOpenMPNovariantsClause(Expr *Condition,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
/// Called on well-formed 'nocontext' clause.
OMPClause *ActOnOpenMPNocontextClause(Expr *Condition,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'filter' clause.
OMPClause *ActOnOpenMPFilterClause(Expr *ThreadID, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                 SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                           SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                              SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
    OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Generic entry point for clauses that take a list of variables:
/// \p Kind selects the specific clause to build, and the remaining
/// arguments carry the union of the clause-specific modifiers.
OMPClause *ActOnOpenMPVarListClause(
    OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
    const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
    CXXScopeSpec &ReductionOrMapperIdScopeSpec,
    DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
    ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
    ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
    SourceLocation ExtraModifierLoc,
    ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
    ArrayRef<SourceLocation> MotionModifiersLoc);
/// Called on well-formed 'inclusive' clause.
OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'exclusive' clause.
OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                          SourceLocation StartLoc, SourceLocation ColonLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(
    ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
    SourceLocation LPKindLoc, SourceLocation ColonLoc,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
/// \p UnresolvedReductions, when non-empty, presumably carries reduction
/// identifiers that could not yet be resolved (e.g. in dependent
/// contexts) — confirm against the definition.
OMPClause *ActOnOpenMPReductionClause(
    ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
    SourceLocation StartLoc, SourceLocation LParenLoc,
    SourceLocation ModifierLoc, SourceLocation ColonLoc,
    SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
    CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
    ArrayRef<Expr *> VarList, SourceLocation StartLoc,
    SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
    CXXScopeSpec &ReductionIdScopeSpec,
    const DeclarationNameInfo &ReductionId,
    ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
                        SourceLocation StartLoc, SourceLocation LParenLoc,
                        OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
                        SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                    Expr *Alignment,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation ColonLoc,
                                    SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                   SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                  SourceLocation StartLoc,
                                  SourceLocation LParenLoc,
                                  SourceLocation EndLoc);
/// Called on well-formed 'depobj' pseudo clause.
OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
OMPClause *
ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
                        SourceLocation DepLoc, SourceLocation ColonLoc,
                        ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                        SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
                                   Expr *Device, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation ModifierLoc,
                                   SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
/// \p UnresolvedMappers, when non-empty, presumably carries mapper
/// identifiers that could not yet be resolved — confirm against the
/// definition.
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
                     ArrayRef<SourceLocation> MapTypeModifiersLoc,
                     CXXScopeSpec &MapperIdScopeSpec,
                     DeclarationNameInfo &MapperId,
                     OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
                     SourceLocation MapLoc, SourceLocation ColonLoc,
                     ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                     ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
    OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
    SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
    OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
    SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
    SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                    ArrayRef<SourceLocation> MotionModifiersLoc,
                    CXXScopeSpec &MapperIdScopeSpec,
                    DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
                    ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                    ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *
ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                      ArrayRef<SourceLocation> MotionModifiersLoc,
                      CXXScopeSpec &MapperIdScopeSpec,
                      DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
                      ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                      ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                         const OMPVarListLocTy &Locs);
/// Called on well-formed 'use_device_addr' clause.
OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
                                          const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                        const OMPVarListLocTy &Locs);
/// Called on well-formed 'nontemporal' clause.
OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
/// Data for list of allocators: one entry per item of a 'uses_allocators'
/// clause (an allocator expression with optional parenthesized traits).
struct UsesAllocatorsData {
  /// Allocator expression.
  Expr *Allocator = nullptr;
  /// Allocator traits expression; null when no traits were written.
  Expr *AllocatorTraits = nullptr;
  /// Locations of '(' and ')' symbols around the traits.
  SourceLocation LParenLoc, RParenLoc;
};
/// Called on well-formed 'uses_allocators' clause.
OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc,
                                          ArrayRef<UsesAllocatorsData> Data);
/// Called on well-formed 'affinity' clause.
OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ColonLoc,
                                     SourceLocation EndLoc, Expr *Modifier,
                                     ArrayRef<Expr *> Locators);
/// Describes which kind of conversion a checked-conversion routine has
/// been asked to perform.
enum CheckedConversionKind {
  /// An implicit conversion.
  CCK_ImplicitConversion,
  /// A C-style cast.
  CCK_CStyleCast,
  /// A functional-style cast.
  CCK_FunctionalCast,
  /// A cast other than a C-style cast.
  CCK_OtherCast,
  /// A conversion for an operand of a builtin overloaded operator.
  CCK_ForBuiltinOverloadedOp
};

/// Returns true when \p CCK names an explicit cast (C-style, functional,
/// or other); false for implicit conversions and conversions performed
/// for builtin overloaded operators.
static bool isCast(CheckedConversionKind CCK) {
  switch (CCK) {
  case CCK_CStyleCast:
  case CCK_FunctionalCast:
  case CCK_OtherCast:
    return true;
  default:
    return false;
  }
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// If isLvalue, the result of the cast is an lvalue.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
                             ExprValueKind VK = VK_RValue,
                             const CXXCastPath *BasePath = nullptr,
                             CheckedConversionKind CCK
                                = CCK_ImplicitConversion);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of an unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
                                                bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This function is a no-op if the operand has a function type
// or an array type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used for emitting the right warning by DefaultVariadicArgumentPromotion
enum VariadicCallType {
  VariadicFunction,
  VariadicBlock,
  VariadicMethod,
  VariadicConstructor,
  VariadicDoesNotApply
};
/// Determine which VariadicCallType applies to a call to \p FDecl with
/// prototype \p Proto through the callee expression \p Fn.
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
                                     const FunctionProtoType *Proto,
                                     Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
  VAK_Valid,
  VAK_ValidInCXX11,
  VAK_Undefined,
  VAK_MSVCUndefined,
  VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not. In the success case,
/// the statement is rewritten to remove implicit nodes from the return
/// value.
bool checkAndRewriteMustTailAttr(Stmt *St, const Attr &MTA);
private:
/// Check whether the given statement can have musttail applied to it,
/// issuing a diagnostic and returning false if not.
bool checkMustTailAttr(const Stmt *St, const Attr &MTA);
public:
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collect argument expressions for the various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
                            const FunctionProtoType *Proto,
                            unsigned FirstParam, ArrayRef<Expr *> Args,
                            SmallVectorImpl<Expr *> &AllArgs,
                            VariadicCallType CallType = VariadicDoesNotApply,
                            bool AllowExplicit = false,
                            bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
                                            FunctionDecl *FDecl);
/// Context in which we're performing a usual arithmetic conversion.
enum ArithConvKind {
  /// An arithmetic operation.
  ACK_Arithmetic,
  /// A bitwise operation.
  ACK_BitwiseOp,
  /// A comparison.
  ACK_Comparison,
  /// A conditional (?:) operator.
  ACK_Conditional,
  /// A compound assignment expression.
  ACK_CompAssign,
};
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc, ArithConvKind ACK);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
  /// Compatible - the types are compatible according to the standard.
  Compatible,
  /// PointerToInt - The assignment converts a pointer to an int, which we
  /// accept as an extension.
  PointerToInt,
  /// IntToPointer - The assignment converts an int to a pointer, which we
  /// accept as an extension.
  IntToPointer,
  /// FunctionVoidPointer - The assignment is between a function pointer and
  /// void*, which the standard doesn't allow, but we accept as an extension.
  FunctionVoidPointer,
  /// IncompatiblePointer - The assignment is between two pointer types that
  /// are not compatible, but we accept them as an extension.
  IncompatiblePointer,
  /// IncompatibleFunctionPointer - The assignment is between two function
  /// pointer types that are not compatible, but we accept them as an
  /// extension.
  IncompatibleFunctionPointer,
  /// IncompatiblePointerSign - The assignment is between two pointer types
  /// which point to integers which have a different sign, but are otherwise
  /// identical. This is a subset of the above, but broken out because it's by
  /// far the most common case of incompatible pointers.
  IncompatiblePointerSign,
  /// CompatiblePointerDiscardsQualifiers - The assignment discards
  /// c/v/r qualifiers, which we accept as an extension.
  CompatiblePointerDiscardsQualifiers,
  /// IncompatiblePointerDiscardsQualifiers - The assignment
  /// discards qualifiers that we don't permit to be discarded,
  /// like address spaces.
  IncompatiblePointerDiscardsQualifiers,
  /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
  /// changes address spaces in nested pointer types which is not allowed.
  /// For instance, converting __private int ** to __generic int ** is
  /// illegal even though __private could be converted to __generic.
  IncompatibleNestedPointerAddressSpaceMismatch,
  /// IncompatibleNestedPointerQualifiers - The assignment is between two
  /// nested pointer types, and the qualifiers other than the first two
  /// levels differ e.g. char ** -> const char **, but we accept them as an
  /// extension.
  IncompatibleNestedPointerQualifiers,
  /// IncompatibleVectors - The assignment is between two vector types that
  /// have the same size, which we accept as an extension.
  IncompatibleVectors,
  /// IntToBlockPointer - The assignment converts an int to a block
  /// pointer. We disallow this.
  IntToBlockPointer,
  /// IncompatibleBlockPointer - The assignment is between two block
  /// pointer types that are not compatible.
  IncompatibleBlockPointer,
  /// IncompatibleObjCQualifiedId - The assignment is between a qualified
  /// id type and something else (that is incompatible with it). For example,
  /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
  IncompatibleObjCQualifiedId,
  /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
  /// object with __weak qualifier.
  IncompatibleObjCWeakRef,
  /// Incompatible - We reject this conversion outright, it is invalid to
  /// represent it in the AST.
  Incompatible
};
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
                              SourceLocation Loc,
                              QualType DstType, QualType SrcType,
                              Expr *SrcExpr, AssignmentAction Action,
                              bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
                       bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
                            Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
                                             QualType LHSType,
                                             QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
                                             ExprResult &RHS,
                                             CastKind &Kind,
                                             bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
    QualType LHSType, ExprResult &RHS, bool Diagnose = true,
    bool DiagnoseCFAudited = false, bool ConvertRHS = true);
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
                                                           ExprResult &RHS);
/// Determine whether \p From is a string literal being converted to a
/// non-const pointer type \p ToType (name-level summary; see definition).
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
/// Check exception-specification compatibility when converting \p From to
/// \p ToType (name-level summary; see definition).
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
/// Perform an implicit conversion of \p From to \p ToType; \p Action
/// selects the diagnostics emitted on failure.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     AssignmentAction Action,
                                     bool AllowExplicit = false);
/// Overload that applies a previously-computed implicit conversion
/// sequence.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const ImplicitConversionSequence& ICS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK
                                        = CCK_ImplicitConversion);
/// Overload that applies a previously-computed standard conversion
/// sequence.
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
                                     const StandardConversionSequence& SCS,
                                     AssignmentAction Action,
                                     CheckedConversionKind CCK);
/// Convert \p E to type \p Ty via a qualification conversion, producing
/// a value of kind \p VK.
ExprResult PerformQualificationConversion(
    Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
    CheckedConversionKind CCK = CCK_ImplicitConversion);
/// The following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// Type checking of binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
                         ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
                                      ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
    ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
    SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
    bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc, bool IsCompAssign = false);
void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE);
QualType CheckCompareOperands( // C99 6.5.8/9
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
    ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
    BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
    Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
/// Pseudo-object (e.g. Objective-C property) forms of the operations above.
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
                                   UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
                                       BinaryOperatorKind Opcode,
                                       Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
    ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
    ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
    ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
                                     ExprResult &RHS,
                                     SourceLocation QuestionLoc);
/// Compute the composite pointer type (C++ [expr.type]) of \p E1 and
/// \p E2; when \p ConvertArgs is true the operands may be rewritten to
/// that type.
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
                                  bool ConvertArgs = true);
/// Convenience overload of FindCompositePointerType that takes
/// ExprResult operands: unwraps them, delegates to the Expr*& overload,
/// then stores the (possibly rewritten) expressions back into the
/// ExprResults.
QualType FindCompositePointerType(SourceLocation Loc,
                                  ExprResult &E1, ExprResult &E2,
                                  bool ConvertArgs = true) {
  Expr *LHS = E1.get();
  Expr *RHS = E2.get();
  QualType Composite = FindCompositePointerType(Loc, LHS, RHS, ConvertArgs);
  // Propagate any in-place rewrites performed by the callee.
  E1 = LHS;
  E2 = RHS;
  return Composite;
}
/// Find a composite Objective-C pointer type for the branches of a
/// conditional operator (name-level summary; see definition).
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
                                      SourceLocation QuestionLoc);
/// Diagnose suspicious null/non-pointer mixes in a conditional operator
/// (name-level summary; see definition).
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
                                SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
                                  Expr::NullPointerConstantKind NullType,
                                  bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
                             SourceLocation Loc, bool IsCompAssign,
                             bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc,
                                    BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
                                    SourceLocation Loc);
/// Type checking for matrix binary operators.
QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS,
                                        SourceLocation Loc,
                                        bool IsCompAssign);
QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
                                     SourceLocation Loc, bool IsCompAssign);
/// Predicates over vector/SVE/matrix type pairs used when checking casts
/// and lax conversions (name-level summaries; see definitions).
bool isValidSveBitcast(QualType srcType, QualType destType);
bool areMatrixTypesOfTheSameDimension(QualType srcTy, QualType destTy);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
  /// Ref_Incompatible - The two types are incompatible, so direct
  /// reference binding is not possible.
  Ref_Incompatible = 0,
  /// Ref_Related - The two types are reference-related, which means
  /// that their unqualified forms (T1 and T2) are either the same
  /// or T1 is a base class of T2.
  Ref_Related,
  /// Ref_Compatible - The two types are reference-compatible.
  Ref_Compatible
};
// Fake up a scoped enumeration that still contextually converts to bool.
struct ReferenceConversionsScope {
  /// The conversions that would be performed on an lvalue of type T2 when
  /// binding a reference of type T1 to it, as determined when evaluating
  /// whether T1 is reference-compatible with T2.
  enum ReferenceConversions {
    Qualification = 0x1,
    NestedQualification = 0x2,
    Function = 0x4,
    DerivedToBase = 0x8,
    ObjC = 0x10,
    ObjCLifetime = 0x20,
    LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime)
  };
};
using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions;
/// Compare the reference relationship of \p T1 and \p T2
/// (C++ [dcl.init.ref]); when \p Conv is non-null it receives the
/// conversions the binding would perform.
ReferenceCompareResult
CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2,
                             ReferenceConversions *Conv = nullptr);
/// Type-check a cast involving an expression of __unknown_anytype,
/// computing the cast kind, value kind and base path for \p CastExpr
/// (name-level summary; see definition).
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
                               Expr *CastExpr, CastKind &CastKind,
                               ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
/// \param callLoc Location of the call, for diagnostics.
/// \param result The argument expression to check.
/// \param paramType In/out: the parameter type, updated to the type the
///        argument was resolved to.
// NOTE: fixed a character-encoding corruption here — '&paramType' had
// been mangled into '¶mType' (the '&para' swallowed as an HTML
// entity), which made the declaration ill-formed.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
                              Expr *result, QualType &paramType);
// CheckMatrixCast - Check type constraints for matrix casts.
// We allow casting between matrices of the same dimensions i.e. when they
// have the same number of rows and columns. Returns true if the cast is
// invalid.
bool CheckMatrixCast(SourceRange R, QualType DestTy, QualType SrcTy,
                     CastKind &Kind);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// returns true if the cast is invalid
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
                     CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there are no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// returns the cast expr
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
                              CastKind &Kind);
/// Build a C++ functional-style cast expression, e.g. T(expr)
/// (name-level summary; see definition).
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
                                      SourceLocation LParenLoc,
                                      Expr *CastExpr,
                                      SourceLocation RParenLoc);
/// Possible outcomes of checking an ARC conversion.
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
                                        QualType castType, Expr *&op,
                                        CheckedConversionKind CCK,
                                        bool Diagnose = true,
                                        bool DiagnoseCFAudited = false,
                                        BinaryOperatorKind Opc = BO_PtrMemD
                                        );
/// Strip an ARC "unbridged cast" from \p e (name-level summary).
Expr *stripARCUnbridgedCast(Expr *e);
/// Diagnose an ARC "unbridged cast" in \p e (name-level summary).
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
                                           QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
                               MultiExprArg Args, Selector Sel,
                               ArrayRef<SourceLocation> SelectorLocs,
                               ObjCMethodDecl *Method, bool isClassMessage,
                               bool isSuperMessage, SourceLocation lbrac,
                               SourceLocation rbrac, SourceRange RecRange,
                               QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
                                  ObjCMethodDecl *Method, bool isClassMessage,
                                  bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
/// The result of processing a statement condition: the optional condition
/// variable, the converted condition expression, and — for constexpr
/// conditions — the evaluated boolean value.
class ConditionResult {
Decl *ConditionVar;    ///< Condition variable, if any (may be null).
FullExprArg Condition; ///< The converted condition expression.
bool Invalid;          ///< True if this result represents an error.
bool HasKnownValue;    ///< True if the value was evaluated at compile time.
bool KnownValue;       ///< The evaluated value; meaningful iff HasKnownValue.
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
// Note: members are initialized in declaration order, so HasKnownValue
// is already set when KnownValue's initializer reads it below. The
// value is evaluated only for non-value-dependent constexpr conditions.
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
/// Whether processing the condition failed.
bool isInvalid() const { return Invalid; }
/// Retrieve the condition variable (may be null) and the converted
/// condition expression.
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
/// Retrieve the compile-time value of the condition, or None if the
/// condition was not evaluated (non-constexpr or value-dependent).
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
/// Produce a ConditionResult representing an error.
static ConditionResult ConditionError() { return ConditionResult(true); }
/// The kind of condition being processed; determines which conversions are
/// applied to the condition expression.
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return The converted condition expression, or an invalid ExprResult if
/// there were any errors.
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Performs the conversion of the condition to
/// bool; returns an invalid ExprResult if the conversion is invalid.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
/// When true, diagnostics are suppressed rather than emitted.
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
/// Diagnose an expression whose type is not an integral constant
/// expression type.
virtual SemaDiagnosticBuilder
diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T);
/// Diagnose an expression that is not an integral constant expression.
/// Must be overridden by subclasses.
virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S,
SourceLocation Loc) = 0;
/// Diagnose an expression that is not an ICE but could be folded to a
/// constant (used when folding is permitted; see AllowFoldKind).
virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc);
virtual ~VerifyICEDiagnoser() {}
};
/// Whether a non-ICE expression may be accepted by folding it to a constant.
enum AllowFoldKind {
NoFold,
AllowFold,
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics. Returns the (possibly
/// converted) expression on success, or an invalid ExprResult on failure.
/// Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
AllowFoldKind CanFold = NoFold);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr,
AllowFoldKind CanFold = NoFold);
// Convenience overload: verify without retrieving the value.
ExprResult VerifyIntegerConstantExpression(Expr *E,
AllowFoldKind CanFold = NoFold) {
return VerifyIntegerConstantExpression(E, nullptr, CanFold);
}
/// VerifyBitField - verifies that a bit field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns the verified width expression on success, or an invalid
/// ExprResult on failure.
/// Can optionally return whether the bit-field is of width 0
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
/// Count of pragmas currently forcing functions to be __host__ __device__;
/// maintained by PushForceCUDAHostDevice/PopForceCUDAHostDevice below.
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// A diagnostic (with its location) deferred until we know whether the
/// containing function will be codegen'ed, together with the reason the
/// diagnostic was deferred.
class DeviceDeferredDiagnostic {
public:
DeviceDeferredDiagnostic(SourceLocation SL, const PartialDiagnostic &PD,
DeviceDiagnosticReason R)
: Diagnostic(SL, PD), Reason(R) {}
PartialDiagnosticAt &getDiag() { return Diagnostic; }
DeviceDiagnosticReason getReason() const { return Reason; }
private:
PartialDiagnosticAt Diagnostic;
DeviceDiagnosticReason Reason;
};
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<DeviceDeferredDiagnostic>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics
/// unless \p EmitOnBothSides is true.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
SemaDiagnosticBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder
diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as host code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the host, emits the diagnostics immediately.
/// - If CurContext is a non-host function, just ignore it.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
SemaDiagnosticBuilder diagIfOpenMPHostCode(SourceLocation Loc,
unsigned DiagID, FunctionDecl *FD);
/// Creates a SemaDiagnosticBuilder for a target-dependent diagnostic,
/// dispatching to the CUDA/OpenMP deferred-diagnostic machinery as
/// appropriate for the current compilation.
SemaDiagnosticBuilder targetDiag(SourceLocation Loc, unsigned DiagID,
FunctionDecl *FD = nullptr);
// Convenience overload taking a PartialDiagnostic instead of a raw DiagID.
SemaDiagnosticBuilder targetDiag(SourceLocation Loc,
const PartialDiagnostic &PD,
FunctionDecl *FD = nullptr) {
return targetDiag(Loc, PD.getDiagID(), FD) << PD;
}
/// Check if the declaration is allowed to be used in expressions for the
/// offloading devices.
void checkDeviceDecl(ValueDecl *D, SourceLocation Loc);
/// The CUDA target of a function: __device__, __global__, __host__,
/// __host__ __device__, or an invalid combination of attributes.
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
/// Returns true if \p D is a function whose host/device attributes were
/// added implicitly (rather than written in the source).
static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D);
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
/// May add implicit CUDAConstantAttr attribute to VD, depending on VD
/// and current compilation settings.
void MaybeAddCUDAConstantAttr(VarDecl *VD);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Perform CUDA-specific checks on the given lambda capture.
void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// By default, CUDA lambdas are host+device functions unless they carry an
/// explicit host or device attribute.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error emits appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
// --- C/C++ code-completion entry points, one per grammatical position. ---
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Determines the preferred type of the current function argument, by
/// examining the signatures of all possible overloads.
/// Returns null if unknown or ambiguous, or if code completion is off.
///
/// If the code completion point has been reached, also reports the function
/// signatures that were considered.
///
/// FIXME: rename to GuessCallArgumentType to reduce confusion.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
/// Trigger code completion for a record of \p BaseType. \p InitExprs are
/// expressions in the initializer list seen so far and \p D is the current
/// Designation being parsed.
void CodeCompleteDesignator(const QualType BaseType,
llvm::ArrayRef<Expr *> InitExprs,
const Designation &D);
void CodeCompleteAfterIf(Scope *S, bool IsBracedThen);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
bool IsUsingDeclaration, QualType BaseType,
QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteAfterFunctionEquals(Declarator &D);
// --- Objective-C code-completion entry points. ---
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
// --- Preprocessor and miscellaneous code-completion entry points. ---
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
/// Diagnose out-of-bounds (and related) array accesses.
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;    // Index of the format-string argument.
unsigned FirstDataArg; // Index of the first data argument.
bool HasVAListArg;     // Whether the callee takes a va_list.
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
// --- Call checking: one entry point per callee kind. ---
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl, QualType ThisType,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto, SourceLocation Loc);
void CheckArgAlignment(SourceLocation Loc, NamedDecl *FDecl,
StringRef ParamName, QualType ArgTy, QualType ParamTy);
// Shared implementation behind the Check*Call entry points above.
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
void CheckSYCLKernelCall(FunctionDecl *CallerFunc, SourceRange CallLoc,
ArrayRef<const Expr *> Args);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
// --- Builtin call checking: generic entry point, then per-target checkers. ---
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg,
bool WantCDE);
bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums);
bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall,
ArrayRef<int> ArgNums);
bool CheckX86BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckRISCVBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
CallExpr *TheCall);
bool CheckIntelFPGARegBuiltinFunctionCall(unsigned BuiltinID, CallExpr *Call);
bool CheckIntelFPGAMemBuiltinFunctionCall(CallExpr *Call);
// --- Checkers for specific (target-independent) builtins. ---
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinComplex(CallExpr *TheCall);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
// --- Helpers for checking constant arguments of builtin calls. ---
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum);
bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum,
unsigned ArgBits);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinPPCMMACall(CallExpr *TheCall, const char *TypeDesc);
bool CheckPPCMMAType(QualType Type, SourceLocation TypeLoc);
// Matrix builtin handling.
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall,
ExprResult CallResult);
ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
ExprResult CallResult);
public:
/// Kinds of format strings recognized by format-string checking; see
/// GetFormatStringType for the mapping from a FormatAttr.
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
/// Returns whether the given format string contains a %s specifier.
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
/// Check the arguments of a call against the given format attribute.
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
// --- Checks for misuse of specific C library functions. ---
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckFreeArguments(const CallExpr *E);
/// Check the expression of a return statement against the function's
/// declared return type and attributes.
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
/// Diagnose comparisons of floating-point values (e.g. with ==).
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(const Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether receiver is mutable ObjC container which
/// attempts to add itself into the container
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
/// Check a call against any trusted-computing-base ('TCB') restrictions
/// on the caller/callee — presumably driven by enforce_tcb attributes;
/// TODO confirm against the implementation.
void CheckTCBEnforcement(const CallExpr *TheCall, const FunctionDecl *Callee);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Peform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Nullable_result = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
bool isCFError(RecordDecl *D);
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
// Forwards to the current parser scope; only valid while parsing (see the
// caveats documented on getCurScope above).
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
// Lazily-computed identifiers; see the mutable Ident_* caches above.
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
// Returns the lexical context, preferring the saved original one when a
// generated-code context has temporarily been pushed.
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// Determine the number of levels of enclosing template parameters. This is
/// only usable while parsing. Note that this does not include dependent
/// contexts in which no template parameters have yet been declared, such as
/// in a terse function template or generic lambda before the first 'auto' is
/// encountered.
unsigned getTemplateDepth(Scope *S) const;
/// To be used for checking whether the arguments being passed to
/// function exceeds the number of parameters expected for it.
/// Reports whether a call site supplies more arguments than the callee
/// declares parameters for.
///
/// During code completion (partial overloading) the cursor may sit just
/// after a trailing comma, so one additional, not-yet-written argument is
/// assumed to be present.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
                             bool PartialOverloading = false) {
  // Count the in-progress argument following a trailing comma, if any.
  const size_t EffectiveArgs =
      (PartialOverloading && NumArgs > 0) ? NumArgs + 1 : NumArgs;
  return EffectiveArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions;
private:
int ParsingClassDepth = 0;
// RAII helper that stashes the pending delayed exception-spec checks on
// entry and restores them on exit, so that parsing a nested class does not
// see (or flush) checks belonging to the enclosing class.
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
// By destruction time the nested parse must have drained its own checks;
// the swap below then puts the saved outer state back.
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
// Exchanges Sema's live pending-check lists with the locally saved copies;
// called symmetrically from the constructor and destructor.
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
  Expr *E;
  RecordDecl *RD;
  ValueDecl *MD;
  CharUnits Alignment;

  MisalignedMember() : E(), RD(), MD(), Alignment() {}
  MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
                   CharUnits Alignment)
      : E(E), RD(RD), MD(MD), Alignment(Alignment) {}
  explicit MisalignedMember(Expr *E)
      : MisalignedMember(E, nullptr, nullptr, CharUnits()) {}

  /// Two entries are considered equal when they refer to the same
  /// expression; the remaining fields are auxiliary diagnostic data.
  /// (const-qualified so it can be called on const operands.)
  bool operator==(const MisalignedMember &m) const { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the sef of potentially
/// misaligned members and it is converted to some pointer type T with lower
/// or equal alignment requirements. If so it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
private:
// We store SYCL Kernels here and handle separately -- which is a hack.
// FIXME: It would be best to refactor this.
llvm::SetVector<Decl *> SyclDeviceDecls;
// SYCL integration header instance for current compilation unit this Sema
// is associated with.
std::unique_ptr<SYCLIntegrationHeader> SyclIntHeader;
std::unique_ptr<SYCLIntegrationFooter> SyclIntFooter;
// Used to suppress diagnostics during kernel construction, since these were
// already emitted earlier. Diagnosing during Kernel emissions also skips the
// useful notes that shows where the kernel was called.
bool DiagnosingSYCLKernel = false;
public:
void addSyclDeviceDecl(Decl *d) { SyclDeviceDecls.insert(d); }
llvm::SetVector<Decl *> &syclDeviceDecls() { return SyclDeviceDecls; }
/// Lazily creates and returns the SYCL integration header instance for this
/// compilation unit.
SYCLIntegrationHeader &getSyclIntegrationHeader() {
  if (SyclIntHeader == nullptr)
    SyclIntHeader = std::make_unique<SYCLIntegrationHeader>(
        getLangOpts().SYCLUnnamedLambda, *this);
  // Dereference the unique_ptr directly; the extra .get() was redundant.
  return *SyclIntHeader;
}
/// Lazily creates and returns the SYCL integration footer instance.
SYCLIntegrationFooter &getSyclIntegrationFooter() {
  if (SyclIntFooter == nullptr)
    SyclIntFooter = std::make_unique<SYCLIntegrationFooter>(*this);
  return *SyclIntFooter;
}
void addSyclVarDecl(VarDecl *VD) {
if (LangOpts.SYCLIsDevice && !LangOpts.SYCLIntFooter.empty())
getSyclIntegrationFooter().addVarDecl(VD);
}
enum SYCLRestrictKind {
KernelGlobalVariable,
KernelRTTI,
KernelNonConstStaticDataVariable,
KernelCallVirtualFunction,
KernelUseExceptions,
KernelCallRecursiveFunction,
KernelCallFunctionPointer,
KernelAllocateStorage,
KernelUseAssembly,
KernelCallDllimportFunction,
KernelCallVariadicFunction,
KernelCallUndefinedFunction,
KernelConstStaticVariable
};
bool isKnownGoodSYCLDecl(const Decl *D);
void checkSYCLDeviceVarDecl(VarDecl *Var);
void copySYCLKernelAttrs(const CXXRecordDecl *KernelObj);
void ConstructOpenCLKernel(FunctionDecl *KernelCallerFunc, MangleContext &MC);
void MarkDevices();
/// Emit a diagnostic about the given attribute having a deprecated name, and
/// also emit a fixit hint to generate the new attribute name.
void DiagnoseDeprecatedAttribute(const ParsedAttr &A, StringRef NewScope,
StringRef NewName);
/// Diagnoses an attribute in the 'intelfpga' namespace and suggests using
/// the attribute in the 'intel' namespace instead.
void CheckDeprecatedSYCLAttributeSpelling(const ParsedAttr &A,
StringRef NewName = "");
/// Creates a SemaDiagnosticBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurLexicalContext is a kernel function or it is known that the
/// function will be emitted for the device, emits the diagnostics
/// immediately.
/// - If CurLexicalContext is a function and we are compiling
/// for the device, but we don't know that this function will be codegen'ed
/// for devive yet, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// Diagnose __float128 type usage only from SYCL device code if the current
/// target doesn't support it
/// if (!S.Context.getTargetInfo().hasFloat128Type() &&
/// S.getLangOpts().SYCLIsDevice)
/// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128";
SemaDiagnosticBuilder SYCLDiagIfDeviceCode(
SourceLocation Loc, unsigned DiagID,
DeviceDiagnosticReason Reason = DeviceDiagnosticReason::Sycl |
DeviceDiagnosticReason::Esimd);
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed, creates a deferred diagnostic to be emitted if
/// and when the caller is codegen'ed, and returns true.
///
/// - Otherwise, returns true without emitting any diagnostics.
///
/// Adds Callee to DeviceCallGraph if we don't know if its caller will be
/// codegen'ed yet.
bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Finishes analysis of the deferred functions calls that may be not
/// properly declared for device compilation.
void finalizeSYCLDelayedAnalysis(const FunctionDecl *Caller,
const FunctionDecl *Callee,
SourceLocation Loc,
DeviceDiagnosticReason Reason);
/// Tells whether given variable is a SYCL explicit SIMD extension's "private
/// global" variable - global variable in the private address space.
// True only when all three hold: device compilation, the variable carries
// the SYCLSimd attribute with global storage, and it lives in the SYCL
// private address space.
bool isSYCLEsimdPrivateGlobal(VarDecl *VDecl) {
return getLangOpts().SYCLIsDevice && VDecl->hasAttr<SYCLSimdAttr>() &&
VDecl->hasGlobalStorage() &&
(VDecl->getType().getAddressSpace() == LangAS::sycl_private);
}
};
/// Attaches a single-integer-argument Intel attribute to D after validating
/// the argument.
///
/// Instantiation-dependent expressions are attached unchecked and validated
/// later, at instantiation time. For AT_SYCLIntelMaxGlobalWorkDim the value
/// must be in [0, 3]; on a violation a diagnostic is emitted and the
/// attribute is dropped.
template <typename AttrType>
void Sema::addIntelSingleArgAttr(Decl *D, const AttributeCommonInfo &CI,
                                 Expr *E) {
  assert(E && "Attribute must have an argument.");
  if (!E->isInstantiationDependent()) {
    llvm::APSInt ArgVal;
    ExprResult ICE = VerifyIntegerConstantExpression(E, &ArgVal);
    if (ICE.isInvalid())
      return;
    // Use the converted (constant-folded) expression from here on.
    E = ICE.get();
    int32_t ArgInt = ArgVal.getSExtValue();
    // The original code tested this same parsed kind in two consecutive
    // if-blocks; they are merged here with no behavior change.
    if (CI.getParsedKind() == ParsedAttr::AT_SYCLIntelMaxGlobalWorkDim) {
      if (ArgInt < 0) {
        Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
            << CI << /*non-negative*/ 1;
        return;
      }
      if (ArgInt > 3) {
        Diag(E->getBeginLoc(), diag::err_attribute_argument_out_of_range)
            << CI << 0 << 3 << E->getSourceRange();
        return;
      }
    }
  }
  D->addAttr(::new (Context) AttrType(Context, CI, E));
}
/// Validates one dimension expression of a max-work-size style attribute.
///
/// Returns the (possibly constant-folded) expression when it is acceptable,
/// or nullptr when the attribute should be dropped. Note the asymmetry:
/// a negative value only *warns* and the expression is still returned (the
/// attribute is kept), while a non-ICE or zero value is a hard error.
/// Instantiation-dependent expressions are returned unchecked.
inline Expr *checkMaxWorkSizeAttrExpr(Sema &S, const AttributeCommonInfo &CI,
Expr *E) {
assert(E && "Attribute must have an argument.");
if (!E->isInstantiationDependent()) {
llvm::APSInt ArgVal;
ExprResult ICE = S.VerifyIntegerConstantExpression(E, &ArgVal);
if (ICE.isInvalid())
return nullptr;
// Use the converted expression produced by the ICE check from here on.
E = ICE.get();
if (ArgVal.isNegative()) {
// Warning only: the attribute survives with the negative value.
S.Diag(E->getExprLoc(),
diag::warn_attribute_requires_non_negative_integer_argument)
<< E->getType() << S.Context.UnsignedLongLongTy
<< E->getSourceRange();
return E;
}
unsigned Val = ArgVal.getZExtValue();
if (Val == 0) {
S.Diag(E->getExprLoc(), diag::err_attribute_argument_is_zero)
<< CI << E->getSourceRange();
return nullptr;
}
}
return E;
}
/// Attaches a three-dimension (X, Y, Z) work-group style attribute to D.
///
/// Value-dependent dimension expressions are accepted as-is and validated
/// later at instantiation time; otherwise each dimension is validated by
/// checkMaxWorkSizeAttrExpr and the attribute is dropped if any check fails.
template <typename WorkGroupAttrType>
void Sema::addIntelTripleArgAttr(Decl *D, const AttributeCommonInfo &CI,
Expr *XDimExpr, Expr *YDimExpr,
Expr *ZDimExpr) {
assert((XDimExpr && YDimExpr && ZDimExpr) &&
"argument has unexpected null value");
// Accept template arguments for now as they depend on something else.
// We'll get to check them when they eventually get instantiated.
if (!XDimExpr->isValueDependent() && !YDimExpr->isValueDependent() &&
!ZDimExpr->isValueDependent()) {
// Save ConstantExpr in semantic attribute
// Note: all three are checked (emitting diagnostics for each) before
// deciding whether to drop the attribute.
XDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, XDimExpr);
YDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, YDimExpr);
ZDimExpr = checkMaxWorkSizeAttrExpr(*this, CI, ZDimExpr);
if (!XDimExpr || !YDimExpr || !ZDimExpr)
return;
}
D->addAttr(::new (Context)
WorkGroupAttrType(Context, CI, XDimExpr, YDimExpr, ZDimExpr));
}
/// Attaches an attribute whose single argument must be a strictly positive
/// power-of-two integer constant.
///
/// For IntelFPGANumBanksAttr the value must also agree with any existing
/// IntelFPGABankBitsAttr (number of bank bits == log2(banks)). On success an
/// implicit IntelFPGAMemoryAttr is added if not already present, and any
/// implicit default NumBanks attribute is dropped in favor of this explicit
/// one. Value-dependent expressions are attached unchecked.
template <typename AttrType>
void Sema::AddOneConstantPowerTwoValueAttr(Decl *D,
const AttributeCommonInfo &CI,
Expr *E) {
// Temporary attribute used only for classof() dispatch and diagnostics.
AttrType TmpAttr(Context, CI, E);
if (!E->isValueDependent()) {
llvm::APSInt Value;
ExprResult ICE = VerifyIntegerConstantExpression(E, &Value);
if (ICE.isInvalid())
return;
if (!Value.isStrictlyPositive()) {
Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
<< CI << /*positive*/ 0;
return;
}
if (!Value.isPowerOf2()) {
Diag(CI.getLoc(), diag::err_attribute_argument_not_power_of_two)
<< &TmpAttr;
return;
}
// Cross-check NumBanks against an already-attached BankBits attribute.
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *BBA = D->getAttr<IntelFPGABankBitsAttr>()) {
unsigned NumBankBits = BBA->args_size();
if (NumBankBits != Value.ceilLogBase2()) {
Diag(TmpAttr.getLocation(), diag::err_bankbits_numbanks_conflicting);
return;
}
}
}
// Only after all checks pass, switch to the converted expression.
E = ICE.get();
}
if (!D->hasAttr<IntelFPGAMemoryAttr>())
D->addAttr(IntelFPGAMemoryAttr::CreateImplicit(
Context, IntelFPGAMemoryAttr::Default));
// We are adding a user NumBanks, drop any implicit default.
if (IntelFPGANumBanksAttr::classof(&TmpAttr)) {
if (auto *NBA = D->getAttr<IntelFPGANumBanksAttr>())
if (NBA->isImplicit())
D->dropAttr<IntelFPGANumBanksAttr>();
}
D->addAttr(::new (Context) AttrType(Context, CI, E));
}
/// Builds a SYCL Intel FPGA loop attribute after validating its (optional)
/// integer argument.
///
/// A null argument is only allowed for SYCLIntelFPGALoopCoalesce. The value
/// must be strictly positive for InitiationInterval/LoopCoalesce and
/// non-negative for MaxConcurrency/MaxInterleaving/SpeculatedIterations/
/// LoopCount. Returns nullptr (after diagnosing) on invalid arguments;
/// instantiation-dependent arguments are accepted unchecked.
template <typename FPGALoopAttrT>
FPGALoopAttrT *Sema::BuildSYCLIntelFPGALoopAttr(const AttributeCommonInfo &A,
Expr *E) {
if (!E && !(A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGALoopCoalesce))
return nullptr;
if (E && !E->isInstantiationDependent()) {
Optional<llvm::APSInt> ArgVal = E->getIntegerConstantExpr(getASTContext());
if (!ArgVal) {
Diag(E->getExprLoc(), diag::err_attribute_argument_type)
<< A.getAttrName() << AANT_ArgumentIntegerConstant
<< E->getSourceRange();
return nullptr;
}
int Val = ArgVal->getSExtValue();
if (A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGAInitiationInterval ||
A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGALoopCoalesce) {
// These attributes require a value >= 1.
if (Val <= 0) {
Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
<< A.getAttrName() << /* positive */ 0;
return nullptr;
}
} else if (A.getParsedKind() ==
ParsedAttr::AT_SYCLIntelFPGAMaxConcurrency ||
A.getParsedKind() ==
ParsedAttr::AT_SYCLIntelFPGAMaxInterleaving ||
A.getParsedKind() ==
ParsedAttr::AT_SYCLIntelFPGASpeculatedIterations ||
A.getParsedKind() == ParsedAttr::AT_SYCLIntelFPGALoopCount) {
// These attributes allow zero but not negative values.
if (Val < 0) {
Diag(E->getExprLoc(), diag::err_attribute_requires_positive_integer)
<< A.getAttrName() << /* non-negative */ 1;
return nullptr;
}
} else {
llvm_unreachable("unknown sycl fpga loop attr");
}
}
return new (Context) FPGALoopAttrT(Context, A, E);
}
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
// Tracks whether a context was actually pushed, so the destructor only
// pops what the constructor pushed.
bool Entered = true;
public:
// Enters NewContext unless ShouldEnter is false.
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
// Variant that reuses the enclosing lambda context decl; always enters
// (Entered keeps its default of true).
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
// Init-list variant: only enters a context in the narrow case described
// below, hence Entered starts out false here.
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
template <>
void Sema::PragmaStack<Sema::AlignPackInfo>::Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
AlignPackInfo Value);
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
// Empty/tombstone keys delegate the FunctionDecl part to the base info and
// pair it with an invalid SourceLocation.
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
// Hash combines both components so entries for the same function at
// different locations land in different buckets.
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getHashValue());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
jacobi.c | #include <stdio.h>
#include <math.h>
#ifdef _OPENMP
#include <omp.h>
#endif
// Add timing support
#include <sys/time.h>
/* Return the current wall-clock time in seconds, with microsecond
 * resolution, as reported by gettimeofday(). */
double time_stamp()
{
  struct timeval now;
  gettimeofday(&now, NULL);
  return (double)now.tv_sec + (double)now.tv_usec * 1.0e-6;
}
double time1, time2;
void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);
/************************************************************
* program to solve a finite difference
* discretization of Helmholtz equation :
* (d2/dx2)u + (d2/dy2)u - alpha u = f
* using Jacobi iterative method.
*
* Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
* Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
*
* This C version program is translated by
* Chunhua Liao, University of Houston, Jan, 2005
*
* Directives are used in this code to achieve parallelism.
* All do loops are parallelized with default 'static' scheduling.
*
* Input : n - grid dimension in x direction
* m - grid dimension in y direction
* alpha - Helmholtz constant (always greater than 0.0)
* tol - error tolerance for iterative solver
* relax - Successice over relaxation parameter
* mits - Maximum iterations for iterative solver
*
* On output
* : u(n,m) - Dependent variable (solutions)
* : f(n,m) - Right hand side function
*************************************************************/
#define MSIZE 500
int n,m,mits;
double tol,relax=1.0,alpha=0.0543;
double u[MSIZE][MSIZE],f[MSIZE][MSIZE],uold[MSIZE][MSIZE];
double dx,dy;
/* Entry point: configures a fixed MSIZE x MSIZE problem (the interactive
 * input below is commented out), reports the OpenMP thread count when
 * compiled with OpenMP, then runs the solver via driver(). */
int main (void)
{
float toler; /* only referenced by the commented-out input code below */
/* printf("Input n,m (< %d) - grid dimension in x,y direction:\n",MSIZE);
scanf ("%d",&n);
scanf ("%d",&m);
printf("Input tol - error tolerance for iterative solver\n");
scanf("%f",&toler);
tol=(double)toler;
printf("Input mits - Maximum iterations for solver\n");
scanf("%d",&mits);
*/
/* Hard-coded problem configuration. */
n=MSIZE;
m=MSIZE;
tol=0.0000000001;
mits=5000;
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp single
printf("Running using %d threads...\n",omp_get_num_threads());
}
#endif
driver ( ) ;
return 0;
}
/*************************************************************
* Subroutine driver ()
* This is where the arrays are allocated and initialized.
*
* Working variables/arrays
* dx - grid spacing in x direction
* dy - grid spacing in y direction
*************************************************************/
/* Orchestrates the run: initializes the arrays, times the Jacobi solve,
 * prints the elapsed wall-clock time, and checks the solution error. */
void driver( )
{
initialize();
time1 = time_stamp();
/* Solve Helmholtz equation */
jacobi ();
time2 = time_stamp();
printf("------------------------\n");
printf("Execution time = %f\n",time2-time1);
/* error_check (n,m,alpha,dx,dy,u,f)*/
error_check ( );
}
/* subroutine initialize (n,m,alpha,dx,dy,u,f)
******************************************************
* Initializes data
* Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2)
*
******************************************************/
/* Initializes grid spacing (dx, dy), the initial condition u, and the
 * right-hand side f. The exact solution assumed is u(x,y)=(1-x^2)*(1-y^2).
 *
 * NOTE(review): the first two loops appear redundant — the third loop
 * recomputes and overwrites both u and f, so the final array contents come
 * from it alone. In the second loop, xx and yy are read while still holding
 * whatever the *last* iteration of the first loop left in them (they are
 * not recomputed per element) — presumably unintended; confirm against the
 * tool consuming the "#pragma aitool" annotations before removing. */
void initialize( )
{
int i,j, xx,yy;
//double PI=3.1415926;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
/* Initialize initial condition and RHS */
#pragma aitool fp_plus(2) fp_multiply(2)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
/* xx/yy are computed here but not used within this loop. */
xx =(int)( -1.0 + dx * (i-1));
yy = (int)(-1.0 + dy * (j-1)) ;
u[i][j] = 0.0;
}
#pragma aitool fp_minus(6) fp_multiply(5)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
u[i][j] = 0.0;
/* xx/yy here are stale values from the previous loop (see NOTE above). */
f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
- 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
}
/* This loop produces the values that actually survive in u and f. */
#pragma aitool fp_plus(2) fp_minus(6) fp_multiply(7)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx =(int)( -1.0 + dx * (i-1));
yy = (int)(-1.0 + dy * (j-1)) ;
u[i][j] = 0.0;
f[i][j] = -1.0*alpha *(1.0-xx*xx)*(1.0-yy*yy)\
- 2.0*(1.0-xx*xx)-2.0*(1.0-yy*yy);
}
}
/* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit)
******************************************************************
* Subroutine HelmholtzJ
* Solves poisson equation on rectangular grid assuming :
* (1) Uniform discretization in each direction, and
* (2) Dirichlect boundary conditions
*
* Jacobi method is used in this routine
*
* Input : n,m Number of grid points in the X/Y directions
* dx,dy Grid spacing in the X/Y directions
* alpha Helmholtz eqn. coefficient
* omega Relaxation factor
* f(n,m) Right hand side function
* u(n,m) Dependent variable/Solution
* tol Tolerance for iterative solver
* maxit Maximum number of iterations
*
* Output : u(n,m) - Solution
*****************************************************************/
/* Jacobi iteration for the Helmholtz problem on the global u/f arrays.
 * Iterates until the normalized residual drops below tol or mits sweeps
 * have been performed. Reads globals n, m, dx, dy, alpha, relax, tol,
 * mits; updates u (via uold) in place. */
void jacobi( )
{
double omega;
int i,j,k;
double error,resid,ax,ay,b;
// double error_local;
// float ta,tb,tc,td,te,ta1,ta2,tb1,tb2,tc1,tc2,td1,td2;
// float te1,te2;
// float second;
omega=relax;
/*
* Initialize coefficients */
ax = 1.0/(dx*dx); /* X-direction coef */
ay = 1.0/(dy*dy); /* Y-direction coef */
b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */
/* Seed error above tol so the loop body executes at least once. */
error = 10.0 * tol;
k = 1;
while ((k<=mits)&&(error>tol))
{
error = 0.0;
/* Copy new solution into old */
{
for(i=0;i<n;i++)
for(j=0;j<m;j++)
uold[i][j] = u[i][j];
/* Relaxation sweep over interior points only (boundaries fixed). */
#pragma aitool fp_plus(5) fp_minus(2) fp_multiply(5) fp_divide(1)
for (i=1;i<(n-1);i++)
for (j=1;j<(m-1);j++)
{
resid = (ax*(uold[i-1][j] + uold[i+1][j])\
+ ay*(uold[i][j-1] + uold[i][j+1])+ b * uold[i][j] - f[i][j])/b;
u[i][j] = uold[i][j] - omega * resid;
error = error + resid*resid ;
}
}
/* omp end parallel */
/* Error check */
k = k + 1;
if (k%500==0)
printf("Finished %d iteration.\n",k);
/* Normalize before the loop-condition test; this is the value compared
* against tol and the final "Residual" printed below. */
error = sqrt(error)/(n*m);
} /* End iteration loop */
/* k was incremented after the last sweep, so this is one past the number
* of completed iterations. */
printf("Total Number of Iterations:%d\n",k);
printf("Residual:%E\n", error);
}
/* subroutine error_check (n,m,alpha,dx,dy,u,f)
implicit none
************************************************************
* Checks error between numerical and exact solution
*
************************************************************/
void error_check ( )
{
int i,j;
double xx,yy,temp,error;
dx = 2.0 / (n-1);
dy = 2.0 / (m-1);
error = 0.0 ;
#pragma aitool fp_plus(3) fp_minus(3) fp_multiply(6)
for (i=0;i<n;i++)
for (j=0;j<m;j++)
{
xx = -1.0 + dx * (i-1);
yy = -1.0 + dy * (j-1);
temp = u[i][j] - (1.0-xx*xx)*(1.0-yy*yy);
error = error + temp*temp;
}
error = sqrt(error)/(n*m);
printf("Solution Error :%E \n",error);
}
|
GB_binop__bshift_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_01__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_03__bshift_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bshift_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bshift_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bshift_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bshift_int64)
// C=scalar+B GB (_bind1st__bshift_int64)
// C=scalar+B' GB (_bind1st_tran__bshift_int64)
// C=A+scalar GB (_bind2nd__bshift_int64)
// C=A'+scalar GB (_bind2nd_tran__bshift_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int64 (aij, bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_bitshift_int64 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT64 || GxB_NO_BSHIFT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no accumulator is applied.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB (_Cdense_ewise3_noaccum__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The type-generic loop body is supplied by the included template,
// specialized via the GB_* macros defined above.
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C.
// B_ek_slicing partitions the entries of B across B_ntasks tasks run on
// B_nthreads threads; the loop itself is in the included template.
GrB_Info GB (_Cdense_accumB__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (type int8_t, the shift count) into the
// dense matrix C, using the included template for the actual loop.
// Returns GrB_NO_VALUE when this kernel is compiled out (see GB_DISABLE).
// Fix vs. generated original: the trailing duplicate "return (GrB_SUCCESS);"
// after the compound statement was unreachable (the block always returns)
// and has been removed.
GrB_Info GB (_Cdense_accumb__bshift_int64)
(
GrB_Matrix C,
const GB_void *p_bwork, // the scalar b, passed as untyped memory
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// The colscale (C = A*D) and rowscale (C = D*B) variants are not generated
// for this operator; both are compiled out with "#if 0" and named (none).
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B, C<M> = A+B, or C<!M> = A+B with op = BSHIFT (int64).
// TaskList and the C_to_* maps describe the precomputed parallel schedule;
// the numerical work is done by the included GB_add_template.c.
GrB_Info GB (_AaddB__bshift_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORK below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult, method 01: C = A.*B or C<M> = A.*B; dispatch and loops are
// in the included GB_emult_01_meta.c.
GrB_Info GB (_AemultB_01__bshift_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 02: C<#> = A.*B where A is sparse/hyper and B is
// bitmap/full. BSHIFT is non-commutative with no flipped variant
// (GB_BINOP_FLIP is 1), so the flipxy flag selects which GB_FLIPPED
// instantiation of the template is compiled in.
GrB_Info GB (_AemultB_02__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult, method 03: C<M> = A.*B with M sparse/hyper and both A and B
// bitmap/full; M_ek_slicing partitions the entries of M across the tasks.
GrB_Info GB (_AemultB_03__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult with bitmap result: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C
// is held in bitmap form; the work is in GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__bshift_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = bitshift (x, Bx [p]) for every entry present in B: apply the
// binary operator with its first argument bound to the scalar x.
GrB_Info GB (_bind1st__bshift_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs; the bound scalar is read once
int64_t *Cx = (int64_t *) Cx_output ;
int8_t *Bx = (int8_t *) Bx_input ;
const int64_t x = (*((int64_t *) x_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// GBB tests the bitmap Bb; positions without an entry are skipped
if (GBB (Bb, p))
{
const int8_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_bitshift_int64 (x, bij) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = bitshift (Ax [p], y) for every entry present in A: apply the
// binary operator with its second argument bound to the scalar y.
GrB_Info GB (_bind2nd__bshift_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// typed views of the untyped inputs; the bound shift count is read once
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
const int8_t y = (*((int8_t *) y_input)) ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// GBB tests the bitmap Ab; positions without an entry are skipped
if (GBB (Ab, p))
{
Cx [p] = GB_bitshift_int64 (GBX (Ax, p, false), y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// cij = op (x, aij): A's value (int8_t) is the 2nd operand; the bound
// scalar x (int64_t) is the 1st.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int64 (x, aij) ; \
}
// C = op (x, A'): transpose A and apply the bound-first binary operator
// via the included GB_unop_transpose.c.
GrB_Info GB (_bind1st_tran__bshift_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code after this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// cij = op (aij, y): A's value (int64_t) is the 1st operand; the bound
// scalar y (int8_t, the shift count) is the 2nd.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_bitshift_int64 (aij, y) ; \
}
// C = op (A', y): transpose A and apply the bound-second binary operator
// via the included GB_unop_transpose.c.
GrB_Info GB (_bind2nd_tran__bshift_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
calculate_embedded_signed_distance_to_3d_skin_process.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Pooyan Dadvand
// Ruben Zorrilla
// Daniel Baumgaertner
// Johannes Wolf
//
#if !defined(KRATOS_CALCULATE_EMBEDDED_SIGNED_DISTANCE_TO_3D_SKIN_PROCESS_H_INCLUDED )
#define KRATOS_CALCULATE_EMBEDDED_SIGNED_DISTANCE_TO_3D_SKIN_PROCESS_H_INCLUDED
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// External includes
#include "includes/kratos_flags.h"
// Project includes
#include "includes/define.h"
#include "processes/process.h"
#include "includes/kratos_flags.h"
#include "includes/element.h"
#include "includes/model_part.h"
#include "geometries/geometry_data.h"
#include "utilities/openmp_utils.h"
namespace Kratos {
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Short class definition.
/// Computes a signed distance field in a volume (fluid) model part with
/// respect to a 3D skin (structure) model part, and projects the skin
/// VELOCITY onto the intersected volume elements as EMBEDDED_VELOCITY.
class CalculateEmbeddedSignedDistanceTo3DSkinProcess : public Process
{
public:
///@name Type Definitions
///@{
///@}
///@name Pointer Definitions
/// Pointer definition of CalculateEmbeddedSignedDistanceTo3DSkinProcess
KRATOS_CLASS_POINTER_DEFINITION(CalculateEmbeddedSignedDistanceTo3DSkinProcess);
///@}
///@name Life Cycle
///@{
/// Constructor.
/// @param rThisModelPartStruc skin (structure) model part
/// @param rThisModelPartFluid volume (fluid) model part where DISTANCE is set
/// @param DiscontinuousDistance if true use the elementwise (discontinuous)
///        distance process, otherwise the nodal (continuous) one
CalculateEmbeddedSignedDistanceTo3DSkinProcess(ModelPart& rThisModelPartStruc, ModelPart& rThisModelPartFluid, bool DiscontinuousDistance = false)
: mrSkinModelPart(rThisModelPartStruc), mrFluidModelPart(rThisModelPartFluid), mDiscontinuousDistance(DiscontinuousDistance)
{
}
/// Destructor.
~CalculateEmbeddedSignedDistanceTo3DSkinProcess() override
{
}
///@}
///@name Operators
///@{
/// Function-call operator; forwards to Execute().
void operator()()
{
Execute();
}
///@}
///@name Operations
///@{
/// Runs the full pipeline: distance computation, peak-value bounding and
/// EMBEDDED_VELOCITY projection, then clears the intersection data.
void Execute() override
{
// Create a pointer to the discontinuous or continuos distance calculation process
CalculateDiscontinuousDistanceToSkinProcess<3>::Pointer pdistance_calculator;
if(mDiscontinuousDistance)
{
pdistance_calculator = CalculateDiscontinuousDistanceToSkinProcess<3>::Pointer(
new CalculateDiscontinuousDistanceToSkinProcess<3>(mrFluidModelPart, mrSkinModelPart));
}
else
{
// NOTE(review): this assignment relies on CalculateDistanceToSkinProcess<3>
// deriving from CalculateDiscontinuousDistanceToSkinProcess<3> — confirm
pdistance_calculator = CalculateDiscontinuousDistanceToSkinProcess<3>::Pointer(
new CalculateDistanceToSkinProcess<3>(mrFluidModelPart, mrSkinModelPart));
}
// Call the distance calculator methods
pdistance_calculator->Initialize();
pdistance_calculator->FindIntersections();
pdistance_calculator->CalculateDistances(pdistance_calculator->GetIntersections());
// TODO: Raycasting
// Distance positive and negative peak values correction
this->PeakValuesCorrection(); //TODO: Check the correct behaviour of this method once the raycasting has been implemented
// Compute the embedded velocity
this->CalculateEmbeddedVelocity(pdistance_calculator->GetIntersections());
// Call the distance calculation Clear() to delete the intersection data
pdistance_calculator->Clear();
}
/// This process keeps no persistent state of its own; nothing to clear.
void Clear() override
{
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
std::string Info() const override
{
return "CalculateEmbeddedSignedDistanceTo3DSkinProcess";
}
/// Print information about this object.
void PrintInfo(std::ostream& rOStream) const override
{
rOStream << "CalculateEmbeddedSignedDistanceTo3DSkinProcess";
}
/// Print object's data.
void PrintData(std::ostream& rOStream) const override
{
}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
/// Sets each fluid element's EMBEDDED_VELOCITY to the average of the mean
/// nodal VELOCITY of all skin geometries intersecting that element.
/// Averages the first 3 nodes of each skin geometry and divides by 3,
/// i.e. it assumes triangular skin entities — TODO confirm for other skins.
/// @param rIntersectedObjects per-element list of intersecting skin objects
///        (indexed in the same order as mrFluidModelPart's elements)
void CalculateEmbeddedVelocity(std::vector<PointerVector<GeometricalObject>>& rIntersectedObjects)
{
const array_1d<double, 3> aux_zero = ZeroVector(3);
// NOTE(review): the parallel loop below is deliberately commented out —
// presumably a thread-safety concern; verify before re-enabling.
// #pragma omp parallel for firstprivate(aux_zero)
for (int k = 0; k < static_cast<int>(mrFluidModelPart.NumberOfElements()); ++k)
{
ModelPart::ElementsContainerType::iterator itFluidElement = mrFluidModelPart.ElementsBegin() + k;
const PointerVector<GeometricalObject>& intersected_skin_elems = rIntersectedObjects[k];
// Initialize the element EMBEDDED_VELOCITY
itFluidElement->SetValue(EMBEDDED_VELOCITY, aux_zero);
// Accumulate the VELOCITY from all the structure conditions that intersect the element
unsigned int intersection_counter = 0;
for(auto itSkinElement : intersected_skin_elems)
{
array_1d<double,3> emb_vel = (itSkinElement.GetGeometry()[0]).GetSolutionStepValue(VELOCITY);
emb_vel += (itSkinElement.GetGeometry()[1]).GetSolutionStepValue(VELOCITY);
emb_vel += (itSkinElement.GetGeometry()[2]).GetSolutionStepValue(VELOCITY);
itFluidElement->GetValue(EMBEDDED_VELOCITY) += emb_vel/3;
intersection_counter++;
}
// Set the EMBEDDED_VELOCITY as the average of the accumulated values
if (intersection_counter!=0)
{
itFluidElement->GetValue(EMBEDDED_VELOCITY) /= intersection_counter;
}
}
}
/// Clamps DISTANCE on all nodes that do not belong to a split element to
/// the extreme values found among the split-element nodes, removing
/// spurious far-field peaks.
void PeakValuesCorrection()
{
// Obtain the maximum and minimum distance values to be set
double max_distance, min_distance;
this->SetMaximumAndMinimumDistanceValues(max_distance, min_distance);
// Bound the distance value in the non splitted nodes
block_for_each(mrFluidModelPart.Nodes(), [&](Node<3>& rNode){
if(rNode.IsNot(TO_SPLIT))
{
double& rnode_distance = rNode.FastGetSolutionStepValue(DISTANCE);
rnode_distance = (rnode_distance > 0.0) ? max_distance : min_distance;
}
});
}
/// Computes the max/min nodal DISTANCE over the nodes of split elements.
/// Side effect: flags those nodes TO_SPLIT (the flag is not reset here).
/// The extrema are seeded with 1.0, so max_distance >= 1.0 and
/// min_distance <= 1.0 even when no node qualifies.
/// @param max_distance output: maximum DISTANCE among TO_SPLIT nodes
/// @param min_distance output: minimum DISTANCE among TO_SPLIT nodes
void SetMaximumAndMinimumDistanceValues(double& max_distance, double& min_distance)
{
// Flag the nodes belonging to the splitted elements
for (int k = 0; k < static_cast<int>(mrFluidModelPart.NumberOfElements()); ++k)
{
ModelPart::ElementsContainerType::iterator itFluidElement = mrFluidModelPart.ElementsBegin() + k;
if(itFluidElement->Is(TO_SPLIT))
{
Geometry<Node<3>>& rGeom = itFluidElement->GetGeometry();
for (unsigned int i=0; i<rGeom.size(); ++i)
{
rGeom[i].Set(TO_SPLIT, true);
}
}
}
// Obtain the maximum and minimum nodal distance values of the nodes flagged as TO_SPLIT
const unsigned int num_threads = ParallelUtilities::GetNumThreads();
OpenMPUtils::PartitionVector nodes_partition;
OpenMPUtils::DivideInPartitions(mrFluidModelPart.NumberOfNodes(), num_threads, nodes_partition);
std::vector<double> max_distance_vect(num_threads, 1.0);
std::vector<double> min_distance_vect(num_threads, 1.0);
#pragma omp parallel shared(max_distance_vect, min_distance_vect)
{
const int k = OpenMPUtils::ThisThread();
ModelPart::NodeIterator nodes_begin = mrFluidModelPart.NodesBegin() + nodes_partition[k];
ModelPart::NodeIterator nodes_end = mrFluidModelPart.NodesBegin() + nodes_partition[k+1];
double max_local_distance = 1.0;
double min_local_distance = 1.0;
for( ModelPart::NodeIterator itFluidNode = nodes_begin; itFluidNode != nodes_end; ++itFluidNode)
{
if(itFluidNode->Is(TO_SPLIT))
{
const double node_distance = itFluidNode->FastGetSolutionStepValue(DISTANCE);
max_local_distance = (node_distance>max_local_distance) ? node_distance : max_local_distance;
min_local_distance = (node_distance<min_local_distance) ? node_distance : min_local_distance;
}
}
max_distance_vect[k] = max_local_distance;
min_distance_vect[k] = min_local_distance;
}
// Reduce to maximum and minimum the thread results
// Note that MSVC14 does not support max reductions, which are part of OpenMP 3.1
max_distance = max_distance_vect[0];
min_distance = min_distance_vect[0];
for (unsigned int k = 1; k < num_threads; k++)
{
max_distance = (max_distance > max_distance_vect[k]) ? max_distance : max_distance_vect[k];
min_distance = (min_distance < min_distance_vect[k]) ? min_distance : min_distance_vect[k];
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
// References to the skin (structure) and fluid model parts, plus the
// flag selecting the discontinuous distance computation.
ModelPart& mrSkinModelPart;
ModelPart& mrFluidModelPart;
bool mDiscontinuousDistance;
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
CalculateEmbeddedSignedDistanceTo3DSkinProcess& operator=(CalculateEmbeddedSignedDistanceTo3DSkinProcess const& rOther);
/// Copy constructor.
//CalculateEmbeddedSignedDistanceTo3DSkinProcess(CalculateEmbeddedSignedDistanceTo3DSkinProcess const& rOther);
///@}
}; // Class CalculateEmbeddedSignedDistanceTo3DSkinProcess
///@}
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
/// input stream function (declared only; no definition in this header)
inline std::istream& operator >> (std::istream& rIStream,
CalculateEmbeddedSignedDistanceTo3DSkinProcess& rThis);
/// output stream function: prints the process info, a newline, then its data
inline std::ostream& operator << (std::ostream& rOStream,
const CalculateEmbeddedSignedDistanceTo3DSkinProcess& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
} // namespace Kratos.
#endif // KRATOS_CALCULATE_EMBEDDED_SIGNED_DISTANCE_TO_3D_SKIN_PROCESS_H_INCLUDED defined
|
sync.c | /*
* Copyright (c) 2009, 2010, 2011, ETH Zurich.
* All rights reserved.
*
* This file is distributed under the terms in the attached LICENSE file.
* If you do not find this file, copies can be found by writing to:
* ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
*/
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
#include <stdint.h>
#include <omp.h>
#include <arch/x86/barrelfish_kpi/asm_inlines_arch.h>
#define GANG_SCHEDULING
#undef MEASURE_SYNC
#define MEASURE
#define WORK_PERIOD 5000000000UL
#define STACK_SIZE (64 * 1024)
/*
 * OpenMP barrier micro-benchmark.
 *
 * Modes:
 *   argc == 1: calibration -- keep doubling workmax until one run of the
 *              barrier loop takes at least WORK_PERIOD cycles, then print it.
 *   argc >= 2: run workmax barrier iterations and report per-thread compute
 *              and wait times in cycles (rdtsc).
 *   argc == 3: additionally span the domain to the given thread count.
 *
 * Note: in the measurement mode this function never returns; it spins
 * forever after printing the results (intentional on this platform).
 *
 * Fixes vs. original: the MEASURE timestamp arrays were indexed with
 * "i < WORKMAX ? i : WORKMAX", writing one element past the end whenever
 * workmax > WORKMAX (now clamped to WORKMAX - 1); the MEASURE_SYNC busy-wait
 * counter was a signed int compared against waits[] entries > INT_MAX (now
 * uint64_t); min accumulators are seeded with UINT64_MAX instead of a raw
 * timestamp; and nthreads is now checked against MAXTHREADS.
 */
int main(int argc, char *argv[])
{
uint64_t now, start;
// write-only dummy counters: volatile keeps the busy work from being
// optimized away; the OpenMP "private" copies below start uninitialized,
// which is harmless since their values are never read
volatile uint64_t workcnt, workload = 0;
int64_t workmax = 1000;
int64_t i;
if(argc == 1) {
printf("calibrating...\n");
do {
workload = 0;
workmax *= 2;
start = rdtsc();
// every thread runs the whole loop and meets at the barrier each pass
#pragma omp parallel private(i,workload)
for(i = 0; i < workmax; i++) {
#pragma omp barrier
workload++;
}
now = rdtsc();
} while(now - start < WORK_PERIOD);
printf("workmax = %ld\n", workmax);
return 0;
} else {
workmax = atol(argv[1]);
}
int nthreads = omp_get_max_threads();
if(argc == 3) {
// span the domain to the requested number of threads first
nthreads = atoi(argv[2]);
backend_span_domain(nthreads, STACK_SIZE);
bomp_custom_init(NULL);
omp_set_num_threads(nthreads);
}
printf("threads %d, workmax %ld, CPUs %d\n", nthreads, workmax,
omp_get_num_procs());
#ifdef MEASURE_SYNC
// stagger the threads with different busy-wait lengths, then compare
// consecutive threads' timestamps before and after bomp_synchronize()
uint64_t waits[16] = {
0, 1000, 1000000, 1000000000, 500, 5000000, 5000000000, 3000000,
0, 1000, 1000000, 1000000000, 500, 5000000, 5000000000, 3000000
};
uint64_t ts[16][10];
printf("before sync:\n");
#pragma omp parallel private(workcnt)
{
// j must be 64-bit: several waits[] entries exceed INT_MAX
for(uint64_t j = 0; j < waits[omp_get_thread_num()]; j++) {
workcnt++;
}
for(int j = 0; j < 10; j++) {
ts[omp_get_thread_num()][j] = rdtsc();
}
}
for(int j = 0; j < 10; j++) {
printf("timestamp %d: ", j);
for(int n = 1; n < nthreads; n++) {
printf("%ld ", ts[n][j] - ts[n - 1][j]);
}
printf("\n");
}
printf("after sync:\n");
#pragma omp parallel
{
bomp_synchronize();
for(int j = 0; j < 10; j++) {
ts[omp_get_thread_num()][j] = rdtsc();
}
}
for(int j = 0; j < 10; j++) {
printf("timestamp %d: ", j);
for(int n = 1; n < nthreads; n++) {
printf("%ld ", ts[n][j] - ts[n - 1][j]);
}
printf("\n");
}
#endif
#ifdef GANG_SCHEDULING
#pragma omp parallel
{
bomp_synchronize();
}
#endif
start = rdtsc();
#ifdef MEASURE
# define MAXTHREADS 16
# define WORKMAX 10000
static uint64_t starta[MAXTHREADS][WORKMAX];
static uint64_t end1[MAXTHREADS][WORKMAX];
static uint64_t end2[MAXTHREADS][WORKMAX];
// the timestamp buffers only have room for MAXTHREADS threads
assert(nthreads <= MAXTHREADS);
#endif
// Do some work; iterations beyond WORKMAX all reuse the last slot
#pragma omp parallel private(workcnt,i)
for(i = 0; i < workmax; i++) {
#ifdef MEASURE
starta[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX - 1] = rdtsc();
#endif
workcnt++;
#ifdef MEASURE
end1[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX - 1] = rdtsc();
#endif
#pragma omp barrier
#ifdef MEASURE
end2[omp_get_thread_num()][i < WORKMAX ? i : WORKMAX - 1] = rdtsc();
#endif
}
now = rdtsc();
#ifdef MEASURE
printf("avg compute time: ");
for(int n = 0; n < nthreads; n++) {
uint64_t sum = 0, min = UINT64_MAX, max = 0;
for(i = 0; i < WORKMAX; i++) {
uint64_t val = end1[n][i] - starta[n][i];
sum += val;
min = val < min ? val : min;
max = val > max ? val : max;
}
printf("%lu(%lu,%lu) ", sum / WORKMAX, min, max);
}
printf("\n");
#if 0
printf("wait time dump:\n");
for(i = 0; i < WORKMAX; i++) {
for(int n = 0; n < nthreads; n++) {
uint64_t val = end2[n][i] - end1[n][i];
printf("%lu ", val);
}
printf("\n");
}
#endif
printf("avg wait time: ");
for(int n = 0; n < nthreads; n++) {
uint64_t sum = 0, min = UINT64_MAX, max = 0;
for(i = 0; i < WORKMAX; i++) {
uint64_t val = end2[n][i] - end1[n][i];
sum += val;
min = val < min ? val : min;
max = val > max ? val : max;
}
printf("%lu(%lu,%lu) ", sum / WORKMAX, min, max);
}
printf("\n");
#endif
printf("%s: threads %d, compute time %lu ticks\n", argv[0], nthreads, now - start);
// intentional: keep the domain alive after the benchmark finishes
for(;;);
return 0;
}
|
GB_unaryop__minv_bool_int8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__minv_bool_int8
// op(A') function: GB_tran__minv_bool_int8
// C type: bool
// A type: int8_t
// cast: ;
// unaryop: cij = true
// Template macros for this unary kernel: int8_t input, bool output.
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
bool
// GB_GETA is a no-op here: the operator ignores the input value.
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// For bool, minv yields the constant true (see the "unaryop: cij = true"
// note above), so z is assigned true regardless of x.
// unary operator
#define GB_OP(z, x) \
z = true ;
// GB_CASTING is also a no-op: no typecast is needed.
// casting
#define GB_CASTING(z, aij) \
; ;
// Net effect of GB_CAST_OP below: Cx [pC] = true ; (aij and z are unused).
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv (Ax [p]) for all p. With bool output the operator is the
// constant true (see GB_OP above), so the input values are never read.
GrB_Info GB_unop__minv_bool_int8
(
bool *Cx, // Cx and Ax may be aliased
int8_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// expands to: Cx [p] = true ;
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose, typecast, and apply the unary operator; the
// transpose machinery (numerical phase 2) is in GB_unaryop_transpose.c.
GrB_Info GB_tran__minv_bool_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__pow_uint32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pow_uint32)
// A.*B function (eWiseMult): GB (_AemultB_08__pow_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__pow_uint32)
// A.*B function (eWiseMult): GB (_AemultB_04__pow_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__pow_uint32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pow_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__pow_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pow_uint32)
// C=scalar+B GB (_bind1st__pow_uint32)
// C=scalar+B' GB (_bind1st_tran__pow_uint32)
// C=A+scalar GB (_bind2nd__pow_uint32)
// C=A'+scalar GB (_bind2nd_tran__pow_uint32)
// C type: uint32_t
// A type: uint32_t
// A pattern? 0
// B type: uint32_t
// B pattern? 0
// BinaryOp: cij = GB_pow_uint32 (aij, bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint32_t aij = GBX (Ax, pA, A_iso)
// NOTE: the stray trailing backslash after each "0" below splices the next
// comment line into the macro body; since line splicing happens before
// comment removal, the macros still expand to just 0 — a harmless quirk of
// the generator (it merely consumes the following comment line).
// true if values of A are not used
#define GB_A_IS_PATTERN \
0 \
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint32_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0 \
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_pow_uint32 (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_POW || GxB_NO_UINT32 || GxB_NO_POW_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B: all three matrices dense, no accumulator, op = POW (uint32).
// NOTE(review): unlike the GrB_Info kernels in this file this one returns
// void and has no GB_DISABLE guard — presumably the caller checks the
// disable condition before dispatching here; confirm against the caller.
void GB (_Cdense_ewise3_noaccum__pow_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C.
// B_ek_slicing partitions the entries of B across B_ntasks tasks run on
// B_nthreads threads; the loop itself is in the included template.
GrB_Info GB (_Cdense_accumB__pow_uint32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate the scalar b (type uint32_t) into the dense matrix C,
// using the included template for the actual loop.
// Returns GrB_NO_VALUE when this kernel is compiled out (see GB_DISABLE).
// Fix vs. generated original: the trailing duplicate "return (GrB_SUCCESS);"
// after the compound statement was unreachable (the block always returns)
// and has been removed.
GrB_Info GB (_Cdense_accumb__pow_uint32)
(
GrB_Matrix C,
const GB_void *p_bwork, // the scalar b, passed as untyped memory
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *restrict Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B (or masked variants) with op = POW (uint32).
// When is_eWiseUnion is true the alpha/beta scalars are unpacked for the
// template (presumably substituting for entries missing from A or B — as
// in GxB_eWiseUnion); otherwise they are left unused.
GrB_Info GB (_AaddB__pow_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// workspaces for slicing M, A, and B; released by GB_FREE_WORKSPACE below
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint32_t alpha_scalar ;
uint32_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint32_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint32_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// Element-wise multiply (method 08) for z = pow(x,y) over uint32; the body is
// entirely in GB_emult_08_meta.c, driven by the parameters below.
GrB_Info GB (_AemultB_08__pow_uint32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// kernel disabled at compile time for this operator/type
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// Element-wise multiply (method 02) for z = pow(x,y) over uint32.  The
// GB_BINOP_FLIP branch selects whether the template computes f(x,y) or
// f(y,x) via the GB_FLIPPED macro before each template inclusion.
GrB_Info GB (_AemultB_02__pow_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time for this operator/type
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// Element-wise multiply (method 04) for z = pow(x,y) over uint32; body is in
// GB_emult_04_template.c.  The mask M is iterated via M_ek_slicing.
GrB_Info GB (_AemultB_04__pow_uint32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time for this operator/type
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// Bitmap-output element-wise multiply for z = pow(x,y) over uint32; body is
// in GB_bitmap_emult_template.c.
GrB_Info GB (_AemultB_bitmap__pow_uint32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// kernel disabled at compile time for this operator/type
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// For every entry p present in B (per the Bb bitmap, if any), computes
// Cx [p] = pow (x, Bx [p]) with the scalar bound to the first argument.
GrB_Info GB (_bind1st__pow_uint32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time for this operator/type
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// GBB tests the bitmap: skip positions with no entry
if (!GBB (Bb, p)) continue ;
uint32_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_pow_uint32 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// For every entry p present in A (per the Ab bitmap, if any), computes
// Cx [p] = pow (Ax [p], y) with the scalar bound to the second argument.
GrB_Info GB (_bind2nd__pow_uint32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time for this operator/type
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// GBB tests the bitmap: skip positions with no entry
if (!GBB (Ab, p)) continue ;
uint32_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_pow_uint32 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c to compute each output entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint32 (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__pow_uint32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
// kernel disabled at compile time for this operator/type
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows (no effect at run time)
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
// GB_CAST_OP is consumed by GB_unop_transpose.c to compute each output entry.
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_pow_uint32 (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__pow_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel disabled at compile time for this operator/type
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
3777.c |
/*
* Compile using the command:
* `cc 27Stencil.c -o oa -fopenmp -lm`
*/
#include <math.h>
#include <omp.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENACC
#include <openacc.h>
#endif
#define DEFAULT_DATASIZE 1048576 /* Default datasize. */
#define DEFAULT_REPS 10 /* Default repetitions. */
#define CONF95 1.96
#define ITERATIONS 10
#define FAC (1./26)
#define TOLERANCE 1.0e-15
extern int reps; /* Repetitions. */
extern double *times; /* Array to store results in. */
extern int flag; /* Flag to set CPU or GPU invocation. */
extern unsigned int datasize; /* Datasize passed to benchmark functions. */
unsigned int datasize = -1; /* Datasize for tests in bytes. */
int reps = -1; /* Repetitions. */
double *times; /* Array of doubles storing the benchmark times in microseconds. */
double testtime; /* The average test time in microseconds for reps runs. */
double testsd; /* The standard deviation in the test time in microseconds for reps runs. */
int flag = 0; /* 0 indicates CPU. */
/*
* Function prototypes for common functions.
*/
void init(int argc, char **argv);
void finalisetest(char *);
void finalise(void);
void benchmark(char *, double (*test)(void));
void print_results(char *, double, double);
/* Forward Declarations of utility functions*/
double max_diff(double *, double *, int);
void wul();
/* Print command-line help for this benchmark binary. */
void usage(char *argv[]) {
    printf("Usage: %s \n"
           "\t--reps <repetitions> (default %d)\n"
           "\t--datasize <datasize> (default %d bytes)\n",
           argv[0], DEFAULT_REPS, DEFAULT_DATASIZE);
}
/*
* This function parses the parameters from the command line.
*/
/*
 * Parse the command-line parameters.
 *
 * Recognised options: --reps <n>, --datasize <bytes>, -h.
 * Sets the file globals `reps` and `datasize`; exits with a usage
 * message on any malformed or unknown argument.
 *
 * Fix: the original read argv[++arg] without checking that a value
 * actually follows the option, so "prog --reps" dereferenced the NULL
 * argv[argc] sentinel inside atoi().
 */
void parse_args(int argc, char *argv[]) {
    int arg;
    for (arg = 1; arg < argc; arg++) {
        if (strcmp(argv[arg], "--reps") == 0) {
            /* require a following value before consuming it */
            if (arg + 1 >= argc) {
                printf("Missing value for --reps\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            reps = atoi(argv[++arg]);
            if (reps == 0) {
                printf("Invalid integer:--reps: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "--datasize") == 0) {
            if (arg + 1 >= argc) {
                printf("Missing value for --datasize\n");
                usage(argv);
                exit(EXIT_FAILURE);
            }
            datasize = atoi(argv[++arg]);
            if (datasize == 0) {
                printf("Invalid integer:--datasize: %s\n", argv[arg]);
                usage(argv);
                exit(EXIT_FAILURE);
            }
        } else if (strcmp(argv[arg], "-h") == 0) {
            usage(argv);
            exit(EXIT_SUCCESS);
        } else {
            printf("Invalid parameters: %s\n", argv[arg]);
            usage(argv);
            exit(EXIT_FAILURE);
        }
    }
}
/*
 * Compute summary statistics over the global `times` array.
 *
 *   mtp - out: mean time (microseconds) over successful repetitions
 *   sdp - out: population standard deviation over the same entries
 *
 * Entries equal to 0 mark repetitions that failed and are excluded.
 * Fix: if every repetition failed (good_reps == 0) the original divided
 * by zero and reported NaN; now it reports 0 for both outputs.
 */
void stats(double *mtp, double *sdp) {
    double meantime, totaltime, sumsq, mintime, maxtime, sd;
    int i, good_reps;
    mintime = 1.0e10;
    maxtime = 0.;
    totaltime = 0.;
    good_reps = 0;
    for (i = 0; i < reps; i++) {
        /* Skip entries where times is 0, this indicates an error occured */
        if (times[i] != 0) {
            mintime = (mintime < times[i]) ? mintime : times[i];
            maxtime = (maxtime > times[i]) ? maxtime : times[i];
            totaltime += times[i];
            good_reps++;
        }
    }
    if (good_reps == 0) {
        /* no successful repetition: avoid dividing by zero below */
        *mtp = 0.0;
        *sdp = 0.0;
        return;
    }
    meantime = totaltime / good_reps;
    sumsq = 0;
    for (i = 0; i < reps; i++) {
        if (times[i] != 0) {
            sumsq += (times[i] - meantime) * (times[i] - meantime);
        }
    }
    sd = sqrt(sumsq / good_reps);
    *mtp = meantime;
    *sdp = sd;
}
/*
* This function prints the results of the tests.
* If you use a compiler which sets a different preprocessor flag
* you may wish to add it here.
*/
/*
 * Emit the mean test time in microseconds on its own line.  A compiler
 * identifier is kept up to date for the (commented-out) verbose report.
 */
void print_results(char *name, double testtime, double testsd) {
    char compiler[20];
    /* Default identifier, overridden by known preprocessor flags. */
    strcpy(compiler, "COMPILER");
#ifdef __PGI
    strcpy(compiler, "PGI");
#endif
#ifdef __HMPP
    strcpy(compiler, "CAPS");
#endif
    (void) compiler;  /* only used by the verbose line below */
    (void) name;
    (void) testsd;
    //printf("%s %s %d %f %f\n", compiler, name, datasize, testtime*1e6, CONF95*testsd*1e6);
    printf("%f\n", testtime*1e6);
}
/*
* This function initialises the storage for the test results and set the defaults.
*/
/*
 * Initialise the storage for the test results and apply defaults.
 *
 * Parses the command line, substitutes DEFAULT_REPS / DEFAULT_DATASIZE
 * for unset values, and allocates the global `times` array.
 * Fix: the malloc of `times` was unchecked; a failure would crash later
 * inside benchmark()/stats(), so fail early with a clear message.
 */
void init(int argc, char **argv)
{
    parse_args(argc, argv);
    if (reps == -1) {
        reps = DEFAULT_REPS;
    }
    if (datasize == (unsigned int)-1) {
        datasize = DEFAULT_DATASIZE;
    }
    times = (double *)malloc((reps) * sizeof(double));
    if (times == NULL) {
        printf("Unable to allocate memory for results storage.\n");
        exit(EXIT_FAILURE);
    }
    /*
    #ifdef __PGI
    acc_init(acc_device_nvidia);
    // printf("PGI INIT\n");
    #endif
    #ifdef __HMPP
    int a[5] = {1,2,3,4,5};
    #pragma acc data copyin(a[0:5])
    {}
    #endif
    #ifdef _CRAYC
    int a[5] = {1,2,3,4,5};
    #pragma acc data copyin(a[0:5])
    {}
    #endif
    */
}
/* Release the results storage allocated by init(). */
void finalise(void) {
free(times);
}
/*
* This function runs the benchmark specified.
*/
void benchmark(char *name, double (*test)(void))
{
int i = 0;
double tmp = 0;
for (i=0; i<reps; i++) {
tmp = test();
if (tmp == -10000){
printf("Memory allocation failure in %s\n", name);
times[i] = 0;
}
else if (tmp == -11000){
printf("CPU/GPU mismatch in %s\n", name);
times[i] = 0;
}
else{
times[i] = tmp;
}
}
stats(&testtime, &testsd);
//printf("in benchmark\n");
print_results(name, testtime, testsd);
//printf("printed result\n");
}
/*
 * 27-point stencil benchmark.
 *
 * Builds an sz^3 grid sized from the global `datasize`, runs ITERATIONS
 * sweeps of a 27-point average on the host, repeats the same sweeps under
 * the OpenACC/OpenMP pragmas, and compares the two results.
 *
 * Returns: elapsed time of the accelerated run in seconds, or the
 * sentinels -10000 (allocation failure) / -11000 (results differ by more
 * than TOLERANCE).
 */
double stencil()
{
extern unsigned int datasize;
/* cube side, including a 1-cell halo on each face; the factor of 2
   leaves room for the two working arrays a0 and a1 */
int sz = cbrt((datasize/sizeof(double))/2);
int i, j, k, iter;
int n = sz-2;
double fac = FAC;
double t1, t2;
double md;
//printf("size = %d\n", sz);
/* Work buffers, with halos */
double *a0 = (double*)malloc(sizeof(double)*sz*sz*sz);
double *device_result = (double*)malloc(sizeof(double)*sz*sz*sz);
double *a1 = (double*)malloc(sizeof(double)*sz*sz*sz);
double *host_result = (double*)malloc(sizeof(double)*sz*sz*sz);
double *a0_init = (double*)malloc(sizeof(double)*sz*sz*sz);
if(a0==NULL||device_result==NULL||a1==NULL||host_result==NULL||a0_init==NULL){
/* Something went wrong in the memory allocation here, fail gracefully */
return(-10000);
}
/* initialize input array a0 */
/* zero all of array (including halos) */
//printf("size = %d\n", sz);
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz; k++) {
a0[i*sz*sz+j*sz+k] = 0.0;
//printf("%d\t", (i*sz*sz+j*sz+k));
}
}
}
//printf("\n");
//int size_of_a0 = sizeof(a0) / sizeof(*a0);
//printf("size of a0 = %d\n", size_of_a0);
/* use random numbers to fill interior */
for (i = 1; i < n+1; i++) {
for (j = 1; j < n+1; j++) {
for (k = 1; k < n+1; k++) {
a0[i*sz*sz+j*sz+k] = (double) rand()/ (double)(1.0 + RAND_MAX);
}
}
}
/* memcpy(&a0_init[0], &a0[0], sizeof(double)*sz*sz*sz); */
/* save initial input array for later GPU run */
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz; k++) {
a0_init[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
}
}
}
//printf("Host computation\n");
/* run main computation on host */
for (iter = 0; iter < ITERATIONS; iter++) {
/* each cell becomes the average of its 26 neighbours (FAC = 1/26);
   the centre cell itself is not part of the sum */
for (i = 1; i < n+1; i++) {
for (j = 1; j < n+1; j++) {
for (k = 1; k < n+1; k++) {
a1[i*sz*sz+j*sz+k] = (
a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
) * fac;
}
}
}
/* copy the sweep result back into a0 for the next iteration */
for (i = 1; i < n+1; i++) {
for (j = 1; j < n+1; j++) {
for (k = 1; k < n+1; k++) {
a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
}
}
}
} /* end iteration loop */
/* save result */
/* memcpy(&host_result[0], &a0[0], sizeof(double)*sz*sz*sz); */
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz; k++) {
host_result[i*sz*sz+j*sz+k] = a0[i*sz*sz+j*sz+k];
// printf("%lf\t", a0[i*sz*sz+j*sz+k]);
}
}
}
//int size = sizeof(host_result)/sizeof(host_result[0]);
//for(i = 0; i < size; i++) {
// printf("%lf\t", host_result[i]);
//}
//printf("\n");
/* copy initial array back to a0 */
/* memcpy(&a0[0], &a0_init[0], sizeof(double)*sz*sz*sz); */
for (i = 0; i < sz; i++) {
for (j = 0; j < sz; j++) {
for (k = 0; k < sz; k++) {
a0[i*sz*sz+j*sz+k] = a0_init[i*sz*sz+j*sz+k];
}
}
}
//printf("Starting acc pragma code\n");
/* timed accelerated run of the same ITERATIONS sweeps */
t1 = omp_get_wtime();
#pragma acc data copy(a0[0:sz*sz*sz]), create(a1[0:sz*sz*sz], i,j,k,iter), copyin(sz,fac,n)
{
for (iter = 0; iter < ITERATIONS; iter++) {
/* NOTE(review): "p0"/"p1"/"p2" are not valid OpenMP directive tokens;
   these three pragmas look corrupted and are likely rejected or
   ignored by the compiler -- confirm against the original benchmark */
#pragma omp p0 parallel for schedule(dynamic, 28) simd num_threads(28)
for (i = 1; i < n+1; i++) {
#pragma omp p1 parallel for schedule(dynamic, 28) simd num_threads(28)
for (j = 1; j < n+1; j++) {
#pragma omp p2 parallel for schedule(dynamic, 28) simd num_threads(28)
for (k = 1; k < n+1; k++) {
a1[i*sz*sz+j*sz+k] = (
a0[i*sz*sz+(j-1)*sz+k] + a0[i*sz*sz+(j+1)*sz+k] +
a0[(i-1)*sz*sz+j*sz+k] + a0[(i+1)*sz*sz+j*sz+k] +
a0[(i-1)*sz*sz+(j-1)*sz+k] + a0[(i-1)*sz*sz+(j+1)*sz+k] +
a0[(i+1)*sz*sz+(j-1)*sz+k] + a0[(i+1)*sz*sz+(j+1)*sz+k] +
a0[i*sz*sz+(j-1)*sz+(k-1)] + a0[i*sz*sz+(j+1)*sz+(k-1)] +
a0[(i-1)*sz*sz+j*sz+(k-1)] + a0[(i+1)*sz*sz+j*sz+(k-1)] +
a0[(i-1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k-1)] +
a0[(i+1)*sz*sz+(j-1)*sz+(k-1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k-1)] +
a0[i*sz*sz+(j-1)*sz+(k+1)] + a0[i*sz*sz+(j+1)*sz+(k+1)] +
a0[(i-1)*sz*sz+j*sz+(k+1)] + a0[(i+1)*sz*sz+j*sz+(k+1)] +
a0[(i-1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i-1)*sz*sz+(j+1)*sz+(k+1)] +
a0[(i+1)*sz*sz+(j-1)*sz+(k+1)] + a0[(i+1)*sz*sz+(j+1)*sz+(k+1)] +
a0[i*sz*sz+j*sz+(k-1)] + a0[i*sz*sz+j*sz+(k+1)]
) * fac;
}
}
}
#pragma acc parallel loop
for (i = 1; i < n+1; i++) {
#pragma acc loop
for (j = 1; j < n+1; j++) {
#pragma acc loop
for (k = 1; k < n+1; k++) {
a0[i*sz*sz+j*sz+k] = a1[i*sz*sz+j*sz+k];
}
}
}
} /* end iteration loop */
} /* end data region */
#pragma acc wait
t2 = omp_get_wtime();
memcpy(&device_result[0], &a0[0], sizeof(double)*sz*sz*sz);
md = max_diff(&host_result[0],&device_result[0], sz);
/* Free malloc'd memory to prevent leaks */
free(a0);
free(a0_init);
free(a1);
free(host_result);
free(device_result);
//printf("md: %lf \t tolerance: %lf", md, TOLERANCE);
if (md < TOLERANCE ){
//printf ("GPU matches host to within tolerance of %1.1e\n\n", TOLERANCE);
return(t2 - t1);
}
else{
// printf ("WARNING: GPU does not match to within tolerance of %1.1e\nIt is %lf\n", TOLERANCE, md);
return(-11000);
}
}
/* Utility Functions */
/*
 * Return the largest absolute element-wise difference between two
 * sz x sz x sz grids, considering only the interior (halo-excluded)
 * cells, i.e. indices 1..sz-2 in each dimension.
 */
double max_diff(double *array1, double *array2, int sz)
{
    int n = sz - 2;
    double worst = 0.0;
    int x, y, z;
    for (x = 1; x <= n; x++) {
        for (y = 1; y <= n; y++) {
            for (z = 1; z <= n; z++) {
                int idx = (x * sz + y) * sz + z;
                double d = fabs(array1[idx] - array2[idx]);
                if (d > worst)
                    worst = d;
            }
        }
    }
    return worst;
}
/*
* This function ensures the device is awake.
* It is more portable than acc_init().
*/
/*
 * Warm-up routine: runs a tiny data-parallel kernel so the accelerator
 * is initialised before timing starts (more portable than acc_init()).
 *
 * Fix: the original printed a message on malloc failure but then fell
 * through and dereferenced the NULL buffer(s); now it releases whichever
 * allocation succeeded and returns early.
 */
void wul(){
    int data = 8192;
    double *arr_a = (double *)malloc(sizeof(double) * data);
    double *arr_b = (double *)malloc(sizeof(double) * data);
    int i = 0;
    if (arr_a==NULL||arr_b==NULL) {
        printf("Unable to allocate memory in wul.\n");
        free(arr_a);   /* free(NULL) is a no-op */
        free(arr_b);
        return;
    }
    for (i=0;i<data;i++){
        arr_a[i] = (double) (rand()/(1.0+RAND_MAX));
    }
#pragma acc data copy(arr_b[0:data]), copyin(arr_a[0:data])
    {
#pragma acc parallel loop
        for (i=0;i<data;i++){
            arr_b[i] = arr_a[i] * 2;
        }
    }
    if (arr_a[0] < 0){
        printf("Error in WUL\n");
        /*
         * This should never be called as rands should be in the range (0,1].
         * This stops clever optimizers.
         */
    }
    free(arr_a);
    free(arr_b);
}
/*
 * Driver: parse arguments and allocate result storage, wake the device,
 * time the 27-point stencil benchmark, then print results and clean up.
 */
int main(int argc, char **argv) {
    char testName[32];
    init(argc, argv);              /* storage for results + argument parsing */
    wul();                         /* ensure the device is awake */
    strcpy(testName, "27S");
    benchmark(testName, &stencil);
    finalise();                    /* free results storage */
    return EXIT_SUCCESS;
}
|
master-3.c | /* { dg-do compile } */
/* { dg-options "-fopenmp -fdump-tree-ompexp" } */
/* LLVM LOCAL test not applicable */
/* { dg-require-fdump "" } */
extern void bar(int);
/* Calls bar(0) under an "omp master" construct, so only the master
   thread executes it; the ompexp dump check below relies on the
   omp_get_thread_num() guard that lowering introduces. */
void foo (void)
{
#pragma omp master
bar(0);
}
/* { dg-final { scan-tree-dump-times "omp_get_thread_num" 1 "ompexp" } } */
/* { dg-final { cleanup-tree-dump "ompexp" } } */
|
image-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickCore Image View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/MagickCore.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/thread-private.h"
/*
Typedef declarations.
*/
struct _ImageView
{
char
*description;          /* user-supplied label; may be NULL (see DestroyImageView) */
RectangleInfo
extent;                /* region of the image this view covers */
Image
*image;                /* associated image; not owned (never destroyed here) */
CacheView
*view;                 /* pixel cache view; owned, destroyed with the ImageView */
ExceptionInfo
*exception;            /* per-view exception sink; owned */
MagickBooleanType
debug;
size_t
signature;             /* MagickCoreSignature while the view is live */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageView() makes a copy of the specified image view.
%
% The format of the CloneImageView method is:
%
% ImageView *CloneImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/*
 * Make a deep copy of an image view.  The CacheView and ExceptionInfo are
 * cloned; the Image pointer is shared (the view does not own the image --
 * DestroyImageView never frees it).
 *
 * Fixes: (1) the `image` member was never copied, so the memset left it
 * NULL and GetImageViewImage() on a clone returned NULL; (2) ConstantString
 * was called unconditionally, but `description` may legally be NULL (the
 * destructor guards for that case).
 */
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
  ImageView
    *clone_view;

  assert(image_view != (ImageView *) NULL);
  assert(image_view->signature == MagickCoreSignature);
  clone_view=(ImageView *) AcquireCriticalMemory(sizeof(*clone_view));
  (void) memset(clone_view,0,sizeof(*clone_view));
  if (image_view->description != (char *) NULL)
    clone_view->description=ConstantString(image_view->description);
  clone_view->image=image_view->image;
  clone_view->extent=image_view->extent;
  clone_view->view=CloneCacheView(image_view->view);
  clone_view->exception=AcquireExceptionInfo();
  InheritException(clone_view->exception,image_view->exception);
  clone_view->debug=image_view->debug;
  clone_view->signature=MagickCoreSignature;
  return(clone_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageView() deallocates memory associated with a image view.
%
% The format of the DestroyImageView method is:
%
% ImageView *DestroyImageView(ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/* Release all resources owned by the view (description string, cache view,
   exception) and the view itself.  The associated Image is NOT destroyed:
   the view does not own it.  The signature is inverted before freeing so a
   stale pointer fails the signature asserts.  Always returns NULL so the
   caller can write `view=DestroyImageView(view);`. */
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
if (image_view->description != (char *) NULL)
image_view->description=DestroyString(image_view->description);
image_view->view=DestroyCacheView(image_view->view);
image_view->exception=DestroyExceptionInfo(image_view->exception);
image_view->signature=(~MagickCoreSignature);
image_view=(ImageView *) RelinquishMagickMemory(image_view);
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DuplexTransferImageViewIterator() iterates over three image views in
% parallel and calls your transfer method for each scanline of the view. The
% source and duplex pixel extent is not confined to the image canvas-- that is
% you can include negative offsets or widths or heights that exceed the image
% dimension. However, the destination image view is confined to the image
% canvas-- that is no negative offsets or widths or heights that exceed the
% image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
% const ImageView *duplex,ImageView *destination,const ssize_t y,
% const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferImageViewIterator method is:
%
% MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
% ImageView *duplex,ImageView *destination,
% DuplexTransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o duplex: the duplex image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
/* Iterate over three views scanline by scanline (optionally in parallel via
   OpenMP), reading virtual pixels from `source` and `duplex` and writing
   authentic pixels to `destination` through the user `transfer` callback.
   Returns MagickFalse if any scanline fails; remaining iterations become
   no-ops once status is MagickFalse.
   NOTE(review): only `source` is assert-checked; `duplex` and `destination`
   are dereferenced without NULL/signature asserts -- confirm callers
   guarantee them. */
MagickExport MagickBooleanType DuplexTransferImageViewIterator(
ImageView *source,ImageView *duplex,ImageView *destination,
DuplexTransferImageViewMethod transfer,void *context)
{
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickCoreSignature);
if (transfer == (DuplexTransferImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
destination_image=destination->image;
/* writing requires DirectClass pixel storage on the destination */
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
const Quantum
*magick_restrict duplex_pixels,
*magick_restrict pixels;
Quantum
*magick_restrict destination_pixels;
/* a prior scanline failed: skip the rest (cannot break out of an
   OpenMP loop) */
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
duplex->extent.width,1,duplex->exception);
if (duplex_pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
if (destination_pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
status=MagickFalse;
/* push the callback's writes back into the destination cache */
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,source->description,progress,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticMetacontent() returns the image view authentic
% meta-content.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% void *GetImageViewAuthenticMetacontent(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/* Return the view's authentic meta-content queue, delegating to the
   underlying cache view.  The returned buffer is owned by the cache. */
MagickExport void *GetImageViewAuthenticMetacontent(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(GetCacheViewAuthenticMetacontent(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticPixels() returns the image view authentic pixels.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% Quantum *GetImageViewAuthenticPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/* Return the view's authentic pixel queue, delegating to the underlying
   cache view.  The returned buffer is owned by the cache. */
MagickExport Quantum *GetImageViewAuthenticPixels(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(GetCacheViewAuthenticPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewException() returns the severity, reason, and description of any
% error that occurs when utilizing a image view.
%
% The format of the GetImageViewException method is:
%
% char *GetImageViewException(const PixelImage *image_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
% o image_view: the pixel image_view.
%
% o severity: the severity of the error is returned here.
%
*/
/* Build a human-readable description of the view's pending exception.
   Writes the severity through *severity and returns a newly allocated
   string (caller must free it); the string is empty if no reason was
   recorded.  Format: "<reason> (<description>)". */
MagickExport char *GetImageViewException(const ImageView *image_view,
ExceptionType *severity)
{
char
*description;
assert(image_view != (const ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
assert(severity != (ExceptionType *) NULL);
*severity=image_view->exception->severity;
/* 2*MagickPathExtent: room for both the reason and description parts */
description=(char *) AcquireQuantumMemory(2UL*MagickPathExtent,
sizeof(*description));
if (description == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
*description='\0';
if (image_view->exception->reason != (char *) NULL)
(void) CopyMagickString(description,GetLocaleExceptionMessage(
image_view->exception->severity,image_view->exception->reason),
MagickPathExtent);
if (image_view->exception->description != (char *) NULL)
{
(void) ConcatenateMagickString(description," (",MagickPathExtent);
(void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
image_view->exception->severity,image_view->exception->description),
MagickPathExtent);
(void) ConcatenateMagickString(description,")",MagickPathExtent);
}
return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewExtent() returns the image view extent.
%
% The format of the GetImageViewExtent method is:
%
% RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/* Return (by value) the rectangular region this view covers. */
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(image_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewImage() returns the image associated with the image view.
%
% The format of the GetImageViewImage method is:
%
% MagickCore *GetImageViewImage(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
/* Return the image associated with the view (not a copy; the view does
   not transfer ownership). */
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(image_view->image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewIterator() iterates over the image view in parallel and calls
% your get method for each scanline of the view. The pixel extent is
% not confined to the image canvas-- that is you can include negative offsets
% or widths or heights that exceed the image dimension. Any updates to
% the pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetImageViewIterator method is:
%
% MagickBooleanType GetImageViewIterator(ImageView *source,
% GetImageViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
*/
/* Iterate over the view scanline by scanline (optionally in parallel via
   OpenMP), fetching virtual pixels and invoking the user `get` callback for
   each row.  Pixel writes by the callback are ignored (virtual pixels).
   Returns MagickFalse if any scanline fails; remaining iterations become
   no-ops once status is MagickFalse. */
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
GetImageViewMethod get,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickCoreSignature);
if (get == (GetImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
const Quantum
*pixels;
/* a prior scanline failed: skip the rest (cannot break out of an
   OpenMP loop) */
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (get(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,source->description,progress,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualMetacontent() returns the image view virtual
% meta-content.
%
% The format of the GetImageViewVirtualMetacontent method is:
%
% const void *GetImageViewVirtualMetacontent(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const void *GetImageViewVirtualMetacontent(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(GetCacheViewVirtualMetacontent(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualPixels() returns the image view virtual pixels.
%
% The format of the GetImageViewVirtualPixels method is:
%
% const Quantum *GetImageViewVirtualPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const Quantum *GetImageViewVirtualPixels(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
return(GetCacheViewVirtualPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageView() returns MagickTrue if the parameter is verified as a image
% view object.
%
% The format of the IsImageView method is:
%
% MagickBooleanType IsImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
if (image_view == (const ImageView *) NULL)
return(MagickFalse);
if (image_view->signature != MagickCoreSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageView() returns a image view required for all other methods in the
% Image View API.
%
% The format of the NewImageView method is:
%
% ImageView *NewImageView(MagickCore *wand,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageView(Image *image,ExceptionInfo *exception)
{
ImageView
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view));
(void) memset(image_view,0,sizeof(*image_view));
image_view->description=ConstantString("ImageView");
image_view->image=image;
image_view->view=AcquireVirtualCacheView(image_view->image,exception);
image_view->extent.width=image->columns;
image_view->extent.height=image->rows;
image_view->extent.x=0;
image_view->extent.y=0;
image_view->exception=AcquireExceptionInfo();
image_view->debug=IsEventLogging();
image_view->signature=MagickCoreSignature;
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewImageViewRegion() returns a image view required for all other methods
% in the Image View API.
%
% The format of the NewImageViewRegion method is:
%
% ImageView *NewImageViewRegion(MagickCore *wand,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o wand: the magick wand.
%
% o x,y,columns,rows: These values define the perimeter of a extent of
% pixel_wands view.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
const ssize_t y,const size_t width,const size_t height,
ExceptionInfo *exception)
{
ImageView
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
image_view=(ImageView *) AcquireCriticalMemory(sizeof(*image_view));
(void) memset(image_view,0,sizeof(*image_view));
image_view->description=ConstantString("ImageView");
image_view->view=AcquireVirtualCacheView(image_view->image,exception);
image_view->image=image;
image_view->extent.width=width;
image_view->extent.height=height;
image_view->extent.x=x;
image_view->extent.y=y;
image_view->exception=AcquireExceptionInfo();
image_view->debug=IsEventLogging();
image_view->signature=MagickCoreSignature;
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewDescription() associates a description with an image view.
%
% The format of the SetImageViewDescription method is:
%
% void SetImageViewDescription(ImageView *image_view,
% const char *description)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o description: the image view description.
%
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
const char *description)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickCoreSignature);
image_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewIterator() iterates over the image view in parallel and calls
% your set method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension. The pixels are initiallly
% undefined and any settings you make in the callback method are automagically
% synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetImageViewIterator method is:
%
% MagickBooleanType SetImageViewIterator(ImageView *destination,
% SetImageViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the image view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
SetImageViewMethod set,void *context)
{
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(destination != (ImageView *) NULL);
assert(destination->signature == MagickCoreSignature);
if (set == (SetImageViewMethod) NULL)
return(MagickFalse);
destination_image=destination->image;
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=destination->extent.height-destination->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(destination_image,destination_image,height,1)
#endif
for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
Quantum
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
y,destination->extent.width,1,destination->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (set(destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(destination_image,destination->description,
progress,destination->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransferImageViewIterator() iterates over two image views in parallel and
% calls your transfer method for each scanline of the view. The source pixel
% extent is not confined to the image canvas-- that is you can include
% negative offsets or widths or heights that exceed the image dimension.
% However, the destination image view is confined to the image canvas-- that
% is no negative offsets or widths or heights that exceed the image dimension
% are permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const ImageView *source,
% ImageView *destination,const ssize_t y,const int thread_id,
% void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferImageViewIterator method is:
%
% MagickBooleanType TransferImageViewIterator(ImageView *source,
% ImageView *destination,TransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source,
ImageView *destination,TransferImageViewMethod transfer,void *context)
{
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickCoreSignature);
if (transfer == (TransferImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
destination_image=destination->image;
status=SetImageStorageClass(destination_image,DirectClass,
destination->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
const Quantum
*magick_restrict pixels;
Quantum
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,
destination->exception);
if (destination_pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (transfer(source,destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,destination->exception);
if (sync == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,source->description,progress,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UpdateImageViewIterator() iterates over the image view in parallel and calls
% your update method for each scanline of the view. The pixel extent is
% confined to the image canvas-- that is no negative offsets or widths or
% heights that exceed the image dimension are permitted. Updates to pixels
% in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
% Use this pragma if the view is not single threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateImageViewIterator method is:
%
% MagickBooleanType UpdateImageViewIterator(ImageView *source,
% UpdateImageViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
UpdateImageViewMethod update,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickCoreSignature);
if (update == (UpdateImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
status=SetImageStorageClass(source_image,DirectClass,source->exception);
if (status == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=source->extent.height-source->extent.y;
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
Quantum
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
status=SyncCacheViewAuthenticPixels(source->view,source->exception);
if (status == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(source_image,source->description,progress,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
|
updater_basemaker-inl.h | /*!
* Copyright 2014 by Contributors
* \file updater_basemaker-inl.h
* \brief implement a common tree constructor
* \author Tianqi Chen
*/
#ifndef XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#define XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
#include <rabit/rabit.h>
#include <xgboost/base.h>
#include <xgboost/tree_updater.h>
#include <vector>
#include <algorithm>
#include <string>
#include <limits>
#include <utility>
#include "./param.h"
#include "../common/io.h"
#include "../common/random.h"
#include "../common/quantile.h"
namespace xgboost {
namespace tree {
/*!
* \brief base tree maker class that defines common operation
* needed in tree making
*/
class BaseMaker: public TreeUpdater {
public:
void Init(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.InitAllowUnknown(args);
}
protected:
// helper to collect and query feature meta information
struct FMetaHelper {
public:
/*! \brief find type of each feature, use column format */
inline void InitByCol(DMatrix* p_fmat,
const RegTree& tree) {
fminmax_.resize(tree.param.num_feature * 2);
std::fill(fminmax_.begin(), fminmax_.end(),
-std::numeric_limits<bst_float>::max());
// start accumulating statistics
for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
for (bst_uint fid = 0; fid < batch.Size(); ++fid) {
auto c = batch[fid];
if (c.size() != 0) {
fminmax_[fid * 2 + 0] =
std::max(-c[0].fvalue, fminmax_[fid * 2 + 0]);
fminmax_[fid * 2 + 1] =
std::max(c[c.size() - 1].fvalue, fminmax_[fid * 2 + 1]);
}
}
}
}
/*! \brief synchronize the information */
inline void SyncInfo() {
rabit::Allreduce<rabit::op::Max>(dmlc::BeginPtr(fminmax_), fminmax_.size());
}
// get feature type, 0:empty 1:binary 2:real
inline int Type(bst_uint fid) const {
CHECK_LT(fid * 2 + 1, fminmax_.size())
<< "FeatHelper fid exceed query bound ";
bst_float a = fminmax_[fid * 2];
bst_float b = fminmax_[fid * 2 + 1];
if (a == -std::numeric_limits<bst_float>::max()) return 0;
if (-a == b) {
return 1;
} else {
return 2;
}
}
inline bst_float MaxValue(bst_uint fid) const {
return fminmax_[fid *2 + 1];
}
inline void SampleCol(float p, std::vector<bst_uint> *p_findex) const {
std::vector<bst_uint> &findex = *p_findex;
findex.clear();
for (size_t i = 0; i < fminmax_.size(); i += 2) {
const auto fid = static_cast<bst_uint>(i / 2);
if (this->Type(fid) != 0) findex.push_back(fid);
}
auto n = static_cast<unsigned>(p * findex.size());
std::shuffle(findex.begin(), findex.end(), common::GlobalRandom());
findex.resize(n);
// sync the findex if it is subsample
std::string s_cache;
common::MemoryBufferStream fc(&s_cache);
dmlc::Stream& fs = fc;
if (rabit::GetRank() == 0) {
fs.Write(findex);
}
rabit::Broadcast(&s_cache, 0);
fs.Read(&findex);
}
private:
std::vector<bst_float> fminmax_;
};
// ------static helper functions ------
// helper function to get to next level of the tree
/*! \brief this is helper function for row based data*/
inline static int NextLevel(const SparsePage::Inst &inst, const RegTree &tree, int nid) {
const RegTree::Node &n = tree[nid];
bst_uint findex = n.SplitIndex();
for (const auto& ins : inst) {
if (findex == ins.index) {
if (ins.fvalue < n.SplitCond()) {
return n.LeftChild();
} else {
return n.RightChild();
}
}
}
return n.DefaultChild();
}
// ------class member helpers---------
/*! \brief initialize temp data structure */
inline void InitData(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree) {
CHECK_EQ(tree.param.num_nodes, tree.param.num_roots)
<< "TreeMaker: can only grow new tree";
const std::vector<unsigned> &root_index = fmat.Info().root_index_;
{
// setup position
position_.resize(gpair.size());
if (root_index.size() == 0) {
std::fill(position_.begin(), position_.end(), 0);
} else {
for (size_t i = 0; i < position_.size(); ++i) {
position_[i] = root_index[i];
CHECK_LT(root_index[i], (unsigned)tree.param.num_roots)
<< "root index exceed setting";
}
}
// mark delete for the deleted datas
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) position_[i] = ~position_[i];
}
// mark subsample
if (param_.subsample < 1.0f) {
std::bernoulli_distribution coin_flip(param_.subsample);
auto& rnd = common::GlobalRandom();
for (size_t i = 0; i < position_.size(); ++i) {
if (gpair[i].GetHess() < 0.0f) continue;
if (!coin_flip(rnd)) position_[i] = ~position_[i];
}
}
}
{
// expand query
qexpand_.reserve(256); qexpand_.clear();
for (int i = 0; i < tree.param.num_roots; ++i) {
qexpand_.push_back(i);
}
this->UpdateNode2WorkIndex(tree);
}
}
/*! \brief update queue expand add in new leaves */
inline void UpdateQueueExpand(const RegTree &tree) {
std::vector<int> newnodes;
for (int nid : qexpand_) {
if (!tree[nid].IsLeaf()) {
newnodes.push_back(tree[nid].LeftChild());
newnodes.push_back(tree[nid].RightChild());
}
}
// use new nodes for qexpand
qexpand_ = newnodes;
this->UpdateNode2WorkIndex(tree);
}
// return decoded position
inline int DecodePosition(bst_uint ridx) const {
const int pid = position_[ridx];
return pid < 0 ? ~pid : pid;
}
// encode the encoded position value for ridx
inline void SetEncodePosition(bst_uint ridx, int nid) {
if (position_[ridx] < 0) {
position_[ridx] = ~nid;
} else {
position_[ridx] = nid;
}
}
/*!
* \brief this is helper function uses column based data structure,
* reset the positions to the lastest one
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void ResetPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
// set the positions in the nondefault
this->SetNonDefaultPositionCol(nodes, p_fmat, tree);
this->SetDefaultPostion(p_fmat, tree);
}
/*!
* \brief helper function to set the non-leaf positions to default direction.
* This function can be applied multiple times and will get the same result.
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
inline void SetDefaultPostion(DMatrix *p_fmat,
const RegTree &tree) {
// set default direct nodes to default
// for leaf nodes that are not fresh, mark then to ~nid,
// so that they are ignored in future statistics collection
const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
const int nid = this->DecodePosition(ridx);
if (tree[nid].IsLeaf()) {
// mark finish when it is not a fresh leaf
if (tree[nid].RightChild() == -1) {
position_[ridx] = ~nid;
}
} else {
// push to default branch
if (tree[nid].DefaultLeft()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
}
}
/*!
* \brief this is helper function uses column based data structure,
* to CORRECT the positions of non-default directions that WAS set to default
* before calling this function.
* \param batch The column batch
* \param sorted_split_set The set of index that contains split solutions.
* \param tree the regression tree structure
*/
inline void CorrectNonDefaultPositionByBatch(
const SparsePage &batch, const std::vector<bst_uint> &sorted_split_set,
const RegTree &tree) {
for (size_t fid = 0; fid < batch.Size(); ++fid) {
auto col = batch[fid];
auto it = std::lower_bound(sorted_split_set.begin(), sorted_split_set.end(), fid);
if (it != sorted_split_set.end() && *it == fid) {
const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
CHECK(tree[nid].IsLeaf());
int pid = tree[nid].Parent();
// go back to parent, correct those who are not default
if (!tree[nid].IsRoot() && tree[pid].SplitIndex() == fid) {
if (fvalue < tree[pid].SplitCond()) {
this->SetEncodePosition(ridx, tree[pid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[pid].RightChild());
}
}
}
}
}
}
/*!
* \brief this is helper function uses column based data structure,
* \param nodes the set of nodes that contains the split to be used
* \param tree the regression tree structure
* \param out_split_set The split index set
*/
inline void GetSplitSet(const std::vector<int> &nodes,
const RegTree &tree,
std::vector<unsigned>* out_split_set) {
std::vector<unsigned>& fsplits = *out_split_set;
fsplits.clear();
// step 1, classify the non-default data into right places
for (int nid : nodes) {
if (!tree[nid].IsLeaf()) {
fsplits.push_back(tree[nid].SplitIndex());
}
}
std::sort(fsplits.begin(), fsplits.end());
fsplits.resize(std::unique(fsplits.begin(), fsplits.end()) - fsplits.begin());
}
/*!
* \brief this is helper function uses column based data structure,
* update all positions into nondefault branch, if any, ignore the default branch
* \param nodes the set of nodes that contains the split to be used
* \param p_fmat feature matrix needed for tree construction
* \param tree the regression tree structure
*/
virtual void SetNonDefaultPositionCol(const std::vector<int> &nodes,
DMatrix *p_fmat,
const RegTree &tree) {
std::vector<unsigned> fsplits;
this->GetSplitSet(nodes, tree, &fsplits);
for (const auto &batch : p_fmat->GetSortedColumnBatches()) {
for (auto fid : fsplits) {
auto col = batch[fid];
const auto ndata = static_cast<bst_omp_uint>(col.size());
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_uint ridx = col[j].index;
const bst_float fvalue = col[j].fvalue;
const int nid = this->DecodePosition(ridx);
// go back to parent, correct those who are not default
if (!tree[nid].IsLeaf() && tree[nid].SplitIndex() == fid) {
if (fvalue < tree[nid].SplitCond()) {
this->SetEncodePosition(ridx, tree[nid].LeftChild());
} else {
this->SetEncodePosition(ridx, tree[nid].RightChild());
}
}
}
}
}
}
/*! \brief helper function to get statistics from a tree */
template<typename TStats>
inline void GetNodeStats(const std::vector<GradientPair> &gpair,
const DMatrix &fmat,
const RegTree &tree,
std::vector< std::vector<TStats> > *p_thread_temp,
std::vector<TStats> *p_node_stats) {
std::vector< std::vector<TStats> > &thread_temp = *p_thread_temp;
const MetaInfo &info = fmat.Info();
thread_temp.resize(omp_get_max_threads());
p_node_stats->resize(tree.param.num_nodes);
#pragma omp parallel
{
const int tid = omp_get_thread_num();
thread_temp[tid].resize(tree.param.num_nodes, TStats(param_));
for (unsigned int nid : qexpand_) {
thread_temp[tid][nid].Clear();
}
}
// setup position
const auto ndata = static_cast<bst_omp_uint>(fmat.Info().num_row_);
#pragma omp parallel for schedule(static)
for (bst_omp_uint ridx = 0; ridx < ndata; ++ridx) {
const int nid = position_[ridx];
const int tid = omp_get_thread_num();
if (nid >= 0) {
thread_temp[tid][nid].Add(gpair, info, ridx);
}
}
// sum the per thread statistics together
for (int nid : qexpand_) {
TStats &s = (*p_node_stats)[nid];
s.Clear();
for (size_t tid = 0; tid < thread_temp.size(); ++tid) {
s.Add(thread_temp[tid][nid]);
}
}
}
/*! \brief common helper data structure to build sketch */
struct SketchEntry {
/*! \brief total sum of amount to be met */
double sum_total;
/*! \brief statistics used in the sketch */
double rmin, wmin;
/*! \brief last seen feature value */
bst_float last_fvalue;
/*! \brief current size of sketch */
double next_goal;
// pointer to the sketch to put things in
common::WXQuantileSketch<bst_float, bst_float> *sketch;
// initialize the space
inline void Init(unsigned max_size) {
next_goal = -1.0f;
rmin = wmin = 0.0f;
sketch->temp.Reserve(max_size + 1);
sketch->temp.size = 0;
}
/*!
* \brief push a new element to sketch
* \param fvalue feature value, comes in sorted ascending order
* \param w weight
* \param max_size
*/
inline void Push(bst_float fvalue, bst_float w, unsigned max_size) {
if (next_goal == -1.0f) {
next_goal = 0.0f;
last_fvalue = fvalue;
wmin = w;
return;
}
if (last_fvalue != fvalue) {
double rmax = rmin + wmin;
if (rmax >= next_goal && sketch->temp.size != max_size) {
if (sketch->temp.size == 0 ||
last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
CHECK_LT(sketch->temp.size, max_size)
<< "invalid maximum size max_size=" << max_size
<< ", stemp.size" << sketch->temp.size;
++sketch->temp.size;
}
if (sketch->temp.size == max_size) {
next_goal = sum_total * 2.0f + 1e-5f;
} else {
next_goal = static_cast<bst_float>(sketch->temp.size * sum_total / max_size);
}
} else {
if (rmax >= next_goal) {
LOG(TRACKER) << "INFO: rmax=" << rmax
<< ", sum_total=" << sum_total
<< ", naxt_goal=" << next_goal
<< ", size=" << sketch->temp.size;
}
}
rmin = rmax;
wmin = w;
last_fvalue = fvalue;
} else {
wmin += w;
}
}
/*! \brief push final unfinished value to the sketch */
inline void Finalize(unsigned max_size) {
double rmax = rmin + wmin;
if (sketch->temp.size == 0 || last_fvalue > sketch->temp.data[sketch->temp.size-1].value) {
CHECK_LE(sketch->temp.size, max_size)
<< "Finalize: invalid maximum size, max_size=" << max_size
<< ", stemp.size=" << sketch->temp.size;
// push to sketch
sketch->temp.data[sketch->temp.size] =
common::WXQuantileSketch<bst_float, bst_float>::
Entry(static_cast<bst_float>(rmin),
static_cast<bst_float>(rmax),
static_cast<bst_float>(wmin), last_fvalue);
++sketch->temp.size;
}
sketch->PushTemp();
}
};
/*! \brief training parameter of tree grower */
TrainParam param_;
/*! \brief queue of nodes to be expanded */
std::vector<int> qexpand_;
/*!
* \brief map active node to is working index offset in qexpand,
* can be -1, which means the node is node actively expanding
*/
std::vector<int> node2workindex_;
/*!
* \brief position of each instance in the tree
* can be negative, which means this position is no longer expanding
* see also Decode/EncodePosition
*/
std::vector<int> position_;
private:
inline void UpdateNode2WorkIndex(const RegTree &tree) {
// update the node2workindex
std::fill(node2workindex_.begin(), node2workindex_.end(), -1);
node2workindex_.resize(tree.param.num_nodes);
for (size_t i = 0; i < qexpand_.size(); ++i) {
node2workindex_[qexpand_[i]] = static_cast<int>(i);
}
}
};
} // namespace tree
} // namespace xgboost
#endif // XGBOOST_TREE_UPDATER_BASEMAKER_INL_H_
|
mkl_util.h | /* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#define TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
#ifdef INTEL_MKL
#include <string>
#include <vector>
#include "mkl_dnn.h"
#include "mkl_dnn_types.h"
#include "mkl_service.h"
#include "mkl_trans.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/graph/mkl_graph_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/gtl/array_slice.h"
#include "tensorflow/core/platform/logging.h"
#include "tensorflow/core/platform/macros.h"
#include "tensorflow/core/util/padding.h"
#include "tensorflow/core/util/tensor_format.h"
#ifndef INTEL_MKL_ML
#include "mkldnn.hpp"
using mkldnn::engine;
using mkldnn::memory;
using mkldnn::padding_kind;
using mkldnn::primitive;
using mkldnn::reorder;
#endif
// The file contains a number of utility classes and functions used by MKL
// enabled kernels
namespace tensorflow {
// This class encapsulates all the meta data that is associated with an MKL
// tensor. A tensor is an MKL tensor if it was created as the result of an
// MKL operation, and did not go through a conversion to a standard
// Tensorflow tensor.
typedef enum { W = 0, H = 1, C = 2, N = 3 } MklDims;
// Dimension indices used by MKL-DNN; Dim_O/Dim_I alias Dim_N/Dim_C for
// filter (output/input channel) dimensions.
enum MklDnnDims {
  Dim_N = 0,
  Dim_C = 1,
  Dim_H = 2,
  Dim_W = 3,
  Dim_O = 0,
  Dim_I = 1
};
// Holds the MKL-ML metadata for one tensor: the is-MKL flag, dimension
// count, sizes/strides, the MKL and TF layout handles, and the TF->MKL
// dimension-order map. Owns every array and layout handle it creates.
class MklShape {
 public:
  MklShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklShape);  // Cannot copy

  // Frees the owned arrays and deletes both MKL layout handles, if created.
  ~MklShape() {
    if (sizes_) delete[] sizes_;
    if (strides_) delete[] strides_;
    if (mklLayout_) CHECK_EQ(dnnLayoutDelete_F32(mklLayout_), E_SUCCESS);
    if (tfLayout_) CHECK_EQ(dnnLayoutDelete_F32(tfLayout_), E_SUCCESS);
    if (tf_to_mkl_dim_map_) delete[] tf_to_mkl_dim_map_;
  }

  const bool IsMklTensor() const { return isMklTensor_; }
  void SetMklTensor(const bool isMklTensor) { isMklTensor_ = isMklTensor; }
  void SetDimensions(const size_t dimension) { dimension_ = dimension; }
  // Takes over an already-created MKL layout handle (deleted in the dtor).
  void SetMklLayout(dnnLayout_t mklLayout) { mklLayout_ = mklLayout; }
  // Derives the MKL layout from the given primitive's resource slot.
  // NOTE(review): overwrites any existing mklLayout_ without freeing it —
  // appears to be set-once by convention; confirm against callers.
  void SetMklLayout(const void* primitive, size_t resourceType) {
    CHECK_EQ(
        dnnLayoutCreateFromPrimitive_F32(&mklLayout_, (dnnPrimitive_t)primitive,
                                         (dnnResourceType_t)resourceType),
        E_SUCCESS);
  }
  // Records the TF-side sizes/strides (copied into owned arrays) and creates
  // the matching MKL layout. NOTE(review): calling this twice would leak the
  // previously allocated arrays — looks set-once; confirm.
  void SetTfLayout(const size_t dimension, const size_t* sizes,
                   const size_t* strides) {
    dimension_ = dimension;
    if (dimension > 0) {  // MKl doesn't support zero dimension tensors
      sizes_ = new size_t[dimension];
      strides_ = new size_t[dimension];
      for (int ii = 0; ii < dimension; ii++) {
        sizes_[ii] = sizes[ii];
        strides_[ii] = strides[ii];
      }
      CHECK_EQ(dnnLayoutCreate_F32(&tfLayout_, dimension, sizes, strides),
               E_SUCCESS);
    }
  }

  // Default case - MKL dim ordering is opposite of TF dim ordering
  // MKL -> (DIMS-1)...0 where (DIMS-1) is outermost dim and 0 is innermost dim
  // TF -> 0...(DIMS-1) where 0 is outermost dim and (DIMS-1) is innermost dim
  // For layers that rely on data_format semantics (conv, pooling etc.)
  // or operate only on certain dimensions (relu, concat, split etc.),
  // Mkl APIs might require us to reorder these dimensions. In such cases,
  // kernels should explicitly set this map
  void SetTfDimOrder(const size_t dimension) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = dimension - (ii + 1);
    }
  }

  // Caller-supplied explicit TF->MKL dimension map; entries are copied.
  void SetTfDimOrder(const size_t dimension, const size_t* tf_to_mkl_dim_map) {
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    for (size_t ii = 0; ii < dimension; ii++) {
      tf_to_mkl_dim_map_[ii] = tf_to_mkl_dim_map[ii];
    }
  }

  // Builds the map from a TF data_format; only 4-D tensors are supported.
  void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    CHECK_EQ(dimension, 4);
    CHECK(dimension == dimension_);
    if (tf_to_mkl_dim_map_ == nullptr) {
      tf_to_mkl_dim_map_ = new size_t[dimension];
    }
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDims::W;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDims::H;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDims::C;
    tf_to_mkl_dim_map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDims::N;
  }

  const dnnLayout_t GetMklLayout() const { return mklLayout_; }
  const dnnLayout_t GetTfLayout() const { return tfLayout_; }
  // Layout that currently describes the tensor's data buffer.
  const dnnLayout_t GetCurLayout() const {
    return isMklTensor_ ? mklLayout_ : tfLayout_;
  }
  size_t GetDimension() const { return dimension_; }
  const size_t* GetSizes() const { return sizes_; }
  // Size of dimension 'index' in MKL order.
  int64 dim_size(int index) const { return sizes_[index]; }
  // Size of TF dimension 'index', mapped through the TF->MKL dim map.
  int64 tf_dim_size(int index) const {
    return sizes_[tf_to_mkl_dim_map_[index]];
  }
  const size_t* GetStrides() const { return strides_; }
  const size_t* GetTfToMklDimMap() const { return tf_to_mkl_dim_map_; }
  // MKL dimension index corresponding to TF dimension 'index'.
  size_t tf_dim_idx(int index) const { return tf_to_mkl_dim_map_[index]; }

  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Channel dimension.
  bool IsMklChannelDim(int d) const { return tf_dim_idx(d) == MklDims::C; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Batch dimension.
  bool IsMklBatchDim(int d) const { return tf_dim_idx(d) == MklDims::N; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Width dimension.
  bool IsMklWidthDim(int d) const { return tf_dim_idx(d) == MklDims::W; }
  // Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  // corresponds to MKL's Height dimension.
  bool IsMklHeightDim(int d) const { return tf_dim_idx(d) == MklDims::H; }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NCHW format.
  bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Check if the TF-Mkl dimension ordering map specifies if the input
  // tensor is in NHWC format.
  bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  // Converts 'input' (flat data in this shape's current layout) into
  // 'output' laid out as 'targetLayout', via a one-shot MKL conversion
  // primitive. Both buffers must be pre-allocated by the caller.
  void GetConvertedFlatData(dnnLayout_t targetLayout, void* input,
                            void* output) const {
    dnnLayout_t curLayout;
    if (isMklTensor_)
      curLayout = mklLayout_;
    else
      curLayout = tfLayout_;
    dnnPrimitive_t convert;
    CHECK_EQ(dnnConversionCreate_F32(&convert, curLayout, targetLayout),
             E_SUCCESS);
    CHECK_EQ(dnnConversionExecute_F32(convert, input, output), E_SUCCESS);
    CHECK_EQ(dnnDelete_F32(convert), E_SUCCESS);
  }

  // The following methods are used for serializing and de-serializing the
  // contents of the mklshape object.
  // The data is serialized in this order
  // isMklTensor_
  // dimension_
  // sizes_
  // strides_
  // mklLayout_
  // tfLayout_
  // tf_to_mkl_dim_map_

#define SIZE_OF_MKL_DNN_BUF \
  (dnnLayoutSerializationBufferSize_F32())  // Size of buffer needed to
                                            // serialize dnn_layout pointer

  // Size of buffer to hold the serialized object, the size is computed as
  // follows sizeof(isMklTensor_) + sizeof(dimension_) + sizeof(sizes_) +
  // sizeof(strides_)
  // + sizeof(mklLayout_ buffer) + sizeof(tfLayout_ buffer)
  // + sizeof(tf_to_mkl_dim_map_)
#define SIZE_OF_MKL_SERIAL_DATA(dims) \
  (2 * sizeof(size_t) + 3 * dims * sizeof(size_t) + 2 * SIZE_OF_MKL_DNN_BUF)

  // First we need to define some macro for offsets into the serial buffer where
  // different elements of Mklshape is written/read from
#define IS_MKL_TENSOR_OFFSET 0
  // Location from start of buffer where isMklTensor_ is serialized
#define DIMS_OFFSET \
  (IS_MKL_TENSOR_OFFSET + sizeof(size_t))  // Location of dimension_
  // Location of sizes. Note dim is not used here, left here
  // to make macros consistent.
#define SIZES_OFFSET(dims) (DIMS_OFFSET + sizeof(size_t))
#define STRIDES_OFFSET(dims) \
  (SIZES_OFFSET(dims) + dims * sizeof(size_t))  // Location of strides
#define MKL_LAYOUT_OFFSET(dims) \
  (STRIDES_OFFSET(dims) + dims * sizeof(size_t))  // Location of mklLayout_
#define TF_LAYOUT_OFFSET(dims) \
  (MKL_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)  // Location of tfLayout_
  // Location of tf_to_mkl_dim_map_
#define TF_TO_MKL_DIM_MAP_OFFSET(dims) \
  (TF_LAYOUT_OFFSET(dims) + SIZE_OF_MKL_DNN_BUF)

  // TODO(agramesh1) make sure to create a const to share with rewrite pass
  // for min size of MKL metadata tensor.

  // Rebuilds this object from a buffer produced by SerializeMklShape:
  // allocates fresh arrays and deserializes both layouts when the buffer
  // describes an MKL tensor; otherwise only the flag is read.
  void DeSerializeMklShape(const unsigned char* buf, size_t buf_size) {
    CHECK(buf_size >= sizeof(size_t)) << "Bufsize too small in DeSerialize";
    // Make sure buffer holds at least isMklTensor_
    isMklTensor_ =
        *reinterpret_cast<const size_t*>(buf + IS_MKL_TENSOR_OFFSET) != 0;

    if (isMklTensor_) {  // If it is an MKL Tensor then read the rest
      dimension_ = *(reinterpret_cast<const size_t*>(buf + DIMS_OFFSET));
      CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
          << "Bufsize too small in DeSerialize";
      sizes_ = new size_t[dimension_];
      strides_ = new size_t[dimension_];
      tf_to_mkl_dim_map_ = new size_t[dimension_];
      for (int i = 0; i < dimension_; i++) {
        sizes_[i] =
            reinterpret_cast<const size_t*>(buf + SIZES_OFFSET(dimension_))[i];
        strides_[i] = reinterpret_cast<const size_t*>(
            buf + STRIDES_OFFSET(dimension_))[i];
        tf_to_mkl_dim_map_[i] = reinterpret_cast<const size_t*>(
            buf + TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i];
      }
      CHECK_EQ(dnnLayoutDeserialize_F32(&mklLayout_,
                                        buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(dnnLayoutDeserialize_F32(&tfLayout_,
                                        buf + TF_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
    }
  }

  // Writes this object into 'buf' using the offsets documented above; only
  // the isMklTensor_ flag is written for non-MKL tensors.
  void SerializeMklShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= SIZE_OF_MKL_SERIAL_DATA(dimension_))
        << "Bufsize too small to Serialize";
    *reinterpret_cast<size_t*>(buf + IS_MKL_TENSOR_OFFSET) =
        isMklTensor_ ? 1 : 0;
    if (isMklTensor_) {
      *(reinterpret_cast<size_t*>(buf + DIMS_OFFSET)) = dimension_;
      for (int i = 0; i < dimension_; i++) {
        reinterpret_cast<size_t*>(buf + SIZES_OFFSET(dimension_))[i] =
            sizes_[i];
        reinterpret_cast<size_t*>(buf + STRIDES_OFFSET(dimension_))[i] =
            strides_[i];
        reinterpret_cast<size_t*>(buf +
                                  TF_TO_MKL_DIM_MAP_OFFSET(dimension_))[i] =
            tf_to_mkl_dim_map_[i];
      }
      CHECK_EQ(dnnLayoutSerialize_F32(mklLayout_,
                                      buf + MKL_LAYOUT_OFFSET(dimension_)),
               E_SUCCESS);
      CHECK_EQ(
          dnnLayoutSerialize_F32(tfLayout_, buf + TF_LAYOUT_OFFSET(dimension_)),
          E_SUCCESS);
    }
  }

 private:
  bool isMklTensor_ =
      false;  // Flag to indicate if the tensor is an MKL tensor or not
  dnnLayout_t mklLayout_ = nullptr;  // Pointer to the MKL layout
  dnnLayout_t tfLayout_ = nullptr;   // Pointer to layout of corresponding
  // Tensorflow tensor, used when conversion from MKL to standard tensor
  size_t dimension_ = 0;
  size_t* sizes_ = nullptr;    // Required by MKL for conversions
  size_t* strides_ = nullptr;  // Required by MKL for conversions
  size_t* tf_to_mkl_dim_map_ =
      nullptr;  // TF dimension corresponding to this MKL dimension
};
#ifndef INTEL_MKL_ML
// Forward decl
TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format);
memory::dims CalculateTFStrides(const memory::dims& dims_tf_order);
memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
const memory::dims& strides,
memory::data_type dtype);
// MKL-DNN counterpart of MklShape: holds the is-MKL flag, TF dimension
// count, sizes, TF data format, element type, the MKL memory descriptor and
// the TF->MKL dimension map for one tensor.
class MklDnnShape {
 private:
  typedef struct {
    /// Flag to indicate if the tensor is an MKL tensor or not
    bool is_mkl_tensor_ = false;
    /// Number of dimensions in Tensorflow format
    size_t dimension_ = 0;
    /// Required by MKLDNN for conversions
    mkldnn_dims_t sizes_;  // Required by MKL for conversions
    memory::format tf_data_format_ = memory::format::format_undef;
    memory::data_type T_ = memory::data_type::data_undef;
    // MKL layout
    mkldnn_memory_desc_t mkl_md_;
    /// TF dimension corresponding to this MKL dimension
    mkldnn_dims_t map_;
  } MklShapeData;
  // All state lives in this one struct so (de)serialization below can be a
  // single struct assignment.
  MklShapeData data_;

  // Element type of the fixed-size mkldnn_dims_t arrays.
  typedef std::remove_extent<mkldnn_dims_t>::type mkldnn_dim_t;

// Sentinel stored in unused slots of sizes_ / map_.
#define INVALID_DIM_SIZE -1

 public:
  // Fills every sizes_/map_ slot with the invalid sentinel (-1).
  MklDnnShape() {
    for (size_t i = 0; i < sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
         ++i) {
      data_.sizes_[i] = -1;
    }
    for (size_t i = 0; i < sizeof(data_.map_) / sizeof(data_.map_[0]); ++i) {
      data_.map_[i] = -1;
    }
  }

  ~MklDnnShape() {}
  TF_DISALLOW_COPY_AND_ASSIGN(MklDnnShape);  // Cannot copy

  /// Helper function to compare memory::desc objects for MklDnn.
  /// May be this should go into MklDnn directly.
  inline bool CompareMklDnnLayouts(const memory::desc& md1,
                                   const memory::desc& md2) const {
    // Byte-wise comparison of the two underlying C descriptor structs.
    mkldnn_memory_desc_t mdd1 = md1.data;
    mkldnn_memory_desc_t mdd2 = md2.data;
    const char* d1 = reinterpret_cast<const char*>(&mdd1);
    const char* d2 = reinterpret_cast<const char*>(&mdd2);

    size_t md_size = sizeof(mdd1);
    for (size_t i = 0; i < md_size; i++) {
      if (*d1++ != *d2++) {
        return false;
      }
    }
    return true;
  }

  /// Equality function for MklDnnShape objects
  /// @return true if both are equal; false otherwise.
  inline bool operator==(const MklDnnShape& input_shape) const {
    if (this->IsMklTensor() != input_shape.IsMklTensor()) {
      return false;
    }

    // If input tensors are in Mkl layout, then we check for dimensions and
    // sizes.
    if (this->IsMklTensor()) {
      return this->GetTfShape() == input_shape.GetTfShape() &&
             CompareMklDnnLayouts(this->GetMklLayout(),
                                  input_shape.GetMklLayout());
    }

    // Two non-MKL shapes compare equal here regardless of their TF shapes.
    return true;
  }

  /// Equality operator for MklDnnShape and TFShape.
  /// Returns: true if TF shapes for both are the same, false otherwise
  inline bool operator==(const TensorShape& input_shape) const {
    // A non-MKL MklDnnShape never compares equal to a TensorShape here.
    if (!this->IsMklTensor()) {
      return false;
    }

    return this->GetTfShape() == input_shape;
  }

  inline const bool IsMklTensor() const { return data_.is_mkl_tensor_; }
  inline void SetMklTensor(bool is_mkl_tensor) {
    data_.is_mkl_tensor_ = is_mkl_tensor;
  }

  inline void SetDimensions(const size_t dimension) {
    data_.dimension_ = dimension;
  }

  // Size of the dimension named by letter ('N'/'C'/'H'/'W').
  inline size_t GetDimension(char dimension) const {
    int index = GetMklDnnTensorDimIndex(dimension);
    CHECK(index >= 0 && index < this->GetDimension())
        << "Invalid index from the dimension: " << index << ", " << dimension;
    return this->DimSize(index);
  }

  // Maps a dimension letter onto the MklDnnDims index; aborts on anything
  // other than N/C/H/W.
  inline int32 GetMklDnnTensorDimIndex(char dimension) const {
    switch (dimension) {
      case 'N':
        return MklDnnDims::Dim_N;
      case 'C':
        return MklDnnDims::Dim_C;
      case 'H':
        return MklDnnDims::Dim_H;
      case 'W':
        return MklDnnDims::Dim_W;
      default:
        LOG(FATAL) << "Invalid dimension: " << dimension;
        return -1;  // Avoid compiler warning about missing return value
    }
  }

  inline size_t GetDimension() const { return data_.dimension_; }
  inline const int* GetSizes() const {
    return reinterpret_cast<const int*>(&data_.sizes_[0]);
  }

  // Returns an mkldnn::memory::dims object that contains the sizes of this
  // MklDnnShape object.
  inline memory::dims GetSizesAsMklDnnDims() const {
    memory::dims retVal;
    if (data_.is_mkl_tensor_) {
      size_t dimensions = sizeof(data_.sizes_) / sizeof(data_.sizes_[0]);
      for (size_t i = 0; i < dimensions; i++) {
        // Skip unset slots (sentinel from the constructor).
        if (data_.sizes_[i] != INVALID_DIM_SIZE)
          retVal.push_back(data_.sizes_[i]);
      }
    } else {
      // Only valid for MKL tensors; crash loudly otherwise.
      CHECK_EQ(data_.is_mkl_tensor_, true);
    }
    return retVal;
  }

  inline int64 DimSize(int index) const {
    CHECK_LT(index, sizeof(data_.sizes_) / sizeof(data_.sizes_[0]));
    return data_.sizes_[index];
  }

  /// Return TensorShape that describes the Tensorflow shape of the tensor
  /// represented by this MklShape.
  inline TensorShape GetTfShape() const {
    CHECK_EQ(data_.is_mkl_tensor_, true);

    std::vector<int32> shape(data_.dimension_, -1);
    if (data_.tf_data_format_ != memory::format::blocked) {
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[TfDimIdx(idx)];
      }
    } else {
      // If Tensorflow shape is in Blocked format, then we don't have dimension
      // map for it. So we just create Tensorflow shape from sizes in the
      // specified order.
      for (size_t idx = 0; idx < data_.dimension_; ++idx) {
        shape[idx] = data_.sizes_[idx];
      }
    }

    TensorShape ts;
    bool ret = TensorShapeUtils::MakeShape(shape, &ts).ok();
    CHECK_EQ(ret, true);
    return ts;
  }

  inline void SetElemType(memory::data_type dt) { data_.T_ = dt; }
  inline const memory::data_type GetElemType() { return data_.T_; }

  // Only the C descriptor is stored; the wrapper object is not retained.
  inline void SetMklLayout(memory::primitive_desc* pd) {
    CHECK_NOTNULL(pd);
    data_.mkl_md_ = pd->desc().data;
  }

  inline void SetMklLayout(memory::desc* md) {
    CHECK_NOTNULL(md);
    data_.mkl_md_ = md->data;
  }

  inline const memory::desc GetMklLayout() const {
    return memory::desc(data_.mkl_md_);
  }

  inline memory::format GetTfDataFormat() const {
    return data_.tf_data_format_;
  }

  /// We don't create primitive_descriptor for TensorFlow layout now.
  /// We use lazy evaluation and create it only when needed. Input format can
  /// also be Blocked format.
  inline void SetTfLayout(size_t dims, const memory::dims& sizes,
                          memory::format format) {
    CHECK_EQ(dims, sizes.size());
    data_.dimension_ = dims;
    for (size_t ii = 0; ii < dims; ii++) {
      data_.sizes_[ii] = sizes[ii];
    }
    data_.tf_data_format_ = format;
    if (format != memory::format::blocked) {
      // Blocked format carries no meaningful dimension-order map.
      SetTfDimOrder(dims, format);
    }
  }

  inline const memory::desc GetTfLayout() const {
    memory::dims dims;
    for (size_t ii = 0; ii < data_.dimension_; ii++) {
      dims.push_back(data_.sizes_[ii]);
    }

    // Create Blocked memory desc if input TF format was set like that.
    if (data_.tf_data_format_ == memory::format::blocked) {
      auto strides = CalculateTFStrides(dims);
      return CreateBlockedMemDescHelper(dims, strides, data_.T_);
    } else {
      return memory::desc(dims, data_.T_, data_.tf_data_format_);
    }
  }

  // Descriptor for the layout the data currently lives in.
  inline const memory::desc GetCurLayout() const {
    return IsMklTensor() ? GetMklLayout() : GetTfLayout();
  }

  // nhasabni - I've removed SetTfDimOrder that was setting default order in
  // case of MKL-ML. We don't need a case of default dimension order because
  // when an operator that does not get data_format attribute gets all inputs
  // in Tensorflow format, it will produce output in Tensorflow format.
  inline void SetTfDimOrder(const size_t dimension, const mkldnn_dims_t map) {
    CHECK(dimension == data_.dimension_);
    for (size_t ii = 0; ii < dimension; ii++) {
      data_.map_[ii] = map[ii];
    }
  }

  inline void SetTfDimOrder(const size_t dimension, TensorFormat data_format) {
    // TODO(nhasabni): Why do we restrict this to 4D?
    CHECK_EQ(dimension, 4);
    CHECK(dimension == data_.dimension_);
    data_.map_[GetTensorDimIndex<2>(data_format, 'W')] = MklDnnDims::Dim_W;
    data_.map_[GetTensorDimIndex<2>(data_format, 'H')] = MklDnnDims::Dim_H;
    data_.map_[GetTensorDimIndex<2>(data_format, 'C')] = MklDnnDims::Dim_C;
    data_.map_[GetTensorDimIndex<2>(data_format, 'N')] = MklDnnDims::Dim_N;
  }

  inline void SetTfDimOrder(const size_t dimension, memory::format format) {
    TensorFormat data_format = MklDnnDataFormatToTFDataFormat(format);
    SetTfDimOrder(dimension, data_format);
  }

  inline const mkldnn_dim_t* GetTfToMklDimMap() const { return &data_.map_[0]; }
  // MKL dimension index for TF dimension 'index'.
  inline size_t TfDimIdx(int index) const { return data_.map_[index]; }
  // Size of TF dimension 'index', mapped through the dim map.
  inline int64 TfDimSize(int index) const {
    return data_.sizes_[TfDimIdx(index)];
  }

  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Channel dimension.
  inline bool IsMklChannelDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_C;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Batch dimension.
  inline bool IsMklBatchDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_N;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Width dimension.
  inline bool IsMklWidthDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_W;
  }
  /// Query TF-MKL dimension ordering map and check if Tensorflow dimension 'd'
  /// corresponds to MKL's Height dimension.
  inline bool IsMklHeightDim(int d) const {
    return TfDimIdx(d) == MklDnnDims::Dim_H;
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NCHW format.
  inline bool IsTensorInNCHWFormat() const {
    TensorFormat data_format = FORMAT_NCHW;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// Check if the TF-Mkl dimension ordering map specifies if the input
  /// tensor is in NHWC format.
  inline bool IsTensorInNHWCFormat() const {
    TensorFormat data_format = FORMAT_NHWC;
    return (IsMklBatchDim(GetTensorDimIndex<2>(data_format, 'N')) &&
            IsMklChannelDim(GetTensorDimIndex<2>(data_format, 'C')) &&
            IsMklHeightDim(GetTensorDimIndex<2>(data_format, 'H')) &&
            IsMklWidthDim(GetTensorDimIndex<2>(data_format, 'W')));
  }

  /// The following methods are used for serializing and de-serializing the
  /// contents of the mklshape object.
  /// The data is serialized in this order
  /// is_mkl_tensor_ : dimension_ : sizes_ : map_: format_ : T_ : mkl_pd_;

  /// Size of buffer to hold the serialized object, the size is computed by
  /// following above mentioned order
  inline size_t GetSerializeBufferSize() const { return sizeof(MklShapeData); }

  // Serialization is a raw struct copy into 'buf'.
  void SerializeMklDnnShape(unsigned char* buf, size_t buf_size) const {
    CHECK(buf_size >= GetSerializeBufferSize())
        << "Buffer size is too small to SerializeMklDnnShape";
    *reinterpret_cast<MklShapeData*>(buf) = data_;
  }

  // Reads the flag first; the whole struct is copied back only for MKL
  // tensors (otherwise data_ keeps its constructor defaults).
  void DeSerializeMklDnnShape(const unsigned char* buf, size_t buf_size) {
    // Make sure buffer holds at least is_mkl_tensor_.
    CHECK(buf_size >= sizeof(data_.is_mkl_tensor_))
        << "Buffer size is too small in DeSerializeMklDnnShape";

    const bool is_mkl_tensor = *reinterpret_cast<const bool*>(buf);
    if (is_mkl_tensor) {  // If it is an MKL Tensor then read the rest
      CHECK(buf_size >= GetSerializeBufferSize())
          << "Buffer size is too small in DeSerializeMklDnnShape";
      data_ = *reinterpret_cast<const MklShapeData*>(buf);
    }
  }
};
#endif
// List of MklShape objects. Used in Concat/Split layers.
typedef std::vector<MklShape> MklShapeList;
#ifndef INTEL_MKL_ML
typedef std::vector<MklDnnShape> MklDnnShapeList;
#endif
// Check if all tensors specified by MklShapes are MKL tensors.
inline bool AreAllMklTensors(const MklShapeList& shapes) {
for (auto& s : shapes) {
if (!s.IsMklTensor()) {
return false;
}
}
return true;
}
#ifdef INTEL_MKL_ML
// Converts an MKL-ML tensor back into a plain TF tensor: builds the TF-order
// shape from the MKL metadata, allocates a temp tensor, and runs the MKL
// layout conversion into it. Returns the converted tensor.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklShape& mkl_shape) {
  Tensor output_tensor;
  TensorShape output_shape;
  for (size_t j = 0; j < mkl_shape.GetDimension(); j++) {
    // Outermost to innermost dimension
    output_shape.AddDim(mkl_shape.GetSizes()[mkl_shape.tf_dim_idx(j)]);
  }

  // Allocate output tensor. The previous code discarded the Status returned
  // by allocate_temp, so an allocation failure silently produced an empty
  // tensor and a likely crash in the conversion below; fail fast instead.
  TF_CHECK_OK(context->allocate_temp(DataTypeToEnum<T>::v(), output_shape,
                                     &output_tensor));

  dnnLayout_t output_layout = static_cast<dnnLayout_t>(mkl_shape.GetTfLayout());
  void* input_buffer = const_cast<T*>(mkl_tensor.flat<T>().data());
  void* output_buffer = const_cast<T*>(output_tensor.flat<T>().data());
  if (mkl_tensor.NumElements() != 0) {
    mkl_shape.GetConvertedFlatData(output_layout, input_buffer, output_buffer);
  }
  return output_tensor;
}
#else
// MKL-DNN variant is not implemented yet; crash loudly if ever called.
template <typename T>
inline Tensor ConvertMklToTF(OpKernelContext* context, const Tensor& mkl_tensor,
                             const MklDnnShape& mkl_shape) {
  Tensor output_tensor;
  const Status unimplemented(error::Code::UNIMPLEMENTED,
                             "Unimplemented conversion function");
  TF_CHECK_OK(unimplemented);
  return output_tensor;
}
#endif
// Get the MKL shape from the second string tensor
inline void GetMklShape(OpKernelContext* ctext, int n, MklShape* mklshape) {
  // Look up the meta-data tensor once instead of twice.
  const Tensor& meta_input =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklShape(
      meta_input.flat<uint8>().data(),
      meta_input.flat<uint8>().size() * sizeof(uint8));
}
#ifndef INTEL_MKL_ML
// MKL-DNN variant: deserialize the n-th meta-data input into 'mklshape'.
inline void GetMklShape(OpKernelContext* ctext, int n, MklDnnShape* mklshape) {
  // Look up the meta-data tensor once instead of twice.
  const Tensor& meta_input =
      ctext->input(GetTensorMetaDataIndex(n, ctext->num_inputs()));
  mklshape->DeSerializeMklDnnShape(
      meta_input.flat<uint8>().data(),
      meta_input.flat<uint8>().size() * sizeof(uint8));
}
#endif
// Gets the actual input
inline const Tensor& MklGetInput(OpKernelContext* ctext, int n) {
  const int data_idx = GetTensorDataIndex(n, ctext->num_inputs());
  return ctext->input(data_idx);
}
// Fetches the op input list called 'name' into 'input_tensors'.
// 'input_tensors' must be non-null. NOTE(review): the value returned by
// input_list appears to be discarded here — confirm that is intentional.
inline void GetMklInputList(OpKernelContext* ctext, StringPiece name,
                            OpInputList* input_tensors) {
  CHECK_NOTNULL(input_tensors);
  ctext->input_list(name, input_tensors);
}
#ifdef INTEL_MKL_ML
// De-serializes every "mkl_<name>" meta-data tensor into the corresponding
// slot of 'mkl_shapes' (which the caller must have sized already).
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklShapeList* mkl_shapes) {
  OpInputList meta_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &meta_tensors);
  for (int idx = 0; idx < meta_tensors.size(); ++idx) {
    const Tensor& meta = meta_tensors[idx];
    (*mkl_shapes)[idx].DeSerializeMklShape(
        meta.flat<uint8>().data(), meta.flat<uint8>().size() * sizeof(uint8));
  }
}
#else
// MKL-DNN variant: de-serializes every "mkl_<name>" meta-data tensor into
// the corresponding slot of 'mkl_shapes' (caller must size the list).
inline void GetMklShapeList(OpKernelContext* ctext, StringPiece name,
                            MklDnnShapeList* mkl_shapes) {
  OpInputList meta_tensors;
  GetMklInputList(ctext, strings::StrCat("mkl_", name), &meta_tensors);
  for (int idx = 0; idx < meta_tensors.size(); ++idx) {
    const Tensor& meta = meta_tensors[idx];
    (*mkl_shapes)[idx].DeSerializeMklDnnShape(
        meta.flat<uint8>().data(), meta.flat<uint8>().size() * sizeof(uint8));
  }
}
#endif
#ifndef INTEL_MKL_ML
/// Get shape of input tensor pointed by 'input_idx' in TensorShape format.
/// If the input tensor is in MKL layout, then obtains TensorShape from
/// MklShape.
inline TensorShape GetTfShape(OpKernelContext* context, size_t input_idx) {
  // Sanity check.
  CHECK_NOTNULL(context);
  CHECK_LT(input_idx, context->num_inputs());

  MklDnnShape mkl_shape;
  GetMklShape(context, input_idx, &mkl_shape);
  if (!mkl_shape.IsMklTensor()) {
    // Plain TF tensor: the shape comes straight off the data tensor.
    return MklGetInput(context, input_idx).shape();
  }
  // MKL tensor: reconstruct the TF shape from the serialized metadata.
  return mkl_shape.GetTfShape();
}
#endif
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklShape& mkl_shape) {
  // Meta-data output is a flat uint8 tensor sized for the serialized shape.
  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#ifndef INTEL_MKL_ML
// Allocate the second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      const MklDnnShape& mkl_shape) {
  // Meta-data output is a flat uint8 tensor sized for the serialized shape.
  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklShape& mkl_shape) {
  // First the data output, with the caller-provided TF shape...
  const int data_idx = GetTensorDataIndex(n, ctext->num_outputs());
  OP_REQUIRES_OK(ctext, ctext->allocate_output(data_idx, tf_shape, output));

  // ...then the meta-data output holding the serialized MklShape.
  TensorShape meta_shape;
  meta_shape.AddDim(SIZE_OF_MKL_SERIAL_DATA(mkl_shape.GetDimension()));
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#ifndef INTEL_MKL_ML
// Allocate the output tensor, create a second output tensor that will contain
// the MKL shape serialized
inline void AllocateOutputSetMklShape(OpKernelContext* ctext, int n,
                                      Tensor** output,
                                      const TensorShape& tf_shape,
                                      const MklDnnShape& mkl_shape) {
  // First the data output, with the caller-provided TF shape...
  const int data_idx = GetTensorDataIndex(n, ctext->num_outputs());
  OP_REQUIRES_OK(ctext, ctext->allocate_output(data_idx, tf_shape, output));

  // ...then the meta-data output holding the serialized MklDnnShape.
  TensorShape meta_shape;
  meta_shape.AddDim(mkl_shape.GetSerializeBufferSize());
  const int meta_idx = GetTensorMetaDataIndex(n, ctext->num_outputs());
  Tensor* meta_tensor = nullptr;
  OP_REQUIRES_OK(ctext,
                 ctext->allocate_output(meta_idx, meta_shape, &meta_tensor));
  mkl_shape.SerializeMklDnnShape(
      meta_tensor->flat<uint8>().data(),
      meta_tensor->flat<uint8>().size() * sizeof(uint8));
}
#endif
// Allocates a temp tensor and returns the data buffer for temporary storage.
#ifndef INTEL_MKL_ML
// Allocates a temp tensor large enough for the primitive_desc's byte size
// and hands back its raw data pointer in '*buf_out'.
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           const memory::primitive_desc& pd, void** buf_out) {
  // Round the byte size up to whole elements of T, with one extra element
  // of slack so integer division can never under-allocate.
  const auto num_elems = pd.get_size() / sizeof(T) + 1;
  TensorShape buf_shape;
  buf_shape.AddDim(num_elems);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
                                                 buf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<T>().data());
}
#endif
// Allocates a float temp tensor sized from the MKL layout 'lt_buff' and
// returns its raw data pointer in '*buf_out'. The extra "+ 1" element keeps
// the integer division from under-allocating.
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
                           dnnLayout_t lt_buff, void** buf_out) {
  TensorShape tf_shape;
  // 'lt_buff' is already a dnnLayout_t; the previous
  // static_cast<dnnLayout_t>(lt_buff) was a no-op and has been dropped.
  tf_shape.AddDim(dnnLayoutGetMemorySize_F32(lt_buff) / sizeof(float) + 1);
  OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<float>::v(),
                                                 tf_shape, tensor_out));
  *buf_out = static_cast<void*>(tensor_out->flat<float>().data());
}
template <typename T>
inline void AllocTmpBuffer(OpKernelContext* context, Tensor* tensor_out,
TensorShape tf_shape) {
OP_REQUIRES_OK(context, context->allocate_temp(DataTypeToEnum<T>::v(),
tf_shape, tensor_out));
}
// Fills 'strides' (4 entries, MKL/NCHW order) from 'sizes'.
// MKL requires strides in NCHW; only NHWC input needs remapping — every
// other format is treated as already NCHW-ordered.
inline void GetStridesFromSizes(TensorFormat data_format, size_t* strides,
                                const size_t* sizes) {
  const bool is_nhwc = (data_format == FORMAT_NHWC);
  if (is_nhwc) {
    strides[0] = sizes[2];
    strides[1] = sizes[0] * sizes[2];
    strides[2] = 1;
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  } else {
    strides[0] = 1;
    strides[1] = sizes[0];
    strides[2] = sizes[0] * sizes[1];
    strides[3] = sizes[0] * sizes[1] * sizes[2];
  }
}
// Converts 4-D MKL-ordered sizes back into a TF TensorShape honoring the
// requested data_format_. Fails the op for non-4-D shapes.
inline void MklSizesToTFSizes(OpKernelContext* context,
                              TensorFormat data_format_,
                              const MklShape& mkl_shape,
                              TensorShape* tf_shape) {
  const size_t mkl_dim = mkl_shape.GetDimension();
  const size_t* mkl_sizes = mkl_shape.GetSizes();

  OP_REQUIRES(context, mkl_dim == 4,
              errors::InvalidArgument("MKLSizesToTFSizes: size must be 4-dim"));

  std::vector<int32> sizes;
  sizes.reserve(4);
  // Batch (MklDims::N == 3) comes first in either TF layout.
  sizes.push_back(mkl_sizes[3]);
  if (data_format_ == FORMAT_NHWC) {
    sizes.push_back(mkl_sizes[1]);
    sizes.push_back(mkl_sizes[0]);
    sizes.push_back(mkl_sizes[2]);
  } else {
    sizes.push_back(mkl_sizes[2]);
    sizes.push_back(mkl_sizes[1]);
    sizes.push_back(mkl_sizes[0]);
  }

  OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape(sizes, tf_shape));
}
// Translates a dimension letter into its MklDims index; any other letter is
// a programming error and aborts.
inline int32 GetMklTensorDimIndex(char dimension) {
  if (dimension == 'N') return MklDims::N;
  if (dimension == 'C') return MklDims::C;
  if (dimension == 'H') return MklDims::H;
  if (dimension == 'W') return MklDims::W;
  LOG(FATAL) << "Invalid dimension: " << dimension;
  return -1;  // Avoid compiler warning about missing return value
}
// Returns the size of the named dimension ('N'/'C'/'H'/'W') of an MKL shape,
// after validating the resolved index against the shape's rank.
inline int64 GetMklTensorDim(const MklShape& mkl_shape, char dimension) {
  const int index = GetMklTensorDimIndex(dimension);
  CHECK(index >= 0 && index < mkl_shape.GetDimension())
      << "Invalid index from the dimension: " << index << ", " << dimension;
  return mkl_shape.dim_size(index);
}
// Forwards both the data tensor and its MKL meta-data tensor from input
// slot idx_in to output slot idx_out without any layout conversion.
inline void CopyMklTensorInToOut(OpKernelContext* context, int idx_in,
                                 int idx_out) {
  const int num_inputs = context->num_inputs();
  const int num_outputs = context->num_outputs();
  const int idx_data_in = GetTensorDataIndex(idx_in, num_inputs);
  const int idx_meta_in = GetTensorMetaDataIndex(idx_in, num_inputs);
  const int idx_data_out = GetTensorDataIndex(idx_out, num_outputs);
  const int idx_meta_out = GetTensorMetaDataIndex(idx_out, num_outputs);

  const Tensor& data = context->input(idx_data_in);
  const Tensor& meta = context->input(idx_meta_in);

  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  Tensor data_out(data.dtype());
  CHECK(data_out.CopyFrom(data, data.shape()));
  Tensor meta_out(meta.dtype());
  CHECK(meta_out.CopyFrom(meta, meta.shape()));

  context->set_output(idx_data_out, data_out);
  context->set_output(idx_meta_out, meta_out);
}
#ifdef INTEL_MKL_ML
// Emits a non-MKL meta-data tensor for output slot idx_out, then copies the
// input data tensor through under the caller-provided shape.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  MklShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);

  const int idx_data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int idx_data_out = GetTensorDataIndex(idx_out, context->num_outputs());
  const Tensor& data = context->input(idx_data_in);
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  Tensor output(data.dtype());
  CHECK(output.CopyFrom(data, shape));
  context->set_output(idx_data_out, output);
}
#else
/// MKL-DNN variant: copy a TF-format tensor from input slot idx_in to output
/// slot idx_out, reinterpreting it with the given shape and emitting a dummy
/// (non-MKL) MklDnnShape on the output's meta tensor.
inline void CopyTfTensorInToOutWithShape(OpKernelContext* context, int idx_in,
                                         int idx_out,
                                         const TensorShape& shape) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());
  const Tensor& data = context->input(data_in);
  MklDnnShape mkl_shape_output;
  mkl_shape_output.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, mkl_shape_output);
  Tensor output(data.dtype());
  // TODO(intel_tf): alternatively, call forward_input_to_output_with_shape(...)
  CHECK(output.CopyFrom(data, shape));
  context->set_output(data_out, output);
}
#endif
#ifdef INTEL_MKL_ML
/// Forward a TF-format tensor from input slot idx_in to output slot idx_out
/// without copying, emitting a dummy (non-MKL) shape on the output's meta
/// tensor. Ref-typed inputs are forwarded as refs.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());
  MklShape dummy_shape;
  dummy_shape.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dummy_shape);
  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
  } else {
    context->set_output(data_out, context->input(data_in));
  }
}
#else
/// MKL-DNN variant: forward a TF-format tensor from input slot idx_in to
/// output slot idx_out without copying, emitting a dummy (non-MKL)
/// MklDnnShape on the output's meta tensor. Ref-typed inputs are forwarded
/// as refs.
inline void ForwardTfTensorInToOut(OpKernelContext* context, int idx_in,
                                   int idx_out) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());
  MklDnnShape dummy_shape;
  dummy_shape.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_out, dummy_shape);
  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
  } else {
    context->set_output(data_out, context->input(data_in));
  }
}
#endif
/// Forward both the data tensor and its MKL metadata tensor from input slot
/// idx_in to output slot idx_out without copying the underlying buffers.
/// Ref-typed inputs are forwarded as refs.
inline void ForwardMklTensorInToOut(OpKernelContext* context, int idx_in,
                                    int idx_out) {
  const int num_in = context->num_inputs();
  const int num_out = context->num_outputs();
  const int data_in = GetTensorDataIndex(idx_in, num_in);
  const int meta_in = GetTensorMetaDataIndex(idx_in, num_in);
  const int data_out = GetTensorDataIndex(idx_out, num_out);
  const int meta_out = GetTensorMetaDataIndex(idx_out, num_out);
  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
    context->forward_ref_input_to_ref_output(meta_in, meta_out);
  } else {
    context->set_output(data_out, context->input(data_in));
    context->set_output(meta_out, context->input(meta_in));
  }
}
#ifndef INTEL_MKL_ML
/// Forward the data tensor from input slot idx_in to output slot idx_out,
/// publishing the caller-supplied MklDnnShape on the output's meta tensor.
inline void ForwardMklTensorInToOutWithMklShape(OpKernelContext* context,
                                                int idx_in, int idx_out,
                                                const MklDnnShape& mkl_shape) {
  const int data_in = GetTensorDataIndex(idx_in, context->num_inputs());
  const int data_out = GetTensorDataIndex(idx_out, context->num_outputs());
  AllocateOutputSetMklShape(context, idx_out, mkl_shape);
  if (IsRefType(context->input_dtype(data_in))) {
    context->forward_ref_input_to_ref_output(data_in, data_out);
  } else {
    context->set_output(data_out, context->input(data_in));
  }
}
#endif
// Forward ONLY the MKL shape metadata tensor (used in element-wise and other
// ops where the eigen implementation is called and the MKL shape is unused).
inline void ForwardMklMetaDataInToOut(OpKernelContext* context,
                                      uint idx_data_in, uint idx_data_out) {
  const uint meta_in =
      GetTensorMetaDataIndex(idx_data_in, context->num_inputs());
  const uint meta_out =
      GetTensorMetaDataIndex(idx_data_out, context->num_outputs());
  if (IsRefType(context->input_dtype(idx_data_in))) {
    context->forward_ref_input_to_ref_output(meta_in, meta_out);
  } else {
    context->set_output(meta_out, context->input(meta_in));
  }
}
// Emit a dummy (non-MKL) shape on the given output's meta tensor; called
// when the output is produced in plain TF format.
inline void SetDummyMklShapeOutput(OpKernelContext* context,
                                   uint idx_data_out) {
  MklShape dummy;
  dummy.SetMklTensor(false);
  AllocateOutputSetMklShape(context, idx_data_out, dummy);
}
#ifdef INTEL_MKL_ML
// We don't need these functions in MKLDNN. We have defined equality operator
// on MklDnnShape class directly.
// Checks if the TF shape for both MKL tensors is the same or not
// Returns: true if both TF shapes are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const MklShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->GetDimension()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const MklShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->GetDimension() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->GetDimension();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->tf_dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
                             const MklShape* input_shape_1) {
  // Comparison is symmetric; delegate to the (MklShape*, TensorShape*)
  // overload with the arguments swapped.
  return MklCompareShapes(input_shape_1, input_shape_0);
}
// Checks if the TF shape for both tensors is the same or not
// Returns: true if TF shapes for both are the same, false otherwise
inline bool MklCompareShapes(const TensorShape* input_shape_0,
const TensorShape* input_shape_1) {
// Check for number of dimensions
if (input_shape_0->dims() != input_shape_1->dims()) {
return false;
}
// Check size of each dimension
size_t ndims = input_shape_0->dims();
for (size_t i = 0; i < ndims; i++) {
if (input_shape_0->dim_size(i) != input_shape_1->dim_size(i)) {
return false;
}
}
return true;
}
#endif
// These functions do not compile with MKL-DNN since mkl.h is missing.
// We may need to remove them later.
// TODO(intel_tf): Remove this routine when faster MKL layout conversion is
// out.
// Convert a 4-D float tensor from NHWC to NCHW layout.
// Per batch entry, the NHWC data is treated as an (H*W) x C row-major matrix
// and transposed out-of-place with MKL's somatcopy, yielding C x (H*W),
// i.e. NCHW. Assumes *output is already allocated with the same total number
// of elements as input -- TODO confirm with callers.
inline void MklNHWCToNCHW(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();
  // Dimensions are read from the NHWC input.
  int64 N = input.dim_size(0);
  int64 H = input.dim_size(1);
  int64 W = input.dim_size(2);
  int64 C = input.dim_size(3);
  int64 stride_n = H * W * C;  // elements per batch image
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    // 'R' = row-major, 'T' = transpose, alpha = 1 (no scaling).
    mkl_somatcopy('R', 'T', H * W, C, 1, buf_in + n * stride_n, C,
                  buf_out + n * stride_n, H * W);
  }
}
// Convert a 4-D float tensor from NCHW to NHWC layout.
// Per batch entry, the NCHW data is treated as a C x (H*W) row-major matrix
// and transposed out-of-place with MKL's somatcopy, yielding (H*W) x C,
// i.e. NHWC. Assumes input is already allocated with the same total number
// of elements as *output -- TODO confirm with callers.
inline void MklNCHWToNHWC(const Tensor& input, Tensor** output) {
  const float* buf_in = input.flat<float>().data();
  float* buf_out = (*output)->flat<float>().data();
  // Dimensions are read from the NHWC output.
  int64 N = (*output)->dim_size(0);
  int64 H = (*output)->dim_size(1);
  int64 W = (*output)->dim_size(2);
  int64 C = (*output)->dim_size(3);
  int64 stride_n = H * W * C;  // elements per batch image
#pragma omp parallel for num_threads(16)
  for (int64 n = 0; n < N; ++n) {
    // 'R' = row-major, 'T' = transpose, alpha = 1 (no scaling).
    mkl_somatcopy('R', 'T', C, H * W, 1, buf_in + n * stride_n, H * W,
                  buf_out + n * stride_n, C);
  }
}
// -------------------------------------------------------------------
#ifndef INTEL_MKL_ML
/// Return MKL-DNN data type (memory::data_type) for input type T
///
/// @input None
/// @return memory::data_type corresponding to type T
template <typename T>
static memory::data_type MklDnnType();
/// Instantiation for float type. Add similar instantiations for other
/// type if needed.
/// Note: only the float specialization is defined here; instantiating
/// MklDnnType with any other T will fail to link.
template <>
memory::data_type MklDnnType<float>() {
  return memory::data_type::f32;
}
/// Map TensorFlow's data format into MKL-DNN data format
///
/// @input: TensorFlow data format
/// @return: memory::format corresponding to TensorFlow data format;
///          Fails with an error (via TF_CHECK_OK) on any other format.
inline memory::format TFDataFormatToMklDnnDataFormat(TensorFormat format) {
  switch (format) {
    case FORMAT_NHWC:
      return memory::format::nhwc;
    case FORMAT_NCHW:
      return memory::format::nchw;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      // Return to get rid of compiler warning
      return memory::format::format_undef;
  }
}
/// Map MKL-DNN data format to TensorFlow's data format
///
/// @input: memory::format
/// @return: Tensorflow data format corresponding to memory::format
///          Fails with an error (via TF_CHECK_OK) on any other format.
inline TensorFormat MklDnnDataFormatToTFDataFormat(memory::format format) {
  switch (format) {
    case memory::format::nhwc:
      return FORMAT_NHWC;
    case memory::format::nchw:
      return FORMAT_NCHW;
    default:
      TF_CHECK_OK(
          Status(error::Code::INVALID_ARGUMENT, "Unsupported data format"));
      // TF_CHECK_OK aborts above; this return only prevents compiler
      // warnings about a missing return value.
      return FORMAT_NHWC;
  }
}
/// Map TensorShape object into memory::dims required by MKL-DNN
///
/// This function will simply map input TensorShape into MKL-DNN dims
/// naively, preserving the order of dimensions. E.g., if the input tensor
/// is in NHWC format, then dims will be in NHWC format also.
///
/// @input TensorShape object in shape
/// @return memory::dims corresponding to TensorShape
inline memory::dims TFShapeToMklDnnDims(const TensorShape& shape) {
  const int rank = shape.dims();
  memory::dims dims(rank);
  for (int d = 0; d < rank; ++d) {
    dims[d] = shape.dim_size(d);
  }
  return dims;
}
/// Map TensorShape object into memory::dims in NCHW format required by MKL-DNN
///
/// Unlike TFShapeToMklDnnDims, this maps the input TensorShape into MKL-DNN
/// dims in NCHW format, so it may not preserve the order of dimensions.
/// E.g., if the input tensor is in NHWC format, then dims will be in NCHW
/// format, and not in NHWC format.
///
/// @input TensorShape object in shape
/// @return memory::dims in MKL-DNN required NCHW format
inline memory::dims TFShapeToMklDnnDimsInNCHW(const TensorShape& shape,
                                              TensorFormat format) {
  // Reject formats that MKL-DNN cannot represent.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);
  const int batch = shape.dim_size(GetTensorDimIndex(format, 'N'));
  const int channels = shape.dim_size(GetTensorDimIndex(format, 'C'));
  const int height = shape.dim_size(GetTensorDimIndex(format, 'H'));
  const int width = shape.dim_size(GetTensorDimIndex(format, 'W'));
  // MKL-DNN requires dimensions in NCHW order.
  return memory::dims({batch, channels, height, width});
}
/// Overloaded version of function above: reorder an already-built
/// memory::dims (laid out per `format`) into NCHW order.
inline memory::dims MklDnnDimsInNCHW(const memory::dims& in_dims,
                                     TensorFormat format) {
  // Reject formats that MKL-DNN cannot represent.
  CHECK_NE(TFDataFormatToMklDnnDataFormat(format),
           memory::format::format_undef);
  const int batch = in_dims[GetTensorDimIndex(format, 'N')];
  const int channels = in_dims[GetTensorDimIndex(format, 'C')];
  const int height = in_dims[GetTensorDimIndex(format, 'H')];
  const int width = in_dims[GetTensorDimIndex(format, 'W')];
  // MKL-DNN requires dimensions in NCHW order.
  return memory::dims({batch, channels, height, width});
}
/// Map MklDnn memory::dims object into TensorShape object.
///
/// This function will simply map input shape in MKL-DNN memory::dims format
/// in Tensorflow's TensorShape object by preserving dimension order.
///
/// @input MKL-DNN memory::dims object
/// @output TensorShape corresponding to memory::dims
inline TensorShape MklDnnDimsToTFShape(const memory::dims& dims) {
  // Use int64 (TensorShapeUtils::MakeShape has an int64 overload) so that
  // dimension sizes larger than int32 are not silently truncated.
  std::vector<int64> shape(dims.size(), -1);
  // size_t index avoids a signed/unsigned comparison with dims.size().
  for (size_t d = 0; d < dims.size(); ++d) {
    shape[d] = dims[d];
  }
  TensorShape ret;
  CHECK_EQ(TensorShapeUtils::MakeShape(shape, &ret).ok(), true);
  return ret;
}
/// Function to calculate strides given tensor shape in Tensorflow order
/// E.g., if dims_tf_order is {1, 2, 3, 4}, then as per Tensorflow convention,
/// the dimension with size 1 is the outermost and the dimension with size 4
/// is the innermost; strides are therefore {4 * 3 * 2, 4 * 3, 4, 1},
/// i.e., {24, 12, 4, 1}.
///
/// @input Tensorflow shape in memory::dims type
/// @return memory::dims containing strides for the tensor.
inline memory::dims CalculateTFStrides(const memory::dims& dims_tf_order) {
  CHECK_GT(dims_tf_order.size(), 0);
  const int rank = dims_tf_order.size();
  memory::dims strides(rank);
  // Innermost (last) dimension always has unit stride.
  strides[rank - 1] = 1;
  // Each outer stride is the next inner stride times that dimension's size.
  for (int d = rank - 2; d >= 0; --d) {
    strides[d] = strides[d + 1] * dims_tf_order[d + 1];
  }
  return strides;
}
/// Map a TensorFlow padding type to the MKL-DNN padding kind.
/// MKL-DNN only supports zero padding, so `pad` is ignored and
/// padding_kind::zero is always returned.
inline padding_kind TFPaddingToMklDnnPadding(Padding pad) {
  // MKL-DNN only supports zero padding.
  return padding_kind::zero;
}
/// Helper function to create memory descriptor in Blocked format
///
/// @input: Tensor dimensions
/// @input: strides corresponding to dimensions. One can use utility
///         function such as CalculateTFStrides to compute strides
///         for given dimensions.
/// @input: dtype - MKL-DNN element data type for the memory.
/// @return: memory::desc object corresponding to blocked memory format
///          for given dimensions and strides.
inline memory::desc CreateBlockedMemDescHelper(const memory::dims& dim,
                                               const memory::dims& strides,
                                               memory::data_type dtype) {
  CHECK_EQ(dim.size(), strides.size());
  // We have to construct memory descriptor in a C style. This is not at all
  // ideal but MKLDNN does not offer any API to construct descriptor in
  // blocked format except a copy constructor that accepts
  // mkldnn_memory_desc_t.
  mkldnn_memory_desc_t md;
  md.primitive_kind = mkldnn_memory;
  md.ndims = dim.size();
  md.format = mkldnn_blocked;
  md.data_type = memory::convert_to_c(dtype);
  for (size_t i = 0; i < dim.size(); i++) {
    // No inner blocking: every dimension is a single block of size 1,
    // so only the outer strides (strides[0]) carry layout information.
    md.layout_desc.blocking.block_dims[i] = 1;
    md.layout_desc.blocking.strides[1][i] = 1;
    md.layout_desc.blocking.strides[0][i] = strides[i];
    // No padding: padded dims equal logical dims and data starts at offset 0.
    md.layout_desc.blocking.padding_dims[i] = dim[i];
    md.layout_desc.blocking.offset_padding_to_data[i] = 0;
    md.dims[i] = dim[i];
  }
  md.layout_desc.blocking.offset_padding = 0;
  return memory::desc(md);
}
/*
 * Class to represent all the resources corresponding to a tensor in TensorFlow
 * that are required to execute an operation (such as Convolution).
 */
template <typename T>
class MklDnnData {
 private:
  /// MKL-DNN memory primitive for input user memory
  memory* user_memory_;
  /// MKL-DNN memory primitive in case input or output reorder is needed.
  memory* reorder_memory_;
  /// Operations memory descriptor
  memory::desc* op_md_;
  /// CPU engine on which operation will be executed
  const engine* cpu_engine_;

 public:
  // NOTE(review): this class owns user_memory_, reorder_memory_ and op_md_
  // through raw pointers but relies on the compiler-generated copy
  // operations; copying an instance would double-delete those pointers.
  // Presumably instances are never copied -- confirm with callers.
  explicit MklDnnData(const engine* e)
      : user_memory_(nullptr),
        reorder_memory_(nullptr),
        op_md_(nullptr),
        cpu_engine_(e) {}
  ~MklDnnData() {
    cpu_engine_ = nullptr;  // We don't own this.
    delete (user_memory_);
    delete (reorder_memory_);
    delete (op_md_);
  }
  /// Return the raw data buffer backing `tensor`, with constness cast away
  /// so it can be handed to MKL-DNN memory primitives.
  inline void* GetTensorBuffer(const Tensor* tensor) const {
    CHECK_NOTNULL(tensor);
    return const_cast<void*>(
        static_cast<const void*>(tensor->flat<T>().data()));
  }
  /// Set user memory primitive using specified dimensions, memory format and
  /// data_buffer. Function automatically uses element data type by using
  /// input type T used for creating call object.
  ///
  /// In a nutshell, function allows user to describe the input tensor to
  /// an operation. E.g., filter of Conv2D is of shape {1, 2, 3, 4}, and
  /// memory format HWIO, and the buffer that contains actual values is
  /// pointed by data_buffer.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        void* data_buffer = nullptr) {
    auto md = memory::desc(dim, MklDnnType<T>(), fm);
    SetUsrMem(md, data_buffer);
  }
  /// Same as above, but the data buffer is taken from `tensor`.
  inline void SetUsrMem(const memory::dims& dim, memory::format fm,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, fm, GetTensorBuffer(tensor));
  }
  /// Helper function to create memory descriptor in Blocked format
  ///
  /// @input: Tensor dimensions
  /// @input: strides corresponding to dimensions. One can use utility
  ///         function such as CalculateTFStrides to compute strides
  ///         for given dimensions.
  /// @return: memory::desc object corresponding to blocked memory format
  ///          for given dimensions and strides.
  static inline memory::desc CreateBlockedMemDesc(const memory::dims& dim,
                                                  const memory::dims& strides) {
    return CreateBlockedMemDescHelper(dim, strides, MklDnnType<T>());
  }
  /// A version of SetUsrMem call that allows user to create memory in blocked
  /// format. So in addition to accepting dimensions, it also accepts strides.
  /// This allows user to create memory for tensor in a format that is not
  /// supported by MKLDNN. E.g., MKLDNN does not support tensor format for 6
  /// dimensional tensor as a native format. But by using blocked format, a user
  /// can create memory for 6D tensor.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        void* data_buffer = nullptr) {
    CHECK_EQ(dim.size(), strides.size());
    auto blocked_md = MklDnnData<T>::CreateBlockedMemDesc(dim, strides);
    SetUsrMem(blocked_md, data_buffer);
  }
  /// Same as above, but the data buffer is taken from `tensor`.
  inline void SetUsrMem(const memory::dims& dim, const memory::dims& strides,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(dim, strides, GetTensorBuffer(tensor));
  }
  /// A version of function to set user memory primitive that accepts memory
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::desc& md, void* data_buffer = nullptr) {
    auto pd = memory::primitive_desc(md, *cpu_engine_);
    SetUsrMem(pd, data_buffer);
  }
  /// A version of SetUsrMem with memory descriptor and tensor
  inline void SetUsrMem(const memory::desc& md, const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(md, GetTensorBuffer(tensor));
  }
  /// A version of function to set user memory primitive that accepts primitive
  /// descriptor directly, instead of accepting dimensions and format. This
  /// function is more generic that the one above, but the function above is
  /// sufficient in most cases.
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        void* data_buffer = nullptr) {
    CHECK_NOTNULL(cpu_engine_);
    // TODO(nhasabni): can we remove dynamic memory allocation?
    // NOTE(review): calling any SetUsrMem overload twice on the same object
    // leaks the previous user_memory_ allocation -- confirm single-use.
    if (data_buffer) {
      user_memory_ = new memory(pd, data_buffer);
    } else {
      user_memory_ = new memory(pd);
    }
  }
  /// A version of SetUsrMem with primitive descriptor and tensor
  inline void SetUsrMem(const memory::primitive_desc& pd,
                        const Tensor* tensor) {
    CHECK_NOTNULL(tensor);
    SetUsrMem(pd, GetTensorBuffer(tensor));
  }
  /// Get function for user memory primitive.
  inline const memory* GetUsrMem() const { return user_memory_; }
  /// Get function for primitive descriptor of user memory primitive.
  inline const memory::primitive_desc GetUsrMemPrimDesc() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_primitive_desc();
  }
  /// Get function for descriptor of user memory.
  inline memory::desc GetUsrMemDesc() {
    // This is ugly. Why MKL-DNN does not provide desc() method of const type??
    // NOTE(review): const_cast works around desc() being non-const; it does
    // not modify the descriptor.
    const memory::primitive_desc pd = GetUsrMemPrimDesc();
    return const_cast<memory::primitive_desc*>(&pd)->desc();
  }
  /// Get function for data buffer of user memory primitive.
  inline void* GetUsrMemDataHandle() const {
    CHECK_NOTNULL(user_memory_);
    return user_memory_->get_data_handle();
  }
  /// Set function for data buffer of user memory primitive.
  inline void* SetUsrMemDataHandle(void* data_buffer) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(data_buffer);
    return user_memory_->set_data_handle(data_buffer);
  }
  /// Set function for data buffer of user memory primitive.
  /// The buffer is taken from `tensor`.
  inline void SetUsrMemDataHandle(const Tensor* tensor) {
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(tensor);
    user_memory_->set_data_handle(GetTensorBuffer(tensor));
  }
  /// Get the memory primitive for input and output of an op. If inputs
  /// to an op require reorders, then this function returns memory primitive
  /// for reorder. Otherwise, it will return memory primitive for user memory.
  ///
  /// E.g., Conv2D(I, F) is a primitive with I and F being inputs. Then to
  /// execute Conv2D, we need memory primitive for I and F. Buf if reorder is
  /// required for I and F (say I_r is reorder primitive for I; F_r is reorder
  /// primitive for F), then we need I_r and F_r to perform Conv2D.
  inline const memory& GetOpMem() const {
    return reorder_memory_ ? *reorder_memory_ : *user_memory_;
  }
  /// Set memory descriptor of an operation in terms of dimensions and memory
  /// format. E.g., For Conv2D, the dimensions would be same as user dimensions
  /// but memory::format would be mkldnn::any because we want MKL-DNN to choose
  /// best layout/format for given input dimensions.
  inline void SetOpMemDesc(const memory::dims& dim, memory::format fm) {
    // TODO(nhasabni): can we remove dynamic memory allocation?
    op_md_ = new memory::desc(dim, MklDnnType<T>(), fm);
  }
  /// Get function for memory descriptor for an operation
  inline const memory::desc& GetOpMemDesc() const { return *op_md_; }
  /// Predicate that checks if we need to reorder user's memory into memory
  /// pointed by op_pd.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::primitive_desc& op_pd) const {
    CHECK_NOTNULL(user_memory_);
    return op_pd != user_memory_->get_primitive_desc();
  }
  /// Predicate that checks if we need to reorder user's memory into memory
  /// based on the provided format.
  ///
  /// @input: target_format - memory format of the given input of an
  ///                         operation
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool IsReorderNeeded(const memory::format& target_format) const {
    CHECK_NOTNULL(user_memory_);
    return target_format !=
           user_memory_->get_primitive_desc().desc().data.format;
  }
  /// Function to create a reorder from memory pointed by from to memory
  /// pointed by to. Returns created primitive.
  inline primitive CreateReorder(const memory* from, const memory* to) const {
    CHECK_NOTNULL(from);
    CHECK_NOTNULL(to);
    return reorder(*from, *to);
  }
  /// Function to handle input reordering
  ///
  /// Check if we need to reorder this input of an operation.
  /// Return true and allocate reorder memory primitive if reorder is needed.
  /// Otherwise, return false and do not allocate reorder memory primitive.
  ///
  /// To check if reorder is needed, this function compares memory primitive
  /// descriptor of an operation (op_pd) for the given input with the
  /// user-specified memory primitive descriptor.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }
  /// Overloaded version of above function that accepts memory buffer
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @reorder_data_handle - memory buffer where output of reorder needs to be
  ///                        stored. Primitive does not check if buffer is
  ///                        enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  void* reorder_data_handle,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_data_handle);
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd, reorder_data_handle);
      net->push_back(CreateReorder(user_memory_, reorder_memory_));
      return true;
    }
    return false;
  }
  /// Another overloaded version of CheckReorderToOpMem that accepts Tensor
  /// where output of reorder needs to be stored.
  ///
  /// @input: op_pd - memory primitive descriptor of the given input of an
  ///                 operation
  /// @reorder_tensor - Tensor whose buffer is to be used to store output of
  ///                   reorder. Primitive does not check if buffer is
  ///                   enough size to write.
  /// @input: net - net to which to add reorder primitive in case it is needed.
  /// @return: true in case reorder of input is needed; false, otherwise.
  inline bool CheckReorderToOpMem(const memory::primitive_desc& op_pd,
                                  Tensor* reorder_tensor,
                                  std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(reorder_tensor);
    return CheckReorderToOpMem(op_pd, GetTensorBuffer(reorder_tensor), net);
  }
  /// Function to handle output reorder
  ///
  /// This function performs very similar functionality as input reordering
  /// function above. The only difference is that this function does not add
  /// reorder primitive to the net. The reason for this is: the reorder
  /// primitive for output needs to be added to the list only after operation
  /// has executed. But we need to prepare a temporary buffer in case output
  /// reorder is needed. And this temporary buffer will hold the output of
  /// an operation before it is fed to reorder primitive.
  ///
  /// @input memory primitive descriptor for the given output of an operation
  /// @return: true in case reorder of output is needed; false, otherwise.
  inline bool PrepareReorderToUserMemIfReq(
      const memory::primitive_desc& op_pd) {
    CHECK_NOTNULL(user_memory_);
    if (IsReorderNeeded(op_pd)) {
      // TODO(nhasabni): can we remove dynamic memory allocation?
      reorder_memory_ = new memory(op_pd);
      return true;
    }
    return false;
  }
  /// Function to actually insert reorder primitive in the net
  ///
  /// This function completes remaining part of output reordering. It inserts
  /// a reordering primitive from the temporary buffer that holds the output
  /// to the user-specified output buffer.
  ///
  /// @input: net - net to which to add reorder primitive
  inline void InsertReorderToUserMem(std::vector<primitive>* net) {
    CHECK_NOTNULL(net);
    CHECK_NOTNULL(user_memory_);
    CHECK_NOTNULL(reorder_memory_);
    net->push_back(CreateReorder(reorder_memory_, user_memory_));
  }
};
#endif // INTEL_MKL_ML
} // namespace tensorflow
#endif // INTEL_MKL
#endif // TENSORFLOW_CORE_UTIL_MKL_UTIL_H_
|
bezier_post_utility.h | //
// Project Name: Kratos
// Last Modified by: $Author: hbui $
// Date: $Date: 2013-10-12 $
// Revision: $Revision: 1.0 $
//
//
#if !defined(KRATOS_BEZIER_POST_UTILITY_H_INCLUDED )
#define KRATOS_BEZIER_POST_UTILITY_H_INCLUDED
// System includes
#include <string>
#include <vector>
#include <iostream>
// External includes
#include <omp.h>
#include "boost/progress.hpp"
#ifdef ISOGEOMETRIC_USE_MPI
#include "mpi.h"
#endif
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
#include "includes/element.h"
#include "includes/properties.h"
#include "includes/ublas_interface.h"
#include "includes/legacy_structural_app_vars.h"
#include "spaces/ublas_space.h"
#include "linear_solvers/linear_solver.h"
#include "utilities/openmp_utils.h"
#include "utilities/auto_collapse_spatial_binning.h"
#include "custom_geometries/isogeometric_geometry.h"
#include "custom_utilities/isogeometric_post_utility.h"
#include "isogeometric_application/isogeometric_application.h"
// #define DEBUG_LEVEL1
//#define DEBUG_LEVEL2
//#define DEBUG_MULTISOLVE
//#define DEBUG_GENERATE_MESH
// #define ENABLE_PROFILING
namespace Kratos
{
///@addtogroup IsogeometricApplication
///@{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/// Helper dispatching CalculateOnPoint by result data type. The primary
/// template always throws; presumably specializations defined elsewhere
/// implement the per-type interpolation -- confirm in the implementation.
template<typename TDataType>
struct BezierPostUtility_Helper
{
    typedef typename Element::GeometryType GeometryType;
    typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;

    /// Interpolation on element
    /// Unspecialized fallback: reports an unimplemented-function error for
    /// any TDataType without a dedicated specialization.
    static TDataType& CalculateOnPoint(const Variable<TDataType>& rVariable,
        TDataType& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates)
    {
        KRATOS_THROW_ERROR(std::logic_error, "Error calling unimplemented function", __FUNCTION__)
    }
};
/// Short class definition.
/**
An advanced utility to export directly the FEM mesh out from isogeometric Bezier mesh. Each Bezier element will generate its own set of FEM elements. Therefore a large amount of nodes and elements may be generated.
One shall carefully use this utility for large problem. Previously, this class is named IsogeometricPostUtility.
*/
class BezierPostUtility : public IsogeometricPostUtility
{
public:
///@name Type Definitions
///@{
typedef boost::numeric::ublas::vector<double> ValuesContainerType;
typedef boost::numeric::ublas::matrix<double> ValuesArrayContainerType;
typedef typename ModelPart::NodesContainerType NodesArrayType;
typedef typename ModelPart::ElementsContainerType ElementsArrayType;
typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::PointType NodeType;
typedef IsogeometricGeometry<NodeType> IsogeometricGeometryType;
typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
typedef typename NodeType::DofsContainerType DofsContainerType;
typedef UblasSpace<double, CompressedMatrix, Vector> SerialSparseSpaceType;
typedef UblasSpace<double, Matrix, Vector> SerialDenseSpaceType;
typedef LinearSolver<SerialSparseSpaceType, SerialDenseSpaceType> LinearSolverType;
typedef std::size_t IndexType;
/// Pointer definition of BezierPostUtility
KRATOS_CLASS_POINTER_DEFINITION(BezierPostUtility);
///@}
///@name Life Cycle
///@{
/// Default constructor.
BezierPostUtility()
{
    // No state visible in this class body requires initialization here.
}
/// Destructor.
virtual ~BezierPostUtility()
{
    // No resources to release.
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
// Synchronize post model_part with the reference model_part
/// Transfers nodal values of rThisVariable from the reference model_part to
/// every node of the post model_part. Each post node carries the id of its
/// parent Bezier element (PARENT_ELEMENT_ID) and its local coordinates in
/// that element (LOCAL_COORDINATES); the value is interpolated there via
/// BezierPostUtility_Helper::CalculateOnPoint.
template<class TVariableType>
void TransferNodalResults(
    const TVariableType& rThisVariable,
    ModelPart& r_model_part,
    ModelPart& r_model_part_post) const
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
#endif
    NodesArrayType& pTargetNodes = r_model_part_post.Nodes();
    ElementsArrayType& pElements = r_model_part.Elements();
    typename TVariableType::Type Results;
    CoordinatesArrayType LocalPos;
    int ElementId;
    // #pragma omp parallel for
    //TODO: check this. This is not parallelized.
    // Loop variables above are shared, which is why the omp pragma is
    // disabled; they would need to be private per thread.
    for(typename NodesArrayType::ptr_iterator it = pTargetNodes.ptr_begin(); it != pTargetNodes.ptr_end(); ++it)
    {
        // Locate the parent element and the node's local coordinates there.
        ElementId = (*it)->GetSolutionStepValue(PARENT_ELEMENT_ID);
        noalias(LocalPos) = (*it)->GetSolutionStepValue(LOCAL_COORDINATES);
        // Interpolate the variable at the local point and store it on the
        // post node.
        Results = BezierPostUtility_Helper<typename TVariableType::Type>::CalculateOnPoint(rThisVariable, Results, pElements(ElementId), LocalPos);
        (*it)->GetSolutionStepValue(rThisVariable) = Results;
    }
#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed: " << end_compute - start_compute << " s" << std::endl;
#else
    std::cout << "Transfer nodal point results for " << rThisVariable.Name() << " completed" << std::endl;
#endif
}
// Synchronize post model_part with the reference model_part
/// Transfers rThisVariable, sampled at the integration points of
/// r_model_part, onto the nodes of r_model_part_post in two steps:
/// (1) project the integration-point values to the reference model_part's
/// nodes (presumably a global projection solved with pSolver -- see
/// TransferVariablesToNodes), then (2) interpolate those nodal values onto
/// the post mesh via TransferNodalResults.
template<class TVariableType>
void TransferIntegrationPointResults(
    const TVariableType& rThisVariable,
    ModelPart& r_model_part,
    ModelPart& r_model_part_post,
    LinearSolverType::Pointer pSolver) const
{
#ifdef ENABLE_PROFILING
    double start_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "########################################" << std::endl;
    std::cout << "Transfer integration point results for "
              << rThisVariable.Name() << " starts" << std::endl;
#endif
    // firstly transfer rThisVariable from integration points of reference
    // model_part to its nodes
    TransferVariablesToNodes(pSolver, r_model_part, rThisVariable);
    // secondly transfer new nodal variables results to the post model_part
    TransferNodalResults(rThisVariable, r_model_part, r_model_part_post);
#ifdef ENABLE_PROFILING
    double end_compute = OpenMPUtils::GetCurrentTime();
    std::cout << "Transfer integration point results for "
              << rThisVariable.Name() << " completed: "
              << end_compute - start_compute << "s" << std::endl;
    std::cout << "########################################" << std::endl;
#endif
}
// Transfer the variable to nodes for model_part
// Public wrapper (with optional profiling) around the private projection
// routine; operates on all elements of r_model_part.
template<class TVariableType>
void TransferVariablesToNodes(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
LinearSolverType::Pointer pSolver) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
// delegate to the private overload (argument order differs on purpose)
TransferVariablesToNodes(pSolver, r_model_part, rThisVariable);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
// Transfer the variable to nodes for model_part
// Public wrapper restricted to the sub-mesh defined by ElementsArray;
// only nodes connected to those elements receive values.
template<class TVariableType>
void TransferVariablesToNodes(
const TVariableType& rThisVariable,
ModelPart& r_model_part,
ElementsArrayType& ElementsArray,
LinearSolverType::Pointer pSolver) const
{
#ifdef ENABLE_PROFILING
double start_compute = OpenMPUtils::GetCurrentTime();
std::cout << "########################################" << std::endl;
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " starts" << std::endl;
#endif
// delegate to the private overload (argument order differs on purpose)
TransferVariablesToNodes(pSolver, r_model_part, ElementsArray, rThisVariable);
#ifdef ENABLE_PROFILING
double end_compute = OpenMPUtils::GetCurrentTime();
std::cout << "Transfer integration point results to nodes for "
<< rThisVariable.Name() << " completed: "
<< end_compute - start_compute << "s" << std::endl;
std::cout << "########################################" << std::endl;
#endif
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Input and output
///@{
/// Turn back information as a string.
virtual std::string Info() const
{
    // The name is a fixed literal; no std::stringstream assembly is needed.
    return "BezierPostUtility";
}
/// Print information about this object.
// Writes the class name to rOStream (used by operator<<).
virtual void PrintInfo(std::ostream& rOStream) const
{
rOStream << "BezierPostUtility";
}
/// Print object's data.
// Intentionally empty: the utility carries no printable state.
virtual void PrintData(std::ostream& rOStream) const
{}
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
/**
 * L2-projection of rThisVariable from integration points to nodal values.
 *
 * The transfer minimizes the L2-norm error between the integration point
 * field and the nodal interpolation
 *     f(x) = sum_i{ shape_func_i * rThisVariable_i },
 * which leads to a global linear system solved with pSolver.
 * @ref Jiao & Heath: "Common-refinement-based data transfer...",
 *      Int. Journal for numer. meth. in eng. 61 (2004) 2402--2427
 *
 * WARNING: this may cause segmentation faults as the respective variables
 * will be created on nodal level while they are originally intended to be
 * stored on integration points!
 *
 * Scalar (double) overload restricted to the sub-mesh defined by
 * ElementsArray; only nodes connected to those elements receive values.
 * @param pSolver       solver used for the projection system
 * @param r_model_part  model_part holding the source integration point data
 * @param ElementsArray elements defining the sub-mesh to transfer on
 * @param rThisVariable the variable to transfer
 * @param check_active  if false the activeness of the elements is not
 *                      checked; if true inactive elements are skipped
 */
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part, ElementsArrayType& ElementsArray,
const Variable<double>& rThisVariable,
const bool& check_active = false) const;
/**
 * Scalar (double) overload operating on the whole model_part.
 * Shall not be used if the model_part mixes several element types.
 * See the first overload for the projection scheme and the segfault warning.
 * @param pSolver       solver used for the projection system
 * @param r_model_part  model_part holding the source integration point data
 * @param rThisVariable the variable to transfer
 * @param check_active  if false, element activeness is not checked
 *                      (the original doc claimed activeness is never checked
 *                      for this overload -- verify against the definition)
 */
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part,
const Variable<double>& rThisVariable,
const bool& check_active = false) const;
/**
 * Vector overload restricted to the sub-mesh defined by ElementsArray.
 * Intended for ncomponents-sized vector variables; the default of 6 targets
 * tensor-like fields such as STRESSES or PRESTRESS.
 * See the first overload for the projection scheme and the segfault warning.
 * @param pSolver       solver used for the projection system
 * @param r_model_part  model_part holding the source integration point data
 * @param ElementsArray elements defining the sub-mesh to transfer on
 * @param rThisVariable the variable to transfer
 * @param ncomponents   number of components of the nodal vector
 * @param check_active  if false the activeness of the elements is not
 *                      checked; if true inactive elements are skipped
 */
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part, ElementsArrayType& ElementsArray,
const Variable<Vector>& rThisVariable,
const std::size_t& ncomponents = 6,
const bool& check_active = false) const;
/**
 * Vector overload operating on the whole model_part.
 * Currently only works with 6-component variables like STRESSES, PRESTRESS.
 * Shall not be used if the model_part mixes several element types.
 * See the first overload for the projection scheme and the segfault warning.
 * @param pSolver       solver used for the projection system
 * @param r_model_part  model_part holding the source integration point data
 * @param rThisVariable the variable to transfer
 * @param ncomponents   number of components of the nodal vector
 * @param check_active  if false, element activeness is not checked
 */
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part,
const Variable<Vector>& rThisVariable,
const std::size_t& ncomponents = 6,
const bool& check_active = false) const;
/**
 * array_1d<double, 3> overload restricted to the sub-mesh defined by
 * ElementsArray; the default ncomponents of 3 targets flux-like fields.
 * See the first overload for the projection scheme and the segfault warning.
 * @param pSolver       solver used for the projection system
 * @param r_model_part  model_part holding the source integration point data
 * @param ElementsArray elements defining the sub-mesh to transfer on
 * @param rThisVariable the variable to transfer
 * @param ncomponents   number of components of the nodal vector
 * @param check_active  if false the activeness of the elements is not
 *                      checked; if true inactive elements are skipped
 */
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part, ElementsArrayType& ElementsArray,
const Variable<array_1d<double, 3>>& rThisVariable,
const std::size_t& ncomponents = 3,
const bool& check_active = false) const;
/**
 * array_1d<double, 3> overload operating on the whole model_part.
 * Currently only works with 3-component variables like WATER_FLOW, HEAT_FLOW.
 * Shall not be used if the model_part mixes several element types.
 * See the first overload for the projection scheme and the segfault warning.
 * @param pSolver       solver used for the projection system
 * @param r_model_part  model_part holding the source integration point data
 * @param rThisVariable the variable to transfer
 * @param ncomponents   number of components of the nodal vector
 * @param check_active  if false, element activeness is not checked
 */
void TransferVariablesToNodes(LinearSolverType::Pointer& pSolver,
ModelPart& r_model_part,
const Variable<array_1d<double, 3>>& rThisVariable,
const std::size_t& ncomponents = 3,
const bool& check_active = false) const;
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
/// Assignment operator.
// Private: copying the (stateless) utility is disallowed for clients.
// The body is a no-op that simply returns *this.
BezierPostUtility& operator=(BezierPostUtility const& rOther)
{
return *this;
}
/// Copy constructor.
// Private: see the assignment operator above.
BezierPostUtility(BezierPostUtility const& rOther)
{
}
///@}
}; // Class BezierPostUtility
///@}
// Specialization of BezierPostUtility_Helper for scalar (double) variables.
template<>
struct BezierPostUtility_Helper<double>
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
// Evaluates rVariable of pElement at local position rCoordinates; stores
// the interpolated value in rResult and returns it (defined out-of-line).
static double& CalculateOnPoint(const Variable<double>& rVariable,
double& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};
// Specialization of BezierPostUtility_Helper for Vector-valued variables.
template<>
struct BezierPostUtility_Helper<Vector>
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
// Evaluates rVariable of pElement at local position rCoordinates; stores
// the interpolated value in rResult and returns it (defined out-of-line).
static Vector& CalculateOnPoint(const Variable<Vector>& rVariable,
Vector& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};
// Specialization of BezierPostUtility_Helper for 3-component array variables.
template<>
struct BezierPostUtility_Helper<array_1d<double, 3> >
{
typedef typename Element::GeometryType GeometryType;
typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
/// Interpolation on element
// Evaluates rVariable of pElement at local position rCoordinates; stores
// the interpolated value in rResult and returns it (defined out-of-line).
static array_1d<double, 3>& CalculateOnPoint(const Variable<array_1d<double, 3> >& rVariable,
array_1d<double, 3>& rResult, Element::Pointer& pElement, const CoordinatesArrayType& rCoordinates);
};
///@name Type Definitions
///@{
///@}
///@name Input and output
///@{
/// input stream function
// Placeholder required by the Kratos I/O convention: nothing is read and
// the stream is returned unchanged.
inline std::istream& operator >>(std::istream& rIStream, BezierPostUtility& rThis)
{
return rIStream;
}
/// output stream function
// Prints the utility's info line followed by its data section
// (PrintInfo, newline, PrintData).
inline std::ostream& operator <<(std::ostream& rOStream,
const BezierPostUtility& rThis)
{
rThis.PrintInfo(rOStream);
rOStream << std::endl;
rThis.PrintData(rOStream);
return rOStream;
}
///@}
///@} addtogroup block
}// namespace Kratos.
#undef DEBUG_LEVEL1
#undef DEBUG_LEVEL2
#undef DEBUG_MULTISOLVE
#undef DEBUG_GENERATE_MESH
#undef ENABLE_PROFILING
#endif
|
7754.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
/* Fills data[][] with a deterministic pattern and sets the normalization
   factor *float_n.
   NOTE(review): the loops run over the compile-time sizes M/N (the runtime
   parameters m/n are unused here), while the kernel below indexes data with
   i < _PB_N and j < _PB_M -- consistent only when M == N.  TODO confirm the
   intended row/column convention before changing either side. */
static
void init_array (int m, int n,
DATA_TYPE *float_n,
DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
int i, j;
*float_n = 1.2;
for (i = 0; i < M; i++)
for (j = 0; j < N; j++)
data[i][j] = ((DATA_TYPE) i*j) / M;
}
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int m,
                 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  int k;

  /* Walk the m*m matrix in row-major order via a single linear index;
     a newline is emitted every 20 printed values (including the first). */
  for (k = 0; k < m * m; k++) {
    fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[k / m][k % m]);
    if (k % 20 == 0) fprintf (stderr, "\n");
  }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
   including the call and return. */
/* Computes mean[j], centers data[][] column-wise, then fills the symmetric
   m x m covariance matrix symmat (upper triangle computed, mirrored to the
   lower one).
   FIX: the original nested `omp target teams distribute` regions inside one
   another (nesting a target region in a target region is non-conforming),
   attached a `schedule` clause to `distribute` (schedule belongs to the
   worksharing loop, not to distribute), and left the inner sequential loop
   variables shared across teams (a data race on i / j2).  Each loop nest is
   now a single combined `target teams distribute parallel for` construct
   with the inner loop variables explicitly privatized. */
static
void kernel_covariance(int m, int n,
                       DATA_TYPE float_n,
                       DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
                       DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
                       DATA_TYPE POLYBENCH_1D(mean,M,m))
{
  int i, j, j1, j2;
#pragma scop
  /* Determine mean of column vectors of input data matrix */
  {
#pragma omp target teams distribute parallel for thread_limit(128) schedule(static, 28) private(i)
    for (j = 0; j < _PB_M; j++)
      {
        mean[j] = 0.0;
        for (i = 0; i < _PB_N; i++)
          mean[j] += data[i][j];
        mean[j] /= float_n;
      }

    /* Center the column vectors (both loops are independent: collapse). */
#pragma omp target teams distribute parallel for collapse(2) thread_limit(128) schedule(static, 28)
    for (i = 0; i < _PB_N; i++)
      for (j = 0; j < _PB_M; j++)
        data[i][j] -= mean[j];

    /* Calculate the m * m covariance matrix.  Distinct (j1,j2) pairs write
       distinct entries (and their mirrors), so the j1 loop is race-free. */
#pragma omp target teams distribute parallel for thread_limit(128) schedule(static, 28) private(i, j2)
    for (j1 = 0; j1 < _PB_M; j1++)
      for (j2 = j1; j2 < _PB_M; j2++)
        {
          symmat[j1][j2] = 0.0;
          for (i = 0; i < _PB_N; i++)
            symmat[j1][j2] += data[i][j1] * data[i][j2];
          symmat[j2][j1] = symmat[j1][j2];
        }
  }
#pragma endscop
}
/* Benchmark driver: allocate, initialize, time the kernel, and print the
   live-out array so the compiler cannot eliminate the computation. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
fftw.c |
/*----------------------------------------------------------------*/
#include "hclfftw.h"
#include <sys/time.h>
#include <omp.h>
/*----------------------------------------------------------------*/
/*
 * Build a plan for 'm' independent 1D row FFTs of length 'n',
 * transforming X into Y (in place when X == Y).
 */
fftw_plan fftw1d_init_plan(
    const int sign,
    const int m, const int n,
    fftw_complex* X, fftw_complex* Y
)
{
    int len[1] = {n};

    /* rank-1 transform, batch of m rows, unit stride, rows n apart */
    return fftw_plan_many_dft(
        1, len, m,
        X, len, 1, n,
        Y, len, 1, n,
        sign, FFTW_ESTIMATE);
}
/*----------------------------------------------------------------*/
int
fftw2dlocal(
    const int sign,
    const unsigned int* m, const int n,
    fftw_complex* lMatrix,
    double* tGroups
)
{
    /* Single group: plan, execute and tear down '*m' row FFTs of length n,
       accumulating the elapsed wall time into tGroups[0]. */
    const double t_begin = omp_get_wtime();

    fftw_plan plan = fftw1d_init_plan(sign, *m, n, lMatrix, lMatrix);
    fftw_execute(plan);
    fftw_destroy_plan(plan);

    tGroups[0] += omp_get_wtime() - t_begin;
    return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal2(
    const int sign,
    const unsigned int* m, const int n,
    const int nThreadsPerGroup,
    fftw_complex* lMatrix,
    double* tGroups
)
{
    /* Plan and execute 2 independent groups of row FFTs.  Group g owns m[g]
       rows of length n, stored consecutively in lMatrix.  The original code
       hand-unrolled one `omp section` per group with ever-growing offset
       expressions; a loop over a plan array removes that duplication while
       keeping the timing scheme and printed output identical.  Offsets are
       accumulated in size_t to avoid unsigned-int overflow on large grids. */
    enum { NGROUPS = 2 };
    fftw_plan plans[NGROUPS];
    double etime, tstart, tend;
    struct timeval start, end;
    size_t offset = 0;
    int g;

    gettimeofday(&start, NULL);
    for (g = 0; g < NGROUPS; g++)
    {
        plans[g] = fftw1d_init_plan(sign, m[g], n,
                                    lMatrix + offset, lMatrix + offset);
        offset += (size_t)m[g] * n;
    }
    gettimeofday(&end, NULL);
    tstart = start.tv_sec + start.tv_usec/1000000.;
    tend = end.tv_sec + end.tv_usec/1000000.;
    etime = (tend - tstart);
    gettimeofday(&start, NULL);
    printf("g 2, t %d: Plan creation time %f\n",
           nThreadsPerGroup, etime);

    /* One thread per group; static,1 pins iteration g to thread g, matching
       the one-section-per-group behavior of the original. */
    #pragma omp parallel for num_threads(NGROUPS) schedule(static, 1)
    for (g = 0; g < NGROUPS; g++)
    {
        double ts = omp_get_wtime();
        fftw_execute(plans[g]);
        fftw_destroy_plan(plans[g]);
        tGroups[g] += etime + omp_get_wtime() - ts;
    }

    gettimeofday(&end, NULL);
    tstart = start.tv_sec + start.tv_usec/1000000.;
    tend = end.tv_sec + end.tv_usec/1000000.;
    printf("g 2, t %d: Plan execution time %f\n",
           nThreadsPerGroup, (tend - tstart));
    return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal4(
    const int sign,
    const unsigned int* m, const int n,
    const int nThreadsPerGroup,
    fftw_complex* lMatrix,
    double* tGroups
)
{
    /* Plan and execute 4 independent groups of row FFTs.  Group g owns m[g]
       rows of length n, stored consecutively in lMatrix.  The original code
       hand-unrolled one `omp section` per group with ever-growing offset
       expressions; a loop over a plan array removes that duplication while
       keeping the timing scheme and printed output identical.  Offsets are
       accumulated in size_t to avoid unsigned-int overflow on large grids. */
    enum { NGROUPS = 4 };
    fftw_plan plans[NGROUPS];
    double etime, tstart, tend;
    struct timeval start, end;
    size_t offset = 0;
    int g;

    gettimeofday(&start, NULL);
    for (g = 0; g < NGROUPS; g++)
    {
        plans[g] = fftw1d_init_plan(sign, m[g], n,
                                    lMatrix + offset, lMatrix + offset);
        offset += (size_t)m[g] * n;
    }
    gettimeofday(&end, NULL);
    tstart = start.tv_sec + start.tv_usec/1000000.;
    tend = end.tv_sec + end.tv_usec/1000000.;
    etime = (tend - tstart);
    gettimeofday(&start, NULL);
    printf("g 4, t %d: Plan creation time %f\n",
           nThreadsPerGroup, etime);

    /* One thread per group; static,1 pins iteration g to thread g, matching
       the one-section-per-group behavior of the original. */
    #pragma omp parallel for num_threads(NGROUPS) schedule(static, 1)
    for (g = 0; g < NGROUPS; g++)
    {
        double ts = omp_get_wtime();
        fftw_execute(plans[g]);
        fftw_destroy_plan(plans[g]);
        tGroups[g] += etime + omp_get_wtime() - ts;
    }

    gettimeofday(&end, NULL);
    tstart = start.tv_sec + start.tv_usec/1000000.;
    tend = end.tv_sec + end.tv_usec/1000000.;
    printf("g 4, t %d: Plan execution time %f\n",
           nThreadsPerGroup, (tend - tstart));
    return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal5(
    const int sign,
    const unsigned int* m, const int n,
    const int nThreadsPerGroup,
    fftw_complex* lMatrix,
    double* tGroups
)
{
    /* Plan and execute 5 independent groups of row FFTs.  Group g owns m[g]
       rows of length n, stored consecutively in lMatrix.  The original code
       hand-unrolled one `omp section` per group with ever-growing offset
       expressions; a loop over a plan array removes that duplication while
       keeping the timing scheme and printed output identical.  Offsets are
       accumulated in size_t to avoid unsigned-int overflow on large grids. */
    enum { NGROUPS = 5 };
    fftw_plan plans[NGROUPS];
    double etime, tstart, tend;
    struct timeval start, end;
    size_t offset = 0;
    int g;

    gettimeofday(&start, NULL);
    for (g = 0; g < NGROUPS; g++)
    {
        plans[g] = fftw1d_init_plan(sign, m[g], n,
                                    lMatrix + offset, lMatrix + offset);
        offset += (size_t)m[g] * n;
    }
    gettimeofday(&end, NULL);
    tstart = start.tv_sec + start.tv_usec/1000000.;
    tend = end.tv_sec + end.tv_usec/1000000.;
    etime = (tend - tstart);
    gettimeofday(&start, NULL);
    printf("g 5, t %d: Plan creation time %f\n",
           nThreadsPerGroup, etime);

    /* One thread per group; static,1 pins iteration g to thread g, matching
       the one-section-per-group behavior of the original. */
    #pragma omp parallel for num_threads(NGROUPS) schedule(static, 1)
    for (g = 0; g < NGROUPS; g++)
    {
        double ts = omp_get_wtime();
        fftw_execute(plans[g]);
        fftw_destroy_plan(plans[g]);
        tGroups[g] += etime + omp_get_wtime() - ts;
    }

    gettimeofday(&end, NULL);
    tstart = start.tv_sec + start.tv_usec/1000000.;
    tend = end.tv_sec + end.tv_usec/1000000.;
    printf("g 5, t %d: Plan execution time %f\n",
           nThreadsPerGroup, (tend - tstart));
    return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal10(
    const int sign,
    const unsigned int* m, const int n,
    const int nThreadsPerGroup,
    fftw_complex* lMatrix,
    double* tGroups
)
{
    /* Plan and execute 10 independent groups of row FFTs.  Group g owns m[g]
       rows of length n, stored consecutively in lMatrix.  The original code
       hand-unrolled one `omp section` per group with ever-growing offset
       expressions; a loop over a plan array removes that duplication while
       keeping the timing scheme and printed output identical.  Offsets are
       accumulated in size_t to avoid unsigned-int overflow on large grids. */
    enum { NGROUPS = 10 };
    fftw_plan plans[NGROUPS];
    double etime, tstart, tend;
    struct timeval start, end;
    size_t offset = 0;
    int g;

    gettimeofday(&start, NULL);
    for (g = 0; g < NGROUPS; g++)
    {
        plans[g] = fftw1d_init_plan(sign, m[g], n,
                                    lMatrix + offset, lMatrix + offset);
        offset += (size_t)m[g] * n;
    }
    gettimeofday(&end, NULL);
    tstart = start.tv_sec + start.tv_usec/1000000.;
    tend = end.tv_sec + end.tv_usec/1000000.;
    etime = (tend - tstart);
    gettimeofday(&start, NULL);
    printf("g 10, t %d: Plan creation time %f\n",
           nThreadsPerGroup, etime);

    /* One thread per group; static,1 pins iteration g to thread g, matching
       the one-section-per-group behavior of the original. */
    #pragma omp parallel for num_threads(NGROUPS) schedule(static, 1)
    for (g = 0; g < NGROUPS; g++)
    {
        double ts = omp_get_wtime();
        fftw_execute(plans[g]);
        fftw_destroy_plan(plans[g]);
        tGroups[g] += etime + omp_get_wtime() - ts;
    }

    gettimeofday(&end, NULL);
    tstart = start.tv_sec + start.tv_usec/1000000.;
    tend = end.tv_sec + end.tv_usec/1000000.;
    printf("g 10, t %d: Plan execution time %f\n",
           nThreadsPerGroup, (tend - tstart));
    return 0;
}
/*----------------------------------------------------------------*/
int
fftw2dlocal20(
const int sign,
const unsigned int* m, const int n,
const int nThreadsPerGroup,
fftw_complex* lMatrix,
double* tGroups
)
{
double etime, tstart, tend;
struct timeval start, end;
gettimeofday(&start, NULL);
fftw_plan plan1 = fftw1d_init_plan(
sign, m[0], n,
lMatrix, lMatrix);
fftw_plan plan2 = fftw1d_init_plan(
sign, m[1], n,
lMatrix + m[0]*n,
lMatrix + m[0]*n);
fftw_plan plan3 = fftw1d_init_plan(
sign, m[2], n,
lMatrix + (m[0]+m[1])*n,
lMatrix + (m[0]+m[1])*n);
fftw_plan plan4 = fftw1d_init_plan(
sign, m[3], n,
lMatrix + (m[0]+m[1]+m[2])*n,
lMatrix + (m[0]+m[1]+m[2])*n);
fftw_plan plan5 = fftw1d_init_plan(
sign, m[4], n,
lMatrix + (m[0]+m[1]+m[2]+m[3])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3])*n);
fftw_plan plan6 = fftw1d_init_plan(
sign, m[5], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4])*n);
fftw_plan plan7 = fftw1d_init_plan(
sign, m[6], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5])*n);
fftw_plan plan8 = fftw1d_init_plan(
sign, m[7], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6])*n);
fftw_plan plan9 = fftw1d_init_plan(
sign, m[8], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7])*n);
fftw_plan plan10 = fftw1d_init_plan(
sign, m[9], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8])*n);
fftw_plan plan11 = fftw1d_init_plan(
sign, m[10], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9])*n);
fftw_plan plan12 = fftw1d_init_plan(
sign, m[11], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10])*n);
fftw_plan plan13 = fftw1d_init_plan(
sign, m[12], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11])*n);
fftw_plan plan14 = fftw1d_init_plan(
sign, m[13], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12])*n);
fftw_plan plan15 = fftw1d_init_plan(
sign, m[14], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13])*n);
fftw_plan plan16 = fftw1d_init_plan(
sign, m[15], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14])*n);
fftw_plan plan17 = fftw1d_init_plan(
sign, m[16], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15])*n);
fftw_plan plan18 = fftw1d_init_plan(
sign, m[17], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16])*n);
fftw_plan plan19 = fftw1d_init_plan(
sign, m[18], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16]+m[17])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16]+m[17])*n);
fftw_plan plan20 = fftw1d_init_plan(
sign, m[19], n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16]+m[17]+m[18])*n,
lMatrix + (m[0]+m[1]+m[2]+m[3]+m[4]+m[5]+m[6]+m[7]+m[8]+m[9]+m[10]+m[11]+m[12]+m[13]+m[14]+m[15]+m[16]+m[17]+m[18])*n);
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
etime = (tend - tstart);
gettimeofday(&start, NULL);
printf("g 20, t %d: Plan creation time %f\n",
nThreadsPerGroup, etime);
#pragma omp parallel sections num_threads(20)
{
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan1);
fftw_destroy_plan(plan1);
tGroups[0] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan2);
fftw_destroy_plan(plan2);
tGroups[1] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan3);
fftw_destroy_plan(plan3);
tGroups[2] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan4);
fftw_destroy_plan(plan4);
tGroups[3] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan5);
fftw_destroy_plan(plan5);
tGroups[4] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan6);
fftw_destroy_plan(plan6);
tGroups[5] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan7);
fftw_destroy_plan(plan7);
tGroups[6] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan8);
fftw_destroy_plan(plan8);
tGroups[7] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan9);
fftw_destroy_plan(plan9);
tGroups[8] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan10);
fftw_destroy_plan(plan10);
tGroups[9] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan11);
fftw_destroy_plan(plan11);
tGroups[10] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan12);
fftw_destroy_plan(plan12);
tGroups[11] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan13);
fftw_destroy_plan(plan13);
tGroups[12] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan14);
fftw_destroy_plan(plan14);
tGroups[13] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan15);
fftw_destroy_plan(plan15);
tGroups[14] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan16);
fftw_destroy_plan(plan16);
tGroups[15] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan17);
fftw_destroy_plan(plan17);
tGroups[16] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan18);
fftw_destroy_plan(plan18);
tGroups[17] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan19);
fftw_destroy_plan(plan19);
tGroups[18] += etime + omp_get_wtime() - ts;
}
#pragma omp section
{
double ts = omp_get_wtime();
fftw_execute(plan20);
fftw_destroy_plan(plan20);
tGroups[19] += etime + omp_get_wtime() - ts;
}
}
gettimeofday(&end, NULL);
tstart = start.tv_sec + start.tv_usec/1000000.;
tend = end.tv_sec + end.tv_usec/1000000.;
printf("g 20, t %d: Plan execution time %f\n",
nThreadsPerGroup, (tend - tstart));
return 0;
}
/*----------------------------------------------------------------*/
/* Dispatches a local FFT to the implementation variant matching the
 * requested number of thread groups. Supported counts: 1, 2, 4, 5, 10, 20;
 * any other value is silently a no-op (same as the original behavior).
 * Always returns 0. */
int fftwlocal(
const int sign,
const unsigned int* m,
const int n,
const int nThreadsPerGroup,
const int nThreadGroups,
fftw_complex *lMatrix,
double* tGroups
)
{
switch (nThreadGroups)
{
case 1:
/* single-group variant takes no per-group thread count */
fftw2dlocal(sign, m, n, lMatrix, tGroups);
break;
case 2:
fftw2dlocal2(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
break;
case 4:
fftw2dlocal4(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
break;
case 5:
fftw2dlocal5(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
break;
case 10:
fftw2dlocal10(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
break;
case 20:
fftw2dlocal20(sign, m, n, nThreadsPerGroup, lMatrix, tGroups);
break;
default:
break; /* unsupported group count: do nothing */
}
return 0;
}
/*----------------------------------------------------------------*/
// fwk_render.h
// naive rendering framework
// - rlyeh, public domain
//
// IQM skeletal meshes by @lsalzman (public domain) - https://bit.ly/2OQh0Me
// SH code by @ands (public domain) - https://github.com/ands/spherical_harmonics_playground
// SHM code by @jarikomppa (unlicensed) - https://github.com/jarikomppa/shadertoolkit
#ifndef RENDER_H
#define RENDER_H
typedef unsigned handle; // GLuint
// -----------------------------------------------------------------------------
// colors
uint32_t rgba( uint8_t r, uint8_t g, uint8_t b, uint8_t a );
uint32_t bgra( uint8_t r, uint8_t g, uint8_t b, uint8_t a );
float alpha( uint32_t rgba );
#define RGBX(rgb,x) ((x)<<24|((rgb>>16)&255)<<16|((rgb>>8)&255)<<8|((rgb>>0)&255)<<0)
#define BLACK RGBX(0x000000,255)
#define RED RGBX(0xFF004D,255)
#define GREEN RGBX(0x00B543,255)
#define BLUE RGBX(0x065AB5,255)
#define ORANGE RGBX(0xFF6C24,255)
#define PURPLE RGBX(0x7E2553,255)
#define YELLOW RGBX(0xFFEC27,255)
#define WHITE RGBX(0xFFF1E8,255)
#define GRAY RGBX(0x725158,255)
// -----------------------------------------------------------------------------
// images
enum {
IMAGE_R = 0x01000,
IMAGE_RG = 0x02000,
IMAGE_RGB = 0x04000,
IMAGE_RGBA = 0x08000,
IMAGE_FLIP = 0x10000,
};
// CPU-side image: dimensions + component count + pixel storage.
// Anonymous unions give each field two spellings (x/w, y/h, n/comps);
// the `pixels*` members alias one allocation at different element widths.
typedef struct image_t {
union { unsigned x, w; }; // width in pixels
union { unsigned y, h; }; // height in pixels
union { unsigned n, comps; }; // components per pixel (presumably 1..4, matching IMAGE_R..IMAGE_RGBA — confirm)
union { void *pixels; unsigned char *pixels8; unsigned short *pixels16; unsigned *pixels32; float *pixelsf; }; // pixel data; which alias is valid depends on the source format
} image_t;
image_t image(const char *pathfile, int flags);
image_t image_from_mem(const char *ptr, int len, int flags);
void image_destroy(image_t *img);
// -----------------------------------------------------------------------------
// textures
enum {
// UNIT[0..7]
TEXTURE_BC1 = 8, // DXT1, RGB with 8:1 compression ratio (+ optional 1bpp for alpha)
TEXTURE_BC2 = 16, // DXT3, RGBA with 4:1 compression ratio (BC1 for RGB + 4bpp for alpha)
TEXTURE_BC3 = 32, // DXT5, RGBA with 4:1 compression ratio (BC1 for RGB + BC4 for A)
// TEXTURE_BC4, // Alpha
TEXTURE_NEAREST = 0,
TEXTURE_LINEAR = 64,
TEXTURE_MIPMAPS = 128,
TEXTURE_EDGE = 0,
TEXTURE_BORDER = 0x100,
TEXTURE_REPEAT = 0x200,
TEXTURE_BYTE = 0,
TEXTURE_FLOAT = 0x400,
TEXTURE_COLOR = 0,
TEXTURE_DEPTH = 0x800,
TEXTURE_R = IMAGE_R,
TEXTURE_RG = IMAGE_RG,
TEXTURE_RGB = IMAGE_RGB,
TEXTURE_RGBA = IMAGE_RGBA,
TEXTURE_FLIP = IMAGE_FLIP,
// @fixme
TEXTURE_SRGB = 1 << 24,
TEXTURE_BGR = 1 << 25,
TEXTURE_ARRAY = 1 << 26,
};
// GPU texture handle plus the metadata it was created with.
typedef struct {
union { unsigned x, w; }; // width in texels
union { unsigned y, h; }; // height in texels
union { unsigned n, bpp; }; // component count (bpp alias) — mirrors texture_create()'s `n`
handle id; // GL texture object name
unsigned flags; // TEXTURE_* creation flags
} texture_t;
texture_t texture(const char* filename, int flags);
texture_t texture_from_mem(const char* ptr, int len, int flags);
texture_t texture_create(unsigned w, unsigned h, unsigned n, void *pixels, int flags);
texture_t texture_checker();
void texture_destroy(texture_t *t);
// textureLod(filename, dir, lod);
//void texture_add_loader( int(*loader)(const char *filename, int *w, int *h, int *bpp, int reqbpp, int flags) );
unsigned texture_update(texture_t *t, unsigned w, unsigned h, unsigned n, void *pixels, int flags);
// -----------------------------------------------------------------------------
// fullscreen quads
void fullscreen_rgb_quad( texture_t texture_rgb, float gamma );
void fullscreen_ycbcr_quad( texture_t texture_YCbCr[3], float gamma );
// -----------------------------------------------------------------------------
// sprites
void sprite( texture_t texture, float px, float py, float pz, float rot );
void sprite_ex( texture_t texture,
float px, float py, float pz, float rotation, // position(x,y,depth sort), angle
float ox, float oy, float sx, float sy, // offset(x,y), scale(x,y)
int additive, uint32_t rgba, // is_additive, tint color
float frame, float xcells, float ycells // frame_number in a 8x4 spritesheet
);
// -----------------------------------------------------------------------------
// cubemaps
typedef struct cubemap_t {
unsigned id; // texture id
vec3 sh[9]; // precomputed spherical harmonics coefficients
} cubemap_t;
cubemap_t cubemap( const image_t image, int flags ); // 1 equirectangular panorama
cubemap_t cubemap6( const image_t images[6], int flags ); // 6 cubemap faces
void cubemap_destroy(cubemap_t *c);
cubemap_t* cubemap_get_active();
// -----------------------------------------------------------------------------
// fbos
unsigned fbo( unsigned texture_color, unsigned texture_depth, int wr_flags );
void fbo_bind(unsigned id);
void fbo_unbind();
void fbo_destroy(unsigned id);
// -----------------------------------------------------------------------------
// shadowmaps
// State for one shadow-map render pass: FBO + depth texture plus the
// matrices used to render the scene from the light's point of view.
typedef struct shadowmap_t {
mat44 shadowmatrix; // world -> shadow-map texture space (set by shadowmap_set_shadowmatrix)
mat44 mvp;
mat44 mv;
mat44 proj; // light projection (see shadowmatrix_proj / shadowmatrix_ortho)
vec4 light_position;
int saved_fb; // saved GL framebuffer binding — presumably restored by shadowmap_end(); confirm
int saved_viewport[4]; // saved viewport — presumably restored by shadowmap_end(); confirm
handle fbo, texture; // render target and its shadow texture
int texture_width; // square map resolution; 1024 by default per shadowmap()
} shadowmap_t;
shadowmap_t shadowmap(int texture_width); // = 1024
void shadowmap_destroy(shadowmap_t *s);
void shadowmap_set_shadowmatrix(shadowmap_t *s, vec3 aLightPos, vec3 aLightAt, vec3 aLightUp, const mat44 projection);
void shadowmap_begin(shadowmap_t *s);
void shadowmap_end(shadowmap_t *s);
// shadowmap utils
void shadowmatrix_proj(mat44 shm_proj, float aLightFov, float znear, float zfar);
void shadowmatrix_ortho(mat44 shm_proj, float left, float right, float bottom, float top, float znear, float zfar);
// -----------------------------------------------------------------------------
// shaders
unsigned shader(const char *vs, const char *fs, const char *attribs, const char *fragcolor);
unsigned shader_bind(unsigned program);
void shader_int(const char *uniform, int i);
void shader_float(const char *uniform, float f);
void shader_vec2(const char *uniform, vec2 v);
void shader_vec3(const char *uniform, vec3 v);
void shader_vec4(const char *uniform, vec4 v);
void shader_mat44(const char *uniform, mat44 m);
void shader_texture(const char *sampler, unsigned texture, unsigned unit);
unsigned shader_get_active();
void shader_destroy(unsigned shader);
// -----------------------------------------------------------------------------
// meshes (@fixme: deprecate?)
enum MESH_FLAGS {
MESH_STATIC = 0, // STATIC, DYNAMIC, STREAM // zero|single|many updates per frame
MESH_STREAM = 1,
MESH_TRIANGLE_STRIP = 2,
};
typedef struct mesh_t {
handle vao, vbo, ibo;
unsigned vertex_count;
unsigned index_count;
unsigned flags;
} mesh_t;
mesh_t mesh_create(const char *format, int vertex_stride,int vertex_count,const void *interleaved_vertex_data, int index_count,const void *index_data, int flags);
void mesh_upgrade(mesh_t *m, const char *format, int vertex_stride,int vertex_count,const void *interleaved_vertex_data, int index_count,const void *index_data, int flags);
void mesh_push_state(mesh_t *m, unsigned program, unsigned texture_id, float model[16], float view[16], float proj[16], unsigned billboard);
void mesh_pop_state(mesh_t *m);
void mesh_render(mesh_t *m);
void mesh_destroy(mesh_t *m);
aabb mesh_bounds(mesh_t *m);
// -----------------------------------------------------------------------------
// materials (@todo)
//
// typedef struct material_t {
// const char *name;
// texture_t texture;
// uint32_t color;
// } material_t;
// -----------------------------------------------------------------------------
// models
enum {
MODEL_NO_ANIMATIONS = 1,
MODEL_NO_MESHES = 2,
MODEL_NO_TEXTURES = 4,
};
// Loaded model + cached counts; `iqm` owns the actual mesh/animation data.
typedef struct model_t {
struct iqm_t *iqm; // opaque IQM payload (meshes, joints, anims)
unsigned num_meshes;
unsigned num_triangles;
unsigned num_joints; // num_poses;
unsigned num_anims;
unsigned num_frames;
float curframe; // current animation frame; model_animate() returns the advanced value
mat44 pivot; // root transform applied to the model
} model_t;
model_t model(const char *filename, int flags);
model_t model_from_mem(const void *mem, int sz, int flags);
float model_animate(model_t, float curframe);
float model_animate_clip(model_t, float curframe, int minframe, int maxframe, bool loop);
aabb model_aabb(model_t, mat44 transform);
void model_render2(model_t, mat44 proj, mat44 view, mat44 model, int shader);
void model_render(model_t, mat44 proj, mat44 view, mat44 model);
void model_destroy(model_t);
// -----------------------------------------------------------------------------
// skyboxes
typedef struct skybox_t {
handle program;
mesh_t geometry;
cubemap_t cubemap;
int flags;
} skybox_t;
skybox_t skybox(const char *panorama_or_cubemap_folder, int flags);
int skybox_push_state(skybox_t *sky, mat44 proj, mat44 view);
int skybox_pop_state(skybox_t *sky);
void skybox_destroy(skybox_t *sky);
// -----------------------------------------------------------------------------
// post-fxs
void viewport_color(vec3 color);
void viewport_clear(bool color, bool depth);
void viewport_clip(vec2 from, vec2 to);
void fx_load(const char *file);
void fx_begin();
void fx_end();
void fx_enable(int pass, int enabled);
int fx_enabled(int pass);
void fx_enable_all(int enabled);
char * fx_name(int pass);
// -----------------------------------------------------------------------------
// utils
void* screenshot(unsigned components); // 3 RGB, 4 RGBA, -3 BGR, -4 BGRA
#endif // RENDER_H
#ifdef RENDER_C
#pragma once
// -----------------------------------------------------------------------------
// opengl
#define GL_COMPRESSED_RGB_S3TC_DXT1_EXT 0x83F0
#define GL_COMPRESSED_RGBA_S3TC_DXT1_EXT 0x83F1
#define GL_COMPRESSED_RGBA_S3TC_DXT3_EXT 0x83F2
#define GL_COMPRESSED_RGBA_S3TC_DXT5_EXT 0x83F3
#define GL_DEBUG_SEVERITY_HIGH 0x9146
#define GL_DEBUG_SEVERITY_NOTIFICATION 0x826B
#define GL_DEBUG_SOURCE_API 0x8246
#define GL_DEBUG_TYPE_ERROR 0x824C
//
// GL_KHR_debug message callback: filters a whitelist of known-noisy benign
// message IDs, decodes the raw GL enums into table indices, and logs the
// driver message. The decoded strings are kept (but currently unused by the
// log line) for future richer logging.
void glDebugCallback(uint32_t source, uint32_t type, uint32_t id, uint32_t severity, int32_t length, const char * message, void * userdata) {
// whitelisted codes (also: 131169, 131204).
if( id == 131154 ) return; // Pixel-path performance warning: Pixel transfer is synchronized with 3D rendering.
if( id == 131185 ) return; // Buffer object 2 (bound to GL_ELEMENT_ARRAY_BUFFER_ARB, usage hint is GL_STATIC_DRAW) will use VIDEO memory as the source for buffer object operations
if( id == 131218 ) return; // Program/shader state performance warning: Vertex shader in program 9 is being recompiled based on GL state.
if( id == 2 ) return; // INFO: API_ID_RECOMPILE_FRAGMENT_SHADER performance warning has been generated. Fragment shader recompiled due to state change. [ID: 2]
const char * GL_ERROR_SOURCE[] = { "API", "WINDOW SYSTEM", "SHADER COMPILER", "THIRD PARTY", "APPLICATION", "OTHER" };
const char * GL_ERROR_SEVERITY[] = { "HIGH", "MEDIUM", "LOW", "NOTIFICATION" };
const char * GL_ERROR_TYPE[] = { "ERROR", "DEPRECATED BEHAVIOR", "UNDEFINED BEHAVIOUR", "PORTABILITY", "PERFORMANCE", "OTHER" }; // fix: was "UNDEFINED DEHAVIOUR"
// remap raw GL enums to 0-based indices into the tables above
severity = severity == GL_DEBUG_SEVERITY_NOTIFICATION ? 3 : severity - GL_DEBUG_SEVERITY_HIGH;
source = source - GL_DEBUG_SOURCE_API;
type = type - GL_DEBUG_TYPE_ERROR;
// silence unused-variable warnings; the log line below only prints message+id
(void)GL_ERROR_SOURCE; (void)GL_ERROR_SEVERITY; (void)GL_ERROR_TYPE;
(void)length; (void)userdata;
PRINTF( "!%s [ID: %u]\n", message, id );
// PANIC( "!%s [ID: %u]\n", message, id );
}
// Installs glDebugCallback as the GL debug-message sink (runs at most once).
// The entry point is resolved dynamically via GLFW so the code still links on
// loaders that do not export glDebugMessageCallback at compile time.
void glDebugEnable() {
do_once {
typedef void (*GLDEBUGPROC)(uint32_t, uint32_t, uint32_t, uint32_t, int32_t, const char *, const void *);
typedef void (*GLDEBUGMESSAGECALLBACKPROC)(GLDEBUGPROC, const void *);
void (*glDebugMessageCallback)(GLDEBUGPROC, const void *) = (GLDEBUGMESSAGECALLBACKPROC)glfwGetProcAddress("glDebugMessageCallback");
// fix: glfwGetProcAddress may return NULL on contexts without KHR_debug /
// GL 4.3; calling through a null function pointer crashed here before.
if( glDebugMessageCallback ) {
glEnable(GL_DEBUG_OUTPUT_SYNCHRONOUS_ARB);
glDebugMessageCallback((GLDEBUGPROC)glDebugCallback, NULL);
}
}
}
// Per-frame GL setup: resets the viewport to the current window size,
// clears color+depth buffers, and (re-)enables alpha blending.
void glNewFrame() {
glViewport(0, 0, window_width(), window_height()); // re-query every frame so window resizes are tracked
//glClearColor(0,0,0,1);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glEnable(GL_BLEND); // the blend func itself is expected to be set elsewhere
}
// ----------------------------------------------------------------------------
// embedded shaders (@fixme: promote to files?)
// Lit shadow-mapping GLSL chunk: provides `shadowmap(vpeye, vneye, uv, sc)`
// implementing a variance-shadow-map (VSM) lookup plus one hard-coded point
// light. Compiled in two flavours via the VSMCUBE preprocessor switch
// (cubemap vs 2D shadow map). Spliced into other shaders via
// {{include-shadowmap}}; the `view` matrix is expected from the including
// shader.
static const char *const fs_0_0_shadowmap_lit =
// "#version 140 // inverse() requires v140\n"
"//" FILELINE "\n"
// "uniform mat4 view = mat4(1.0);\n"
"uniform vec3 lightPos = vec3(1.0);\n"
"uniform float doTexture = 1.;\n"
#if VSMCUBE
"uniform samplerCube shadowMap;\n" // VSMCUBE
#else
"uniform sampler2D shadowMap;\n" // !VSMCUBE
#endif
"struct light {\n"
" vec3 position; // world-space\n"
" vec4 diffuse;\n"
" vec4 specular;\n"
" float constantAttenuation, linearAttenuation, quadraticAttenuation;\n"
"};\n"
"light light0 = light(\n"
" lightPos,\n"
" vec4(1,1,1,1), // diffuse\n"
" vec4(1,1,1,1), // specular\n"
" 1.0, 0.0, 0.0 // attenuation (const, linear, quad)\n"
");\n"
"// From http://fabiensanglard.net/shadowmappingVSM/index.php\n"
#if VSMCUBE
"float chebyshevUpperBound(float distance, vec3 dir) {\n"
" distance = distance/20 ;\n"
" vec2 moments = texture(shadowMap, dir).rg;\n"
#else
"float chebyshevUpperBound(float distance, vec4 scPostW) {\n"
" vec2 moments = texture(shadowMap,scPostW.xy).rg;\n"
#endif
" // Surface is fully lit. as the current fragment is before the light occluder\n"
" if (distance <= moments.x)\n"
" return 1.0;\n"
" // The fragment is either in shadow or penumbra. We now use chebyshev's upperBound to check\n"
" // How likely this pixel is to be lit (p_max)\n"
" float variance = moments.y - (moments.x*moments.x);\n"
" //variance = max(variance, 0.000002);\n"
" variance = max(variance, 0.00002);\n"
" float d = distance - moments.x;\n"
" float p_max = variance / (variance + d*d);\n"
" return p_max;\n"
"}\n"
"vec4 shadowmap(in vec4 vpeye, in vec4 vneye, in vec2 uv, in vec4 sc) {\n"
#ifndef VSMCUBE
" return vec4(1.);\n"
#endif
" vec3 fragment = vec3(vpeye);\n"
" vec3 normal = vec3(normalize(vneye));\n"
" vec3 viewDir = normalize(-fragment);\n"
#if VSMCUBE
" // Diffuse lighting\n"
" // Convert to eye-space (TODO could precompute)\n"
" vec3 light_ = vec3(view * vec4(light0.position, 1.0));\n"
" // Vectors\n"
" vec3 fragmentToLight = light_ - fragment;\n"
" vec3 fragmentToLightDir = normalize(fragmentToLight);\n"
" // Shadows\n"
" vec4 fragmentToLight_world = inverse(view) * vec4(fragmentToLightDir, 0.0);\n"
" float shadowFactor = chebyshevUpperBound(length(fragmentToLight), -fragmentToLight_world.xyz);\n"
#else
" // Shadows\n"
" vec4 scPostW = sc / sc.w;\n"
" scPostW = scPostW * 0.5 + 0.5;\n"
" float shadowFactor = 1.0; // Not in shadow\n"
" bool outsideShadowMap = sc.w <= 0.0f || (scPostW.x < 0 || scPostW.y < 0) || (scPostW.x >= 1 || scPostW.y >= 1);\n"
" if (!outsideShadowMap) {\n"
" shadowFactor = chebyshevUpperBound(scPostW.z, scPostW);\n"
" }\n"
#endif
" // Lighting\n"
" // Convert to eye-space\n"
" vec3 light = vec3(view * vec4(light0.position, 1.0));\n"
" vec4 diffColor = vec4(1,1,1,1);\n"
#if VSMCUBE
" if(doTexture != 0) diffColor = vec4(vec3(texture(shadowMap, -fragmentToLight_world.xyz).r), 1.0);\n"
#else
" if(doTexture != 0) diffColor = vec4(vec3(texture(shadowMap, vec2(uv.x, 1.0 - uv.y)).r), 1.0);\n"
#endif
#if 1
" vec3 positionToLight = light - fragment;\n"
" vec3 lightDir = normalize(positionToLight);\n"
" // Angle between fragment-normal and incoming light\n"
" float cosAngIncidence = dot(lightDir, normal);\n"
" cosAngIncidence = clamp(cosAngIncidence, 0, 1);\n"
" float attenuation = 1.0f;\n"
" attenuation = 1.0 / (light0.constantAttenuation + light0.linearAttenuation * length(positionToLight) + light0.quadraticAttenuation * pow(length(positionToLight),2));\n"
" vec4 diffuse = diffColor * light0.diffuse * cosAngIncidence * attenuation;\n"
" vec4 total_lighting = vec4(0.0);\n" // fix: was declared uninitialized — GLSL does not zero-init locals, so the `+=` below read an undefined value
" total_lighting += vec4(0.1, 0.1, 0.1, 1.0) * diffColor; // Ambient\n"
" total_lighting += diffuse * shadowFactor; // Diffuse\n"
#else
" vec4 total_lighting = diffColor;\n"
#endif
" return vec4(clamp(vec3(total_lighting), 0., 1.), 1.0);\n"
"}\n"; // fix: dropped stray `;` after the GLSL function body (invalid at global scope on strict compilers)
// Unlit stub of the shadow-map GLSL chunk: same `shadowmap()` entry point as
// fs_0_0_shadowmap_lit, but always returns full brightness (no shadowing).
static const char *const fs_0_0_shadowmap_unlit = "//" FILELINE "\n"
// "uniform mat4 view = mat4(1.0);\n"
"uniform vec3 lightPos = vec3(1.0);\n"
"uniform float doTexture = 0.;\n"
"uniform sampler2D shadowMap;\n"
"vec4 shadowmap(in vec4 vpeye, in vec4 vneye, in vec2 Texcoord, in vec4 sc) {\n"
" return vec4(1.);\n"
"}\n"; // fix: dropped stray `;` after the GLSL function body (invalid at global scope on strict compilers)
static const char *const vs_3_3_skybox = "//" FILELINE "\n"
"uniform mat4 u_mvp;\n"
"in vec3 att_position;\n"
"out vec3 v_direction;\n"
"void main() {\n"
" vec4 position = u_mvp * vec4(att_position, 0.0);\n"
" gl_Position = position.xyww;\n"
" v_direction = att_position;\n"
"}\n";
static const char *const fs_3_4_skybox = "//" FILELINE "\n"
"uniform samplerCube u_cubemap;\n"
"in vec3 v_direction;\n"
"out vec4 fragcolor;\n"
"void main() {\n"
" fragcolor = vec4(texture(u_cubemap, v_direction).rgb, 1.0);\n"
"}\n";
static const char *const fs_3_4_skybox_rayleigh = "//" FILELINE "\n"
"uniform vec3 uSunPos = vec3( 0, 0.1, -1 ); // = [0, Math.cos(theta) * 0.3 + 0.2, -1];\n"
"in vec3 v_direction;\n"
"out vec4 fragcolor;\n"
"vec3 atmosphere(vec3 r, vec3 r0, vec3 pSun, float iSun, float rPlanet, float rAtmos, vec3 kRlh, float kMie, float shRlh, float shMie, float g);\n"
"void main() {\n"
" vec3 color = atmosphere(\n"
" normalize(v_direction), // normalized ray direction\n"
" vec3(0,6372e3,0), // ray origin\n"
" uSunPos, // position of the sun\n"
" 22.0, // intensity of the sun\n"
" 6371e3, // radius of the planet in meters\n"
" 6471e3, // radius of the atmosphere in meters\n"
" vec3(5.5e-6, 13.0e-6, 22.4e-6), // Rayleigh scattering coefficient\n"
" 21e-6, // Mie scattering coefficient\n"
" 8e3, // Rayleigh scale height\n"
" 1.2e3, // Mie scale height\n"
" 0.758 // Mie preferred scattering direction\n"
" );\n"
" // Apply exposure.\n"
" color = 1.0 - exp(-1.0 * color);\n"
" fragcolor = vec4(color, 1);\n"
"}\n"
"// [src] https://github.com/wwwtyro/glsl-atmosphere by wwwtyro (Unlicensed)\n"
"// For more information, please refer to <http://unlicense.org>\n"
"#define PI 3.141592\n"
"#define iSteps 16\n"
"#define jSteps 8\n"
"vec2 rsi(vec3 r0, vec3 rd, float sr) {\n"
" // ray-sphere intersection that assumes\n"
" // the sphere is centered at the origin.\n"
" // No intersection when result.x > result.y\n"
" float a = dot(rd, rd);\n"
" float b = 2.0 * dot(rd, r0);\n"
" float c = dot(r0, r0) - (sr * sr);\n"
" float d = (b*b) - 4.0*a*c;\n"
" if (d < 0.0) return vec2(1e5,-1e5);\n"
" return vec2(\n"
" (-b - sqrt(d))/(2.0*a),\n"
" (-b + sqrt(d))/(2.0*a)\n"
" );\n"
"}\n"
"vec3 atmosphere(vec3 r, vec3 r0, vec3 pSun, float iSun, float rPlanet, float rAtmos, vec3 kRlh, float kMie, float shRlh, float shMie, float g) {\n"
" // Normalize the sun and view directions.\n"
" pSun = normalize(pSun);\n"
" r = normalize(r);\n"
" // Calculate the step size of the primary ray.\n"
" vec2 p = rsi(r0, r, rAtmos);\n"
" if (p.x > p.y) return vec3(0,0,0);\n"
" p.y = min(p.y, rsi(r0, r, rPlanet).x);\n"
" float iStepSize = (p.y - p.x) / float(iSteps);\n"
" // Initialize the primary ray time.\n"
" float iTime = 0.0;\n"
" // Initialize accumulators for Rayleigh and Mie scattering.\n"
" vec3 totalRlh = vec3(0,0,0);\n"
" vec3 totalMie = vec3(0,0,0);\n"
" // Initialize optical depth accumulators for the primary ray.\n"
" float iOdRlh = 0.0;\n"
" float iOdMie = 0.0;\n"
" // Calculate the Rayleigh and Mie phases.\n"
" float mu = dot(r, pSun);\n"
" float mumu = mu * mu;\n"
" float gg = g * g;\n"
" float pRlh = 3.0 / (16.0 * PI) * (1.0 + mumu);\n"
" float pMie = 3.0 / (8.0 * PI) * ((1.0 - gg) * (mumu + 1.0)) / (pow(1.0 + gg - 2.0 * mu * g, 1.5) * (2.0 + gg));\n"
" // Sample the primary ray.\n"
" for (int i = 0; i < iSteps; i++) {\n"
" // Calculate the primary ray sample position.\n"
" vec3 iPos = r0 + r * (iTime + iStepSize * 0.5);\n"
" // Calculate the height of the sample.\n"
" float iHeight = length(iPos) - rPlanet;\n"
" // Calculate the optical depth of the Rayleigh and Mie scattering for this step.\n"
" float odStepRlh = exp(-iHeight / shRlh) * iStepSize;\n"
" float odStepMie = exp(-iHeight / shMie) * iStepSize;\n"
" // Accumulate optical depth.\n"
" iOdRlh += odStepRlh;\n"
" iOdMie += odStepMie;\n"
" // Calculate the step size of the secondary ray.\n"
" float jStepSize = rsi(iPos, pSun, rAtmos).y / float(jSteps);\n"
" // Initialize the secondary ray time.\n"
" float jTime = 0.0;\n"
" // Initialize optical depth accumulators for the secondary ray.\n"
" float jOdRlh = 0.0;\n"
" float jOdMie = 0.0;\n"
" // Sample the secondary ray.\n"
" for (int j = 0; j < jSteps; j++) {\n"
" // Calculate the secondary ray sample position.\n"
" vec3 jPos = iPos + pSun * (jTime + jStepSize * 0.5);\n"
" // Calculate the height of the sample.\n"
" float jHeight = length(jPos) - rPlanet;\n"
" // Accumulate the optical depth.\n"
" jOdRlh += exp(-jHeight / shRlh) * jStepSize;\n"
" jOdMie += exp(-jHeight / shMie) * jStepSize;\n"
" // Increment the secondary ray time.\n"
" jTime += jStepSize;\n"
" }\n"
" // Calculate attenuation.\n"
" vec3 attn = exp(-(kMie * (iOdMie + jOdMie) + kRlh * (iOdRlh + jOdRlh)));\n"
" // Accumulate scattering.\n"
" totalRlh += odStepRlh * attn;\n"
" totalMie += odStepMie * attn;\n"
" // Increment the primary ray time.\n"
" iTime += iStepSize;\n"
" }\n"
" // Calculate and return the final color.\n"
" return iSun * (pRlh * kRlh * totalRlh + pMie * kMie * totalMie);\n"
"}\n";
static const char *const vs_332_32 = "//" FILELINE "\n"
//"uniform mat4 u_model, u_view, u_proj;\n"
"uniform mat4 u_mvp;\n"
"in vec3 att_position;\n"
"in vec3 att_normal;\n"
"in vec2 att_texcoord;\n"
"out vec3 v_normal;\n"
"out vec3 v_normal_ws;\n"
"out vec2 v_texcoord;\n"
// shadow
"uniform mat4 model, view, proj;\n"
"uniform mat4 cameraToShadowProjector;\n" // !VSMCUBE
"out vec4 vneye;\n"
"out vec4 vpeye;\n"
"out vec4 sc;\n" // !VSMCUBE
"void do_shadow() {\n"
" vneye = view * model * vec4(att_normal, 0.0f);\n"
" vpeye = view * model * vec4(att_position, 1.0);\n"
" sc = cameraToShadowProjector * model * vec4(att_position, 1.0f);\n"
"}\n"
"void main() {\n"
//" gl_Position = proj * view * model * vec4(att_position, 1.0);\n"
" gl_Position = u_mvp * vec4(att_position, 1.0);\n"
" v_normal = normalize(att_normal);\n"
" v_normal_ws = normalize(vec3(model * vec4(att_normal, 0.)));\n" // normal world/model space
" v_texcoord = att_texcoord;\n"
" do_shadow();\n"
"}";
// Fullscreen-triangle vertex shader (variant A): derives both UV and clip
// position from gl_VertexID alone — no vertex buffer required.
static const char *const vs_0_2_fullscreen_quad_A = "//" FILELINE "\n"
"out vec2 texcoord;\n"
"void main() {\n"
" texcoord = vec2( (gl_VertexID << 1) & 2, gl_VertexID & 2 );\n"
" gl_Position = vec4( texcoord * 2.0 - 1.0, 0.0, 1.0 );\n" // fix: was `texCoord` — GLSL is case-sensitive, so the shader failed to compile (undeclared identifier)
"}\n";
static const char *const vs_0_2_fullscreen_quad_B = "//" FILELINE "\n"
"out vec2 uv;\n"
"void main() {\n"
" float x = float(((uint(gl_VertexID) + 2u) / 3u)%2u); \n"
" float y = float(((uint(gl_VertexID) + 1u) / 3u)%2u); \n"
" gl_Position = vec4(-1.0 + x*2.0, 0.0+(-1.0+y*2.0), 0.0, 1.0);\n" // normal(0+),flipped(0-)
" uv = vec2(x, y);\n" // normal(y),flipped(1.0-y)
"}\n";
static const char *const vs_0_2_fullscreen_quad_B_flipped = "//" FILELINE "\n"
"out vec2 uv;\n"
"void main() {\n"
" float x = float(((uint(gl_VertexID) + 2u) / 3u)%2u); \n"
" float y = float(((uint(gl_VertexID) + 1u) / 3u)%2u); \n"
" gl_Position = vec4(-1.0 + x*2.0, 0.0-(-1.0+y*2.0), 0.0, 1.0);\n" // normal(0+),flipped(0-)
" uv = vec2(x, y);\n" // normal(y),flipped(1.0-y)
"}\n";
/*
"out vec2 uv;\n"
"void main() {\n"
" float x = gl_VertexID / 2;\n"
" float y = gl_VertexID % 2;\n"
" uv = vec2(x, y);\n"
" gl_Position = vec4(2.0*uv - 1.0, 0.0, 1.0);\n"
"}\n";
*/
static const char *const fs_2_4_texel_inv_gamma = "//" FILELINE "\n"
"uniform sampler2D texture0; /*unit0*/\n"
"uniform float u_inv_gamma;\n"
"in vec2 uv;\n"
"out vec4 fragcolor;\n"
"void main() {\n"
" vec4 texel = texture( texture0, uv );\n"
" fragcolor = texel;\n"
" fragcolor.rgb = pow( fragcolor.rgb, vec3( u_inv_gamma ) );\n" // defaults: 1.0/2.2 gamma
"}\n";
static const char *const vs_32344443_332_model = "//" FILELINE "\n"
"#ifndef MAX_BONES\n"
"#define MAX_BONES 110\n"
"#endif\n"
"uniform mat3x4 vsBoneMatrix[MAX_BONES];\n"
"uniform bool SKINNED = false;\n"
// "uniform mat4 M;\n" // RIM
"uniform mat4 MVP;\n"
"in vec3 att_position;\n"
"in vec2 att_texcoord;\n"
"in vec3 att_normal;\n"
"in vec4 att_tangent;\n"
"in vec4 att_indexes;\n"
"in vec4 att_weights;\n"
"in vec4 att_color;\n"
"in vec3 att_bitangent;\n"
"out vec3 v_position;\n"
"out vec3 v_normal, v_normal_ws;\n"
"out vec2 v_texcoord;\n"
// shadow
"uniform mat4 model, view;\n"
"uniform mat4 cameraToShadowProjector;\n"
"out vec4 vneye;\n"
"out vec4 vpeye;\n"
"out vec4 sc;\n"
"void do_shadow() {\n"
" vneye = view * model * vec4(att_normal, 0.0f);\n"
" vpeye = view * model * vec4(att_position, 1.0);\n"
" sc = cameraToShadowProjector * model * vec4(att_position, 1.0f);\n"
"}\n"
"void main() {\n"
" vec3 objPos;\n"
" if(!SKINNED) {\n"
" objPos = att_position;\n"
" v_normal = att_normal;\n"
" } else {\n"
" mat3x4 m = vsBoneMatrix[int(att_indexes.x)] * att_weights.x;\n"
" m += vsBoneMatrix[int(att_indexes.y)] * att_weights.y;\n"
" m += vsBoneMatrix[int(att_indexes.z)] * att_weights.z;\n"
" m += vsBoneMatrix[int(att_indexes.w)] * att_weights.w;\n"
" objPos = vec4(att_position, 1.0) * m;\n"
" v_normal = vec4(att_normal, 0.0) * m;\n"
" //@todo: tangents\n"
" }\n"
" v_normal_ws = normalize(vec3(model * vec4(v_normal, 0.)));\n" // normal to world/model space
" v_normal = normalize(v_normal);\n"
" v_position = att_position;\n"
" v_texcoord = att_texcoord;\n"
" gl_Position = MVP * vec4( objPos, 1.0 );\n"
" do_shadow();\n"
"}\n";
#if 0
static const char *const fs_32_4_model_basic = "//" FILELINE "\n"
"uniform sampler2D fsDiffTex;\n"
"uniform sampler2D fsNormalTex;\n"
"uniform sampler2D fsPositionTex;\n"
"uniform mat4 MVP;\n"
"in vec3 v_normal;\n"
"in vec2 v_texcoord;\n"
"out vec4 fragColor;\n"
"void main() {\n"
" vec4 diff = texture(fsDiffTex, v_texcoord).rgba;\n"
" vec3 n = normalize(mat3(MVP) * v_normal); // transform normal to eye space\n"
" fragColor = diff;// * vec4(v_normal.xyz, 1);\n"
"}\n";
#endif
static const char *const fs_32_4_model = "//" FILELINE "\n"
"uniform mat4 model, view;\n"
"uniform sampler2D u_texture2d;\n"
"uniform vec3 u_coefficients_sh[9];\n"
"uniform bool u_textured = true;\n"
"uniform bool u_lit = false;\n"
"#ifdef RIM\n"
"in vec3 v_position;\n"
"#endif\n"
"in vec3 v_normal, v_normal_ws;\n"
"in vec2 v_texcoord;\n"
"out vec4 fragcolor;\n"
"{{include-shadowmap}}\n"
"in vec4 vpeye;\n"
"in vec4 vneye;\n"
"in vec4 sc;\n"
"vec4 get_shadow() {\n"
" return shadowmap(vpeye, vneye, v_texcoord, sc);\n"
"}\n"
"void main() {\n"
" vec3 n = /*normalize*/(v_normal);\n"
" vec3 SHLightResult[9];\n"
" SHLightResult[0] = 0.282095f * u_coefficients_sh[0];\n"
" SHLightResult[1] = -0.488603f * u_coefficients_sh[1] * n.y;\n"
" SHLightResult[2] = 0.488603f * u_coefficients_sh[2] * n.z;\n"
" SHLightResult[3] = -0.488603f * u_coefficients_sh[3] * n.x;\n"
" SHLightResult[4] = 1.092548f * u_coefficients_sh[4] * n.x * n.y;\n"
" SHLightResult[5] = -1.092548f * u_coefficients_sh[5] * n.y * n.z;\n"
" SHLightResult[6] = 0.315392f * u_coefficients_sh[6] * (3.0f * n.z * n.z - 1.0f);\n"
" SHLightResult[7] = -1.092548f * u_coefficients_sh[7] * n.x * n.z;\n"
" SHLightResult[8] = 0.546274f * u_coefficients_sh[8] * (n.x * n.x - n.y * n.y);\n"
" vec3 result = vec3(0.0);\n"
" for (int i = 0; i < 9; ++i)\n"
" result += SHLightResult[i];\n"
// lighting
" if(u_textured && u_lit) fragcolor = texture(u_texture2d, v_texcoord) * vec4(result, 1.0);\n" // diffuse + lit
" else if(u_textured) fragcolor = texture(u_texture2d, v_texcoord);\n" // diffuse only
" else fragcolor = vec4(result, 1.0);\n" // lit only
// shadowing
"fragcolor *= get_shadow();\n"
// matcap
// "vec2 muv = vec2(view * vec4(v_normal_ws, 0))*0.5+vec2(0.5,0.5);\n" // normal (model space) to view space
// "fragcolor = texture(u_texture2d, vec2(muv.x, 1.0-muv.y));\n"
// rimlight
"#ifdef RIM\n"
" {vec3 n = normalize(mat3(M) * v_normal); // convert normal to view space\n"
" vec3 p = (M * vec4(v_position,1.0)).xyz; // convert position to view space\n"
" vec3 v = normalize(-p); // eye vector\n"
" float rim = 1.0 - max(dot(v, n), 0.0); // rimlight\n"
" rim = smoothstep(1.0-0.01, 1.0, rim); // intensity (0.01)\n"
" fragcolor += vec4(0.0, 0.0, rim, 1.0);} // blue\n"
"#endif\n"
"}\n";
static const char *const fs_2_4_texel_ycbr_gamma_saturation = "//" FILELINE "\n"
"uniform sampler2D u_texture_y; /*unit0*/\n"
"uniform sampler2D u_texture_cb; /*unit1*/\n"
"uniform sampler2D u_texture_cr; /*unit2*/\n"
"uniform float u_gamma;\n"
"in vec2 uv;\n"
"out vec4 fragcolor;\n"
"void main() {\n"
" float y = texture(u_texture_y, uv).r;\n"
" float cb = texture(u_texture_cb, uv).r;\n"
" float cr = texture(u_texture_cr, uv).r;\n"
" const mat4 to_rgb = mat4(\n"
" 1.0000, 1.0000, 1.0000, 0.0000,\n"
" 0.0000, -0.3441, 1.7720, 0.0000,\n"
" 1.4020, -0.7141, 0.0000, 0.0000,\n"
" -0.7010, 0.5291, -0.8860, 1.0000\n"
" );\n"
" vec4 texel = to_rgb * vec4(y, cb, cr, 1.0);\n"
/* same as:
" vec3 yCbCr = vec3(y,cb-0.5,cr-0.5);\n"
" vec4 texel = vec4( dot( vec3( 1.0, 0.0, 1.402 ), yCbCr ),\n"
" dot( vec3( 1.0 , -0.34414 , -0.71414 ), yCbCr ),\n"
" dot( vec3( 1.0, 1.772, 0.0 ), yCbCr ), 1.0);\n"
*/
" // gamma correction\n"
" texel.rgb = pow(texel.rgb, vec3(1.0 / u_gamma));\n"
" // saturation (algorithm from Chapter 16 of OpenGL Shading Language)\n"
" if(false) { float saturation = 2.0; const vec3 W = vec3(0.2125, 0.7154, 0.0721);\n"
" vec3 intensity = vec3(dot(texel.rgb, W));\n"
" texel.rgb = mix(intensity, texel.rgb, saturation); }\n"
" fragcolor = vec4(texel.rgb, 1.0);\n"
"}\n";
// Vertex shader for sprite batches: passes per-vertex color and UV through,
// transforms position by the 2D ortho MVP set in sprite_render_meshes().
static const char *const vs_324_24_sprite = "//" FILELINE "\n"
"uniform mat4 u_mvp;\n"
"in vec3 att_Position;\n"
"in vec2 att_TexCoord;\n"
"in vec4 att_Color;\n"
"out vec2 vTexCoord;\n"
"out vec4 vColor;\n"
"void main() {\n"
"    vColor = att_Color;\n"
"    vTexCoord = att_TexCoord;\n"
"    gl_Position = u_mvp * vec4(att_Position, 1.0);\n"
"}\n";
// Fragment shader for sprite batches: samples the spritesheet, tints by vertex
// color, and alpha-tests at 0.5 so sprites can write depth without sorting.
static const char *const fs_24_4_sprite = "//" FILELINE "\n"
"uniform sampler2D u_texture;\n"
"in vec2 vTexCoord;\n"
"in vec4 vColor;\n"
"out vec4 fragColor;\n"
"void main() {\n"
"    vec4 texColor = texture(u_texture, vTexCoord);\n"
"texColor = vColor * texColor;\n"
"if(texColor.a < 0.5) discard;\n" // bugfix: trailing '\n' was missing, merging this and the next statement into one GLSL line and skewing shader_print() line numbers in error logs
"    fragColor = texColor;\n"
"}\n";
// Shadertoy-compatibility preamble: maps Shadertoy uniform names (iResolution,
// iMouse, iChannelN, ...) onto this engine's scalar uniforms so stock Shadertoy
// fragment shaders compile unmodified when concatenated after this string.
static const char *const fs_2_4_preamble = "//" FILELINE "\n"
"#define texture2D texture\n"
"#define texture2DLod textureLod\n"
"#define FRAGCOLOR fragColor\n"
"#define texcoord uv\n"
"#define TEXCOORD uv\n"
"uniform sampler2D iChannel0;\n"
"uniform sampler2D iChannel1;\n"
"uniform float iWidth, iHeight, iTime, iFrame, iMousex, iMousey;\n"
"uniform float iChannelRes0x, iChannelRes0y;\n"
"uniform float iChannelRes1x, iChannelRes1y;\n"
"vec2 iResolution = vec2(iWidth, iHeight);\n"
"vec2 iMouse = vec2(iMousex, iMousey);\n"
"vec2 iChannelResolution[2] = vec2[2]( vec2(iChannelRes0x, iChannelRes0y),vec2(iChannelRes1x, iChannelRes1y) );\n"
"float iGlobalTime = iTime;\n"
"in vec2 texcoord;\n"
"out vec4 fragColor;\n";
// Entry point glue for Shadertoy shaders: forwards to the user's mainImage(),
// converting normalized texcoords into Shadertoy's pixel-space fragCoord.
static const char *const fs_main_shadertoy = "//" FILELINE "\n"
"void mainImage( out vec4 fragColor, in vec2 fragCoord );\n"
"void main() {\n"
"    mainImage(fragColor, texcoord.xy * iResolution);\n"
"}\n";
// ----------------------------------------------------------------------------
// shaders
// Dumps a shader source to stdout with 1-based, zero-padded line numbers,
// so GL compile-error logs (which reference line numbers) can be matched up.
void shader_print(const char *source) {
    int line = 0, i = 0;
    while( source[i] > 0 ) {
        printf("\t%03d: ", line + 1);
        // emit the printable run (plus tabs) verbatim
        for( ; source[i] >= 32 || source[i] == '\t'; ++i ) fputc(source[i], stdout);
        // swallow the control-char run, counting newlines crossed
        for( ; source[i] > 0 && source[i] < 32; ++i ) line += source[i] == '\n';
        puts("");
    }
}
// Compiles a single shader stage from source. On failure, dumps the numbered
// source and PANICs with the driver's info log; returns the shader id on success.
static
GLuint shader_compile( GLenum type, const char *source ) {
    GLuint shader = glCreateShader(type);
    glShaderSource(shader, 1, (const char **)&source, NULL);
    glCompileShader(shader);

    GLint compiled = GL_FALSE, log_len;
    glGetShaderiv(shader, GL_COMPILE_STATUS, &compiled);
    if( compiled != GL_FALSE ) return shader;

    glGetShaderiv(shader, GL_INFO_LOG_LENGTH, &log_len);
    ASSERT(log_len < 2048); // info log must fit the stack buffer below
    char log[2048] = { 0 };
    glGetShaderInfoLog(shader, log_len, NULL, log);
    shader_print( source ); // numbered dump so log line refs are resolvable
    PANIC("ERROR: shader_compile(): %s\n%s\n", type == GL_VERTEX_SHADER ? "Vertex" : "Fragment", log);
    return 0;
}
// Builds a GL program from vertex+fragment sources. A "#version 140" directive
// is prepended to any source not already starting with one. `attribs` is a
// comma-separated list of vertex attribute names bound to locations 0,1,2,...;
// `fragcolor` names the fragment output bound to color number 0.
// Returns the program id, or 0 when a stage failed to compile (link failure PANICs).
unsigned shader(const char *vs, const char *fs, const char *attribs, const char *fragcolor) {
    PRINTF("Compiling shader\n");
    fs = fs[0] == '#' && fs[1] == 'v' ? fs : stringf("#version 140\n%s", fs);
    vs = vs[0] == '#' && vs[1] == 'v' ? vs : stringf("#version 140\n%s", vs);
    GLuint vert = shader_compile(GL_VERTEX_SHADER, vs);
    GLuint frag = shader_compile(GL_FRAGMENT_SHADER, fs);
    //GLuint geom = shader_compile(GL_GEOMETRY_SHADER, gs);
    GLuint program = 0;
    if( vert && frag ) {
        program = glCreateProgram();
        glAttachShader(program, vert);
        glAttachShader(program, frag);
        // glAttachShader(program, geom);
        // bind each comma-separated attribute name to the next location
        for( int i = 0; attribs && attribs[0]; ++i ) {
            char attrib[128] = {0};
            sscanf(attribs, "%127[^,]", attrib);
            while( attribs[0] && attribs[0] != ',' ) { attribs++; }
            while( attribs[0] && attribs[0] == ',' ) { attribs++; break; }
            if(!attrib[0]) continue;
            glBindAttribLocation(program, i, attrib);
            PRINTF("Shader.attribute[%d]=%s\n", i, attrib);
        }
        glBindFragDataLocation(program, 0, fragcolor);
        glLinkProgram(program);
        GLint status = GL_FALSE, length;
        glGetProgramiv(program, GL_LINK_STATUS, &status);
        // bugfix: `buf` was declared inside the info-log block below, so the
        // PANIC() further down read an out-of-scope variable; hoisted here.
        char buf[2048] = { 0 };
#ifdef DEBUG_SHADER
        if (status != GL_FALSE && program == DEBUG_SHADER) {
#else
        if (status == GL_FALSE) {
#endif
            glGetProgramiv(program, GL_INFO_LOG_LENGTH, &length);
            ASSERT(length < 2048);
            glGetProgramInfoLog(program, length, NULL, buf);
            puts("--- vs:");
            shader_print(vs);
            puts("--- fs:");
            shader_print(fs);
        }
        if (status == GL_FALSE) {
            PANIC("ERROR: shader(): Shader/program link: %s\n", buf);
            return 0;
        }
        // glDetachShader(program, vert);
        // glDetachShader(program, frag);
        // glDetachShader(program, geom);
        glDeleteShader(vert);
        glDeleteShader(frag);
        // glDeleteShader(geom);
        //#ifdef DEBUG_ANY_SHADER
        //    PRINTF("Shader #%d:\n", program);
        //    shader_print(vs);
        //    shader_print(fs);
        //#endif
    }
    return program;
}
// Deletes a GL program object (glDeleteProgram silently ignores id 0).
void shader_destroy(unsigned program){
    glDeleteProgram(program);
}
// Program id most recently passed to shader_bind(); used by the shader_*
// uniform setters below. Starts as -1 (wraps to UINT_MAX) meaning "none bound".
unsigned last_shader = -1;
// Resolves a uniform's location in the currently bound program (last_shader).
// Returns -1 (after logging) when the uniform is absent or was optimized out.
static
int shader_uniform(const char *name) {
    int ret = glGetUniformLocation(last_shader, name);
    if( ret < 0 ) PRINTF("!cannot find uniform '%s' in shader program %d\n", name, (int)last_shader );
    return ret;
}
// Current-program accessors plus thin glUniform* wrappers: each wrapper resolves
// the uniform by name in the currently bound program and uploads the value.
unsigned shader_get_active() { return last_shader; }
// Binds `program` and returns the previously bound program id (for restoring).
unsigned shader_bind(unsigned program) { unsigned ret = last_shader; return glUseProgram(last_shader = program), ret; }
void shader_int(const char *uniform, int i) { glUniform1i(shader_uniform(uniform), i); }
void shader_float(const char *uniform, float f) { glUniform1f(shader_uniform(uniform), f); }
void shader_vec2(const char *uniform, vec2 v) { glUniform2fv(shader_uniform(uniform), 1, &v.x); }
void shader_vec3(const char *uniform, vec3 v) { glUniform3fv(shader_uniform(uniform), 1, &v.x); }
void shader_vec4(const char *uniform, vec4 v) { glUniform4fv(shader_uniform(uniform), 1, &v.x); }
void shader_mat44(const char *uniform, mat44 m) { glUniformMatrix4fv(shader_uniform(uniform), 1, GL_FALSE/*GL_TRUE*/, m); } // no transpose: mat44 is stored GL column-order
// Binds `texture` to texture unit `unit` and points the named sampler at it.
// bugfix: previously glBindTexture ran BEFORE glActiveTexture, so the texture was
// bound to whatever unit happened to be active, not the requested one (only
// worked when the caller had already activated the right unit).
void shader_texture(const char *sampler, unsigned texture, unsigned unit) { glActiveTexture(GL_TEXTURE0 + unit); glBindTexture(GL_TEXTURE_2D, texture); glUniform1i(shader_uniform(sampler), unit); }
// Points the named samplerCube at unit 0 and binds the cubemap.
// NOTE(review): binds to the currently active unit — correct only when unit 0
// is active (true for the skybox path today); consider an explicit glActiveTexture.
void shader_cubemap(const char *sampler, unsigned texture) { glUniform1i(shader_uniform(sampler), 0); glBindTexture(GL_TEXTURE_CUBE_MAP, texture); }
// -----------------------------------------------------------------------------
// colors
// Packs four 8-bit channels into a 0xRRGGBBAA word.
// bugfix: operands are cast to uint32_t before shifting — `r << 24` previously
// shifted a promoted signed int, which is undefined behavior for r >= 128.
uint32_t rgba( uint8_t r, uint8_t g, uint8_t b, uint8_t a ) {
    return (uint32_t)r << 24 | (uint32_t)g << 16 | (uint32_t)b << 8 | (uint32_t)a;
}
// Same packing as rgba(), but with the red and blue channels swapped
// (i.e. accepts BGRA-ordered input, still produces an rgba()-layout word).
uint32_t bgra( uint8_t r, uint8_t g, uint8_t b, uint8_t a ) {
    return rgba(b,g,r,a);
}
// Extracts the alpha channel (lowest byte of an rgba()-packed color) as [0,1].
float alpha( uint32_t rgba ) {
    uint32_t a8 = rgba & 255u;
    return a8 / 255.f;
}
// -----------------------------------------------------------------------------
// images
// Allocates an uninitialized x*y image; channel count comes from the IMAGE_*
// flag (defaults to 3 = RGB when no channel flag is given).
image_t image_create(int x, int y, int flags) {
    int channels = 3; // RGB unless a channel flag overrides below
    if(flags & IMAGE_R) channels = 1;
    if(flags & IMAGE_RG) channels = 2;
    if(flags & IMAGE_RGB) channels = 3;
    if(flags & IMAGE_RGBA) channels = 4;
    image_t img;
    img.x = x;
    img.y = y;
    img.n = channels;
    img.pixels = REALLOC(0, x * y * channels ); // @fixme: image_destroy() requires stbi allocator to match REALLOC
    return img;
}
// Decodes an in-memory encoded image (png/jpg/...) via stb_image.
// `flags` may force a channel count (IMAGE_R/RG/RGB/RGBA; 0 = keep native)
// and request a vertical flip (IMAGE_FLIP). Returns a zeroed image on failure.
image_t image_from_mem(const char *data, int size, int flags) {
    image_t img = {0};
    if( data && size ) {
        stbi_set_flip_vertically_on_load(flags & IMAGE_FLIP ? 1 : 0);
        int n = 0; // 0 = let stbi keep the file's native channel count
        if(flags & IMAGE_R) n = 1;
        if(flags & IMAGE_RG) n = 2;
        if(flags & IMAGE_RGB) n = 3;
        if(flags & IMAGE_RGBA) n = 4;
        img.pixels = stbi_load_from_memory(data, size, &img.x,&img.y,&img.n, n);
        if( img.pixels ) {
            // %.*s prints the first n chars of "RGBA" as a channel-format label.
            // assumes image_t aliases x/w and y/h via a union — confirm in image_t decl
            PRINTF("Loaded image (%dx%d %.*s->%.*s)\n",img.w,img.h,img.n,"RGBA",n?n:img.n,"RGBA");
        } else {
            // PANIC("Error loading image (%s)\n", pathfile);
        }
        img.n = n ? n : img.n; // report the channel count actually delivered
    }
    return img;
}
// Loads an image from the virtual filesystem and decodes it.
// Returns a zeroed image when the file is missing or fails to decode.
image_t image(const char *pathfile, int flags) {
    const char *fname = vfs_find(pathfile);
    // if( !fname[0] ) fname = vfs_find(stringf("%s.png",pathfile)); // needed?
    // if( !fname[0] ) fname = vfs_find(stringf("%s.jpg",pathfile)); // needed?
    // if( !fname[0] ) fname = vfs_find(stringf("%s.tga",pathfile)); // needed?
    // if( !fname[0] ) fname = vfs_find(stringf("%s.jpg.png",pathfile)); // needed?
    // if( !fname[0] ) fname = vfs_find(stringf("%s.tga.png",pathfile)); // needed?
    // if( !fname[0] ) fname = vfs_find(stringf("%s.png.jpg",pathfile)); // needed?
    // if( !fname[0] ) fname = vfs_find(stringf("%s.tga.jpg",pathfile)); // needed?
    int size = 0;
    char *data = vfs_load(fname, &size);
    return image_from_mem(data, size, flags);
}
// Frees the pixel buffer via stb_image's allocator and nulls the pointer.
// Other fields (dimensions, channel count) are intentionally left intact.
void image_destroy(image_t *img) {
    if(img->pixels) stbi_image_free(img->pixels);
    img->pixels = 0; // *img = (image_t){0}; // do not clear fields yet. might be useful in the future.
}
// bilinear interpolation (uv must be in image coords, range [0..w-1,0..h-1])
static
// Samples an image with bilinear filtering. uv is in pixel coordinates,
// range [0..w-1, 0..h-1]; returns the interpolated RGB triple (0..255 floats).
// NOTE(review): always reads 3 bytes per texel — out of bounds when in.n < 3;
// callers in this file pass RGB/RGBA panoramas, so confirm before reusing.
vec3 bilinear(image_t in, vec2 uv) { // image_bilinear_pixel() ?
    float w = in.x, h = in.y, u = uv.x, v = uv.y;
    // integer corners, clamped to the image edge
    float u1 = (int)u, v1 = (int)v, u2 = minf(u1+1, w-1), v2 = minf(v1+1, h-1);
    float c1 = u - u1, c2 = v - v1; // fractional blend weights
    uint8_t *p1 = &in.pixels8[ in.n * (int)(u1 + v1 * in.w) ];
    uint8_t *p2 = &in.pixels8[ in.n * (int)(u2 + v1 * in.w) ];
    uint8_t *p3 = &in.pixels8[ in.n * (int)(u1 + v2 * in.w) ];
    uint8_t *p4 = &in.pixels8[ in.n * (int)(u2 + v2 * in.w) ];
    vec3 A = vec3( p1[0], p1[1], p1[2] );
    vec3 B = vec3( p2[0], p2[1], p2[2] );
    vec3 C = vec3( p3[0], p3[1], p3[2] );
    vec3 D = vec3( p4[0], p4[1], p4[2] );
    return mix3(mix3(A, B, c1), mix3(C, D, c1), c2);
}
// -----------------------------------------------------------------------------
// textures
// (Re)uploads pixels into an existing texture object and configures sampling.
// n = channel count (0..4); flags select storage format (FLOAT/BGR/SRGB/BCn/
// DEPTH), wrapping (REPEAT/BORDER), filtering (LINEAR), and MIPMAPS.
// Leaves the texture bound on exit (callers rely on that).
unsigned texture_update(texture_t *t, unsigned w, unsigned h, unsigned n, void *pixels, int flags) {
    ASSERT( t && t->id );
    ASSERT( n <= 4 );
    // entries [0..4]: 8-bit formats by channel count; [5..9]: float formats
    GLuint pixel_types[] = { GL_RED, GL_RED, GL_RG, GL_RGB, GL_RGBA, GL_R32F, GL_R32F, GL_RG32F, GL_RGB32F, GL_RGBA32F };
    GLenum pixel_storage = flags & TEXTURE_FLOAT ? GL_FLOAT : GL_UNSIGNED_BYTE;
    GLuint pixel_type = pixel_types[ n ];
    GLuint texel_type = pixel_types[ n + 5 * !!(flags & TEXTURE_FLOAT) ]; // jump to float table when TEXTURE_FLOAT
    GLenum wrap = GL_CLAMP_TO_EDGE;
    GLenum min_filter = GL_NEAREST, mag_filter = GL_NEAREST;
    // GLfloat color = (flags&7)/7.f, border_color[4] = { color, color, color, 1.f };
    if( flags & TEXTURE_BGR ) if( pixel_type == GL_RGB ) pixel_type = GL_BGR;
    if( flags & TEXTURE_BGR ) if( pixel_type == GL_RGBA ) pixel_type = GL_BGRA;
    if( flags & TEXTURE_SRGB ) if( texel_type == GL_RGB ) texel_type = GL_SRGB;
    if( flags & TEXTURE_SRGB ) if( texel_type == GL_RGBA ) texel_type = GL_SRGB_ALPHA;
    if( flags & TEXTURE_BC1 ) texel_type = GL_COMPRESSED_RGBA_S3TC_DXT1_EXT;
    if( flags & TEXTURE_BC2 ) texel_type = GL_COMPRESSED_RGBA_S3TC_DXT3_EXT;
    if( flags & TEXTURE_BC3 ) texel_type = GL_COMPRESSED_RGBA_S3TC_DXT5_EXT;
    if( flags & TEXTURE_DEPTH ) texel_type = pixel_type = GL_DEPTH_COMPONENT; // GL_DEPTH_COMPONENT32
    if( flags & TEXTURE_REPEAT ) wrap = GL_REPEAT;
    if( flags & TEXTURE_BORDER ) wrap = GL_CLAMP_TO_BORDER;
    if( flags & TEXTURE_LINEAR ) min_filter = GL_LINEAR, mag_filter = GL_LINEAR;
    if( flags & TEXTURE_MIPMAPS ) min_filter = flags & TEXTURE_LINEAR ? GL_LINEAR_MIPMAP_LINEAR : GL_NEAREST_MIPMAP_LINEAR;
    if( flags & TEXTURE_MIPMAPS ) mag_filter = flags & TEXTURE_LINEAR ? GL_LINEAR : GL_NEAREST;
    // disabled in-place premultiply-alpha pass (kept for reference)
    if( 0 ) { // flags & TEXTURE_PREMULTIPLY_ALPHA )
        uint8_t *p = pixels;
        if(n == 2) for( unsigned i = 0; i < 2*w*h; i += 2 ) {
            p[i] = (p[i] * p[i+1] + 128) >> 8;
        }
        if(n == 4) for( unsigned i = 0; i < 4*w*h; i += 4 ) {
            p[i+0] = (p[i+0] * p[i+3] + 128) >> 8;
            p[i+1] = (p[i+1] * p[i+3] + 128) >> 8;
            p[i+2] = (p[i+2] * p[i+3] + 128) >> 8;
        }
    }
    // NOTE: reads t->flags (the PREVIOUS flags), not the `flags` argument;
    // t->flags is only updated at the end of this function
    GLenum texture_type = t->flags & TEXTURE_ARRAY ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D; // @fixme: test GL_TEXTURE_2D_ARRAY
    //glPixelStorei( GL_UNPACK_ALIGNMENT, n < 4 ? 1 : 4 ); // for framebuffer reading
    //glActiveTexture(GL_TEXTURE0 + (flags&7));
    glBindTexture(texture_type, t->id);
    glTexImage2D(texture_type, 0, texel_type, w, h, 0, pixel_type, pixel_storage, pixels);
    glTexParameteri(texture_type, GL_TEXTURE_WRAP_S, wrap);
    glTexParameteri(texture_type, GL_TEXTURE_WRAP_T, wrap);
    glTexParameteri(texture_type, GL_TEXTURE_MIN_FILTER, min_filter);
    glTexParameteri(texture_type, GL_TEXTURE_MAG_FILTER, mag_filter);
#if 0 // only for sampler2DShadow
    if( flags & TEXTURE_DEPTH ) glTexParameteri(texture_type, GL_TEXTURE_COMPARE_MODE, GL_COMPARE_REF_TO_TEXTURE);
    if( flags & TEXTURE_DEPTH ) glTexParameteri(texture_type, GL_TEXTURE_COMPARE_FUNC, GL_LEQUAL);
#endif
    // if( flags & TEXTURE_BORDER ) glTexParameterfv(texture_type, GL_TEXTURE_BORDER_COLOR, border_color);
    if( flags & TEXTURE_MIPMAPS ) glGenerateMipmap(texture_type);
    if( flags & TEXTURE_MIPMAPS ) {
        GLfloat max_aniso = 0;
        // glGetFloatv(GL_MAX_TEXTURE_MAX_ANISOTROPY, &max_aniso);
        max_aniso = 4;
        // glTexParameterf(texture_type, GL_TEXTURE_MAX_ANISOTROPY, max_aniso);
    }
    // glBindTexture(texture_type, 0); // do not unbind. current code expects texture to be bound at function exit
    t->w = w;
    t->h = h;
    t->n = n;
    t->flags = flags;
    return t->id;
}
// Generates a new GL texture object and fills it via texture_update().
texture_t texture_create(unsigned w, unsigned h, unsigned n, void *pixels, int flags) {
    texture_t texture = {0};
    glGenTextures( 1, &texture.id );
    texture_update( &texture, w, h, n, pixels, flags );
    return texture;
}
// Returns a lazily-built, cached 256x256 grayscale checkerboard texture,
// used as the fallback when an image fails to load.
texture_t texture_checker() {
    static texture_t texture = {0};
    if( !texture.id ) {
#if 0
        float pixels[] = { 1,0.5,0.5,1 };
        texture = texture_create(2,2,1, pixels, TEXTURE_FLOAT|TEXTURE_MIPMAPS|TEXTURE_REPEAT|TEXTURE_BORDER);
#else
        // 256*256 texels * 4 bytes each; the active branch writes one uint32 per texel
        uint32_t *pixels = REALLOC(0, 256*256*4);
        for (int y = 0, i = 0; y < 256; y++) {
            for (int x = 0; x < 256; x++) {
#if 0
                extern const uint32_t secret_palette[32];
                uint32_t rgb = secret_palette[ y / 8 ] * !!((x ^ y) & 0x8);
                pixels[i++] = (rgb>>16) & 255;
                pixels[i++] = (rgb>>8) & 255;
                pixels[i++] = (rgb>>0) & 255;
                pixels[i++] = 255;
#elif 0
                extern const uint32_t secret_palette[32];
                uint32_t rgb = ((x ^ y) & 0x8) ? secret_palette[6] : secret_palette[ 8 + ((x^y) / (256/6)) ];
                pixels[i++] = (rgb>>16) & 255;
                pixels[i++] = (rgb>>8) & 255;
                pixels[i++] = (rgb>>0) & 255;
                pixels[i++] = 255;
#else
                extern const uint32_t secret_palette[32];
                // three gray levels from xor pattern: 8px checker + 128px macro-checker
                uint32_t lum = (x^y) & 8 ? 128 : (x^y) & 128 ? 192 : 255;
                uint32_t rgb = rgba(lum,lum,lum,255);
                pixels[i++] = rgb;
#endif
            }
        }
        texture = texture_create(256,256,4, pixels, TEXTURE_RGBA|TEXTURE_MIPMAPS|TEXTURE_REPEAT|TEXTURE_BORDER);
        FREE(pixels);
#endif
    }
    return texture;
}
// Decodes an in-memory encoded image and uploads it as a texture.
// Falls back to the checkerboard texture when decoding fails.
texture_t texture_from_mem(const char *ptr, int len, int flags) {
    image_t img = image_from_mem(ptr, len, flags);
    if( !img.pixels ) return texture_checker();
    texture_t t = texture_create(img.x, img.y, img.n, img.pixels, flags);
    image_destroy(&img); // pixel data now lives on the GPU
    return t;
}
// Loads an image from the virtual filesystem and uploads it as a texture.
// Falls back to the checkerboard texture when loading fails.
texture_t texture(const char *pathfile, int flags) {
    // PRINTF("Loading file %s\n", pathfile);
    image_t img = image(pathfile, flags);
    if( !img.pixels ) return texture_checker();
    texture_t t = texture_create(img.x, img.y, img.n, img.pixels, flags);
    image_destroy(&img); // pixel data now lives on the GPU
    return t;
}
// Deletes the GL texture object and clears the id so double-destroy is safe.
void texture_destroy( texture_t *t ) {
    if(t->id) glDeleteTextures(1, &t->id);
    t->id = 0;
}
// -----------------------------------------------------------------------------
// shadowmaps
// Creates a square depth-only FBO for shadow mapping (default 1024).
// Restores the previously bound framebuffer before returning.
shadowmap_t shadowmap(int texture_width) { // = 1024
    shadowmap_t s = {0};
    s.texture_width = texture_width;
    glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &s.saved_fb); // remember caller's FBO
    glGenFramebuffers(1, &s.fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, s.fbo);
    glActiveTexture(GL_TEXTURE0);
    glGenTextures(1, &s.texture);
    glBindTexture(GL_TEXTURE_2D, s.texture);
    glTexImage2D(GL_TEXTURE_2D, 0, GL_DEPTH_COMPONENT, texture_width, texture_width, 0, GL_DEPTH_COMPONENT, GL_UNSIGNED_BYTE, 0);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER);
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER);
    glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, s.texture, 0);
    // depth-only: no color attachment to draw into or read from
    glDrawBuffer(GL_NONE);
    glReadBuffer(GL_NONE);
    glBindFramebuffer(GL_FRAMEBUFFER, s.saved_fb);
    return s;
}
// Releases the shadowmap's GL texture and framebuffer, then zeroes the struct
// so it can be safely reused or destroyed again.
void shadowmap_destroy(shadowmap_t *s) {
    if (s->texture) glDeleteTextures(1, &s->texture);
    if (s->fbo) glDeleteFramebuffers(1, &s->fbo);
    shadowmap_t zero = {0};
    *s = zero;
}
// Computes the light's view (mv), mvp, and the bias*proj*view "shadowmatrix"
// used to transform world positions into shadowmap texture space.
void shadowmap_set_shadowmatrix(shadowmap_t *s, vec3 aLightPos, vec3 aLightAt, vec3 aLightUp, const mat44 projection) {
    copy44(s->proj, projection);
    s->light_position = vec4(aLightPos.x, aLightPos.y, aLightPos.z, 1);
    lookat44(s->mv, aLightPos, aLightAt, aLightUp);
    // remaps clip space [-1,1] to texture space [0,1]
    mat44 bias = {
        0.5, 0.0, 0.0, 0.0,
        0.0, 0.5, 0.0, 0.0,
        0.0, 0.0, 0.5, 0.0,
        0.5, 0.5, 0.5, 1.0 };
    // s->shadowmatrix = bias;
    // s->shadowmatrix *= s->proj;
    // s->shadowmatrix *= s->mv;
    // multiply44x3(s->shadowmatrix, s->mv, s->proj, bias);
    multiply44x3(s->shadowmatrix, bias, s->proj, s->mv);
    // mvp = projection * s->mv;
    // multiply44x2(s->mvp, s->mv, projection);
    multiply44x2(s->mvp, projection, s->mv);
}
// Redirects rendering into the shadowmap FBO and clears its depth buffer;
// saves the caller's viewport and framebuffer so shadowmap_end() can restore them.
void shadowmap_begin(shadowmap_t *s) {
    glGetIntegerv(GL_VIEWPORT, &s->saved_viewport[0]);
    glGetIntegerv(GL_DRAW_FRAMEBUFFER_BINDING, &s->saved_fb);
    glBindFramebuffer(GL_FRAMEBUFFER, s->fbo);
    glViewport(0, 0, s->texture_width, s->texture_width);
    glClearDepth(1);
    glClear(GL_DEPTH_BUFFER_BIT);
}
// Restores the viewport and framebuffer saved by shadowmap_begin().
void shadowmap_end(shadowmap_t *s) {
    glViewport(s->saved_viewport[0], s->saved_viewport[1], s->saved_viewport[2], s->saved_viewport[3]);
    glBindFramebuffer(GL_FRAMEBUFFER, s->saved_fb);
}
// shadowmap utils
// Builds a square-aspect perspective projection for a spot/point light shadow.
void shadowmatrix_proj(mat44 shm_proj, float aLightFov, float znear, float zfar) {
    perspective44(shm_proj, aLightFov, 1.0f, znear, zfar);
}
// Builds an orthographic projection for a directional light shadow.
void shadowmatrix_ortho(mat44 shm_proj, float left, float right, float bottom, float top, float znear, float zfar) {
    ortho44(shm_proj, left, right, bottom, top, znear, zfar);
}
// -----------------------------------------------------------------------------
// fullscreen quads
// usage: bind empty vao & commit call for 6 (quad) or 3 vertices (tri).
// ie, glBindVertexArray(empty_vao); glDrawArrays(GL_TRIANGLES, 0, 3);
// Draws `texture` as a fullscreen quad (6 verts, attributeless VAO) with
// inverse-gamma correction. Shader/VAO are built lazily on first call.
void fullscreen_rgb_quad( texture_t texture, float gamma ) {
    // NOTE(review): program/vao are GLuint in GL but held as int here so -1 can
    // mean "uninitialized"; &vao is passed to glGenVertexArrays as int* — confirm
    // this is intentional (it relies on int/GLuint having the same representation).
    static int program = -1, vao = -1, u_inv_gamma = -1;
    if( program < 0 ) {
        const char* vs = vs_0_2_fullscreen_quad_B_flipped;
        const char* fs = fs_2_4_texel_inv_gamma;
        program = shader(vs, fs, "", "fragcolor" );
        u_inv_gamma = glGetUniformLocation(program, "u_inv_gamma");
        glGenVertexArrays( 1, &vao );
    }
    GLenum texture_type = texture.flags & TEXTURE_ARRAY ? GL_TEXTURE_2D_ARRAY : GL_TEXTURE_2D;
    // glEnable( GL_BLEND );
    glUseProgram( program );
    glUniform1f( u_inv_gamma, 1.0f / (gamma + !gamma) ); // "+ !gamma" avoids div by zero when gamma == 0
    glBindVertexArray( vao );
    glActiveTexture( GL_TEXTURE0 );
    glBindTexture( texture_type, texture.id );
    glDrawArrays( GL_TRIANGLES, 0, 6 );
    profile_incstat("drawcalls", +1);
    profile_incstat("triangles", +2);
    glBindTexture( texture_type, 0 );
    glBindVertexArray( 0 );
    glUseProgram( 0 );
    // glDisable( GL_BLEND );
}
// Draws three planar Y/Cb/Cr textures as one fullscreen RGB quad (video path),
// with gamma correction. Shader/VAO and uniform locations cached on first call.
void fullscreen_ycbcr_quad( texture_t textureYCbCr[3], float gamma ) {
    static int program = -1, vao = -1, u_gamma = -1, uy = -1, ucb = -1, ucr = -1;
    if( program < 0 ) {
        const char* vs = vs_0_2_fullscreen_quad_B_flipped;
        const char* fs = fs_2_4_texel_ycbr_gamma_saturation;
        program = shader(vs, fs, "", "fragcolor" );
        u_gamma = glGetUniformLocation(program, "u_gamma");
        uy = glGetUniformLocation(program, "u_texture_y");
        ucb = glGetUniformLocation(program, "u_texture_cb");
        ucr = glGetUniformLocation(program, "u_texture_cr");
        glGenVertexArrays( 1, &vao );
    }
    // glEnable( GL_BLEND );
    glUseProgram( program );
    glUniform1f( u_gamma, gamma );
    glBindVertexArray( vao );
    // one texture unit per plane: 0=Y, 1=Cb, 2=Cr
    glUniform1i(uy, 0);
    glActiveTexture( GL_TEXTURE0 );
    glBindTexture( GL_TEXTURE_2D, textureYCbCr[0].id );
    glUniform1i(ucb, 1);
    glActiveTexture( GL_TEXTURE1 );
    glBindTexture( GL_TEXTURE_2D, textureYCbCr[1].id );
    glUniform1i(ucr, 2);
    glActiveTexture( GL_TEXTURE2 );
    glBindTexture( GL_TEXTURE_2D, textureYCbCr[2].id );
    glDrawArrays( GL_TRIANGLES, 0, 6 );
    profile_incstat("drawcalls", +1);
    profile_incstat("triangles", +2);
    glBindTexture( GL_TEXTURE_2D, 0 );
    glBindVertexArray( 0 );
    glUseProgram( 0 );
    // glDisable( GL_BLEND );
}
// ----------------------------------------------------------------------------
// sprites
// Per-sprite draw request, queued by sprite_ex() and consumed by
// sprite_rebuild_meshes() each frame.
typedef struct sprite_t {
    int cellw, cellh;        // dimensions of any cell in spritesheet
    int frame, ncx, ncy;     // frame in a (num cellx, num celly) spritesheet
    float px, py, pz;        // origin x, y, depth
    float ox, oy, cos, sin;  // offset x, offset y, cos/sin of rotation degree
    float sx, sy;            // scale x,y
    uint32_t rgba;           // vertex color
} sprite_t;
// sprite batching: one batch (sprite list + mesh) per texture id
typedef struct batch_t { array(sprite_t) sprites; mesh_t mesh; int dirty; } batch_t;
typedef map(int, batch_t) batch_group_t; // mapkey is anything that forces a flush. texture_id for now, might be texture_id+program_id soon
// sprite stream: interleaved vertex layout "p3 t2 c4b" and triangle indices
typedef struct sprite_vertex { vec3 pos; vec2 uv; uint32_t rgba; } sprite_vertex;
typedef struct sprite_index { GLuint triangle[3]; } sprite_index;
#define sprite_vertex(...) M_CAST(sprite_vertex, __VA_ARGS__)
#define sprite_index(...)  M_CAST(sprite_index, __VA_ARGS__)
// sprite impl: scratch buffers + two render groups (additive vs translucent blending)
static int sprite_count = 0;
static int sprite_program = -1;
static array(sprite_index)  sprite_indices = 0;
static array(sprite_vertex) sprite_vertices = 0;
static batch_group_t sprite_additive_group = {0};
static batch_group_t sprite_translucent_group = {0};
// Convenience wrapper: queues a full-texture, untinted, translucent sprite.
void sprite( texture_t texture, float px, float py, float pz, float rot ) {
    sprite_ex( texture,
        px,py,pz, rot,  // position (x,y,depth), rotation angle
        0,0, 1,1,       // offset (x,y), scale (x,y),
        0,~0u,          // is_additive, tint color
        0, 0,0          // frame num(x) in a (y,z) spritesheet
    );
}
// Queues a sprite for this frame's batch. rotation is in degrees; (ox,oy) is an
// offset applied pre-rotation; (frame,xcells,ycells) selects a spritesheet cell
// (frame is discarded when out of range). Nothing is queued for zero scale or
// zero alpha. Actual mesh build/draw happens later in sprite_update().
void sprite_ex( texture_t texture,
    float px, float py, float pz, float rotation,
    float ox, float oy, float sx, float sy,
    int additive, uint32_t rgba,
    float frame, float xcells, float ycells
) {
    if (frame < 0) return;
    if (frame > 0 && frame >= (xcells * ycells)) return;
    // no need to queue if alpha or scale are zero
    if( sx && sy && alpha(rgba) ) {
        sprite_t s;
        s.px = px;
        s.py = py;
        s.pz = pz;
        s.frame = frame;
        s.ncx = xcells ? xcells : 1; // default to a 1x1 sheet
        s.ncy = ycells ? ycells : 1;
        s.sx = sx;
        s.sy = sy;
        s.ox = ox * sx; // offsets scale with the sprite
        s.oy = oy * sy;
        s.cellw = texture.x * sx / s.ncx; // on-screen cell size
        s.cellh = texture.y * sy / s.ncy;
        s.rgba = rgba;
        s.cos = 1;
        s.sin = 0;
        if(rotation) {
            rotation = (rotation + 0) * ((float)C_PI / 180); // degrees -> radians
            s.cos = cosf(rotation);
            s.sin = sinf(rotation);
        }
        // batches are keyed by texture id inside the blend-mode group
        batch_group_t *batches = additive == 1 ? &sprite_additive_group : &sprite_translucent_group;
#if 0
        batch_t *found = map_find(*batches, texture.id);
        if( !found ) found = map_insert(*batches, texture.id, (batch_t){0});
#else
        batch_t *found = map_find_or_add(*batches, texture.id, (batch_t){0});
#endif
        array_push(found->sprites, s);
    }
}
// Converts every queued sprite into quad geometry (4 verts / 2 tris each),
// uploads one mesh per batch, and empties the per-batch sprite queues.
static void sprite_rebuild_meshes() {
    sprite_count = 0;
    batch_group_t* list[] = { &sprite_additive_group, &sprite_translucent_group };
    for( int l = 0; l < countof(list); ++l) {
        for each_map_ptr(*list[l], int,_, batch_t,bt) {
            bt->dirty = array_count(bt->sprites) ? 1 : 0; // only draw batches that queued sprites this frame
            if( !bt->dirty ) continue;
            int index = 0;
            array_clear(sprite_indices);
            array_clear(sprite_vertices);
            array_foreach_ptr(bt->sprites, sprite_t,it ) {
                // quad corners around the (offset, centered) cell, pre-rotation
                float x0 = it->ox - it->cellw/2, x3 = x0 + it->cellw;
                float y0 = it->oy - it->cellh/2, y3 = y0;
                float x1 = x0, x2 = x3;
                float y1 = y0 + it->cellh, y2 = y1;
                // @todo: move this affine transform into glsl shader
                vec3 v0 = { it->px + ( x0 * it->cos - y0 * it->sin ), it->py + ( x0 * it->sin + y0 * it->cos ), it->pz };
                vec3 v1 = { it->px + ( x1 * it->cos - y1 * it->sin ), it->py + ( x1 * it->sin + y1 * it->cos ), it->pz };
                vec3 v2 = { it->px + ( x2 * it->cos - y2 * it->sin ), it->py + ( x2 * it->sin + y2 * it->cos ), it->pz };
                vec3 v3 = { it->px + ( x3 * it->cos - y3 * it->sin ), it->py + ( x3 * it->sin + y3 * it->cos ), it->pz };
                // UV rect of the selected spritesheet cell (epsilon avoids bleeding)
                float cx = (1.0f / it->ncx) - 1e-9f;
                float cy = (1.0f / it->ncy) - 1e-9f;
                int idx = (int)it->frame;
                int px = idx % it->ncx;
                int py = idx / it->ncx;
                float ux = px * cx, uy = py * cy;
                float vx = ux + cx, vy = uy + cy;
                vec2 uv0 = vec2(ux, uy);
                vec2 uv1 = vec2(ux, vy);
                vec2 uv2 = vec2(vx, vy);
                vec2 uv3 = vec2(vx, uy);
                array_push( sprite_vertices, sprite_vertex(v0, uv0, it->rgba) ); // Vertex 0 (A)
                array_push( sprite_vertices, sprite_vertex(v1, uv1, it->rgba) ); // Vertex 1 (B)
                array_push( sprite_vertices, sprite_vertex(v2, uv2, it->rgba) ); // Vertex 2 (C)
                array_push( sprite_vertices, sprite_vertex(v3, uv3, it->rgba) ); // Vertex 3 (D)
                //      A--B      A            A-B
                // quad | /| becomes triangle |/  and triangle \|
                //      D--C      D-C              C
                GLuint A = (index+0), B = (index+1), C = (index+2), D = (index+3); index += 4;
                array_push( sprite_indices, sprite_index(C, D, A) ); // Triangle 1
                array_push( sprite_indices, sprite_index(C, A, B) ); // Triangle 2
            }
            mesh_upgrade(&bt->mesh, "p3 t2 c4b", 0,array_count(sprite_vertices),sprite_vertices, 3*array_count(sprite_indices),sprite_indices, MESH_STATIC);
            // clear elements from queue
            sprite_count += array_count(bt->sprites);
            array_clear(bt->sprites);
        }
    }
}
// Draws all batches built by sprite_rebuild_meshes(): additive batches first,
// then translucent ones, in screen-space ortho with depth testing enabled.
static void sprite_render_meshes() {
    if( sprite_program < 0 ) {
        sprite_program = shader( vs_324_24_sprite, fs_24_4_sprite,
            "att_Position,att_TexCoord,att_Color",
            "fragColor"
        );
    }
    // use the shader and bind the texture @ unit 0
    shader_bind(sprite_program);
    glActiveTexture(GL_TEXTURE0);
    // setup rendering state
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_BLEND);
    glDepthFunc(GL_LEQUAL); // try to help with zfighting
    // update camera and set mvp in the uniform
    mat44 mvp2d;
    float zdepth_max = window_height(); // 1;
    ortho44(mvp2d, 0, window_width(), window_height(), 0, -zdepth_max, +zdepth_max);
    shader_mat44("u_mvp", mvp2d);
    // set (unit 0) in the uniform texture sampler, and render batch
    // for all additive then translucent groups
    if( map_count(sprite_additive_group) > 0 ) {
        glBlendFunc( GL_SRC_ALPHA, GL_ONE );
        for each_map_ptr(sprite_additive_group, int,texture_id, batch_t,bt) {
            if( bt->dirty ) {
                shader_texture("u_texture", *texture_id, 0);
                mesh_render(&bt->mesh);
            }
        }
        // map_clear(sprite_additive_group);
    }
    if( map_count(sprite_translucent_group) > 0 ) {
        glBlendFunc( GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA );
        for each_map_ptr(sprite_translucent_group, int,texture_id, batch_t,bt) {
            if( bt->dirty ) {
                shader_texture("u_texture", *texture_id, 0);
                mesh_render(&bt->mesh);
            }
        }
        // map_clear(sprite_translucent_group);
    }
    // restore default state
    glDisable(GL_DEPTH_TEST);
    glDisable(GL_BLEND);
    glDepthFunc(GL_LESS);
    glUseProgram(0);
}
// One-time init of the two sprite batch maps (keyed by texture id).
static void sprite_init() {
    map_init(sprite_translucent_group, less_int, hash_int);
    map_init(sprite_additive_group, less_int, hash_int);
}
// Per-frame sprite pass: rebuild batch meshes from queued sprites, then draw.
static void sprite_update() {
    profile(Sprite rebuild) {
        sprite_rebuild_meshes();
    }
    profile(Sprite render) {
        sprite_render_meshes();
    }
}
// -----------------------------------------------------------------------------
// cubemaps
// project cubemap coords into sphere normals
// Maps a texel (x,y) of cubemap face `face` to its direction vector on the
// unit cube (not normalized). Faces follow +X,-X,+Y,-Y,+Z,-Z ordering.
static
vec3 cubemap2polar(int face, int x, int y, int texture_width) {
    // normalize texel coords to [-1,+1] across the face
    float u = (x / (texture_width - 1.f)) * 2 - 1;
    float v = (y / (texture_width - 1.f)) * 2 - 1;
    switch( face ) {
        case 0:  return vec3( u, -1, -v);
        case 1:  return vec3(-v, -u,  1);
        case 2:  return vec3(-1, -u, -v);
        case 3:  return vec3(-u,  1, -v);
        case 4:  return vec3( v, -u, -1);
        default: return vec3( 1,  u, -v);
    }
}
// project normal in a sphere as 2d texcoord
static
// Projects a direction vector onto equirectangular texture coords.
// Note: u spans [0,2] (theta in (-pi,pi] divided by pi, not 2*pi) — callers
// below scale by (in.h-1) against a 2:1 panorama (w == 2h), so x stays in range.
vec2 polar2uv(vec3 n) {
    n = norm3(n);
    float theta = atan2(n.y, n.x);
    float phi = atan2(n.z, hypot(n.x, n.y)); // elevation above the xy plane
    float u = (theta + C_PI) / C_PI;
    float v = (C_PI/2 - phi) / C_PI;
    return vec2(u, v);
}
// equirectangular panorama (2:1) to cubemap - in RGB, out RGB
// Equirectangular panorama (2:1) to cubemap - in RGB, out RGB.
// Each face is `width`x`width`, sampled bilinearly from the panorama.
static
void panorama2cubemap_(image_t out[6], const image_t in, int width){
    int face;
    #pragma omp parallel for
    for( face = 0; face < 6; ++face ) {
        out[face] = image_create(width, width, IMAGE_RGB); // 3 bytes per pixel
        for (int j=0; j < width; ++j) {
            // bugfix: rows were addressed through pixels32 (4 bytes/pixel) into this
            // 3-bytes-per-pixel allocation, overflowing the buffer by width*width
            // bytes per face; write packed RGB bytes matching the allocation instead.
            uint8_t *row = &out[ face ].pixels8[ (j * width) * 3 ];
            for (int i=0; i < width; ++i) {
                vec3 polar = cubemap2polar(face, i, j, width);
                vec2 uv = polar2uv(polar);
                uv = scale2(uv, in.h-1); // source coords (assumes 2:1, 2*h == w)
                vec3 rgb = bilinear(in, uv);
                row[i*3+0] = rgb.x;
                row[i*3+1] = rgb.y;
                row[i*3+2] = rgb.z;
            }
        }
    }
}
// equirectangular panorama (2:1) to cubemap - in RGB, out RGBA
// Equirectangular panorama (2:1) to cubemap - in RGB, out RGBA.
// Each face is `width`x`width`; texels are written as packed 32-bit RGBA.
void panorama2cubemap(image_t out[6], const image_t in, int width) {
    int face;
    #pragma omp parallel for
    for( face = 0; face < 6; ++face ) {
        out[face] = image_create(width, width, IMAGE_RGBA);
        for (int j=0; j < width; ++j) {
            uint32_t *line = &out[ face ].pixels32[ 0 + j * width ];
            for (int i=0; i < width; ++i) {
                vec3 polar = cubemap2polar(face, i, j, width);
                vec2 uv = polar2uv(polar);
                uv = scale2(uv, in.h-1); // source coords (assumes 2:1, 2*h == w)
                vec3 rgb = bilinear(in, uv);
                // pack r,g,b,255 into one 32-bit texel via the union's byte fields
                union color {
                    struct { uint8_t r,g,b,a; };
                    uint32_t rgba;
                } c = { rgb.x, rgb.y, rgb.z, 255 };
                line[i] = c.rgba;
            }
        }
    }
}
// Uploads six face images as a GL cubemap and computes 2nd-order spherical
// harmonics irradiance coefficients (c.sh[0..8]) by subsampling each face.
// Faces are expected in +X,-X,+Y,-Y,+Z,-Z order; `flags` is currently unused.
cubemap_t cubemap6( const image_t images[6], int flags ) {
    cubemap_t c = {0}, z = {0};
    glGenTextures(1, &c.id);
    glBindTexture(GL_TEXTURE_CUBE_MAP, c.id);
    int samples = 0;
    for (int i = 0; i < 6; i++) {
        image_t img = images[i]; //image(textures[i], IMAGE_RGB);
        glTexImage2D(GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, 0, GL_RGB, img.w, img.h, 0, img.n == 3 ? GL_RGB : GL_RGBA, GL_UNSIGNED_BYTE, img.pixels);
        // calculate SH coefficients (@ands)
        // per-face basis: direction to face center plus in-face x/y axes
        const vec3 skyDir[] = {{ 1, 0, 0},{-1, 0, 0},{ 0, 1, 0},{ 0,-1, 0},{ 0, 0, 1},{ 0, 0,-1}};
        const vec3 skyX[]   = {{ 0, 0,-1},{ 0, 0, 1},{ 1, 0, 0},{ 1, 0, 0},{ 1, 0, 0},{-1, 0, 0}};
        const vec3 skyY[]   = {{ 0, 1, 0},{ 0, 1, 0},{ 0, 0,-1},{ 0, 0, 1},{ 0, 1, 0},{ 0, 1, 0}};
        int step = 16; // subsample every 16th texel for speed
        for (int y = 0; y < img.h; y += step) {
            unsigned char *p = (unsigned char*)img.pixels + y * img.w * img.n;
            for (int x = 0; x < img.w; x += step) {
                vec3 n = add3(
                    add3(
                        scale3(skyX[i],  2.0f * (x / (img.w - 1.0f)) - 1.0f),
                        scale3(skyY[i], -2.0f * (y / (img.h - 1.0f)) + 1.0f)),
                    skyDir[i]); // texelDirection;
                float l = len3(n);
                // 1/l^3 factors in the texel's solid angle weighting
                vec3 light = div3(vec3(p[0], p[1], p[2]), 255.0f * l * l * l); // texelSolidAngle * texel_radiance;
                n = norm3(n);
                // accumulate the 9 SH basis functions evaluated at n
                c.sh[0] = add3(c.sh[0], scale3(light,  0.282095f));
                c.sh[1] = add3(c.sh[1], scale3(light, -0.488603f * n.y * 2.0 / 3.0));
                c.sh[2] = add3(c.sh[2], scale3(light,  0.488603f * n.z * 2.0 / 3.0));
                c.sh[3] = add3(c.sh[3], scale3(light, -0.488603f * n.x * 2.0 / 3.0));
                c.sh[4] = add3(c.sh[4], scale3(light,  1.092548f * n.x * n.y / 4.0));
                c.sh[5] = add3(c.sh[5], scale3(light, -1.092548f * n.y * n.z / 4.0));
                c.sh[6] = add3(c.sh[6], scale3(light,  0.315392f * (3.0f * n.z * n.z - 1.0f) / 4.0));
                c.sh[7] = add3(c.sh[7], scale3(light, -1.092548f * n.x * n.z / 4.0));
                c.sh[8] = add3(c.sh[8], scale3(light,  0.546274f * (n.x * n.x - n.y * n.y) / 4.0));
                p += img.n * step;
                samples++;
            }
        }
    }
    // normalize accumulated SH by sample count
    for (int s = 0; s < 9; s++) {
        c.sh[s] = scale3(c.sh[s], 32.f / samples);
    }
    if( glGenerateMipmap )
    glGenerateMipmap(GL_TEXTURE_CUBE_MAP);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_MIN_FILTER, glGenerateMipmap ? GL_LINEAR_MIPMAP_LINEAR : GL_LINEAR);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_CUBE_MAP, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    glBindTexture(GL_TEXTURE_CUBE_MAP, 0);
    return c;
}
// Builds a cubemap from a single 2:1 RGBA equirectangular panorama:
// projects it into 6 faces, reorders them for cubemap6(), then frees the faces.
cubemap_t cubemap( const image_t in, int flags ) {
    ASSERT( in.n == 4 ); // panorama2cubemap() requires RGBA input
    image_t out[6];
    panorama2cubemap(out, in, in.h);
    // reorder generated faces into the +X,-X,+Y,-Y,+Z,-Z layout cubemap6() expects
    image_t swap[6] = { out[0],out[3],out[1],out[4],out[2],out[5] };
    cubemap_t c = cubemap6(swap, flags);
    int i;
    #pragma omp parallel for
    for( i = 0; i < 6; ++i) image_destroy(&out[i]);
    return c;
}
// Deletes the GL cubemap texture; keeps the SH coefficients intact on purpose.
void cubemap_destroy(cubemap_t *c) {
    glDeleteTextures(1, &c->id);
    c->id = 0; // do not destroy SH coefficients still. they might be useful in the future.
}
// Cubemap last activated by skybox_push_state(); queried by renderers needing SH.
static cubemap_t *last_cubemap;
cubemap_t* cubemap_get_active() {
    return last_cubemap;
}
// -----------------------------------------------------------------------------
// skyboxes
// Creates a skybox. `asset` may be 0 (procedural Rayleigh sky), a single
// equirectangular panorama file, or a directory containing posx/negx/posy/
// negy/posz/negz face images. flags==0 picks the mode from `asset`.
skybox_t skybox(const char *asset, int flags) {
    skybox_t sky = {0};
    // sky mesh: unit cube rendered as a single triangle strip
    vec3 vertices[] = {{+1,-1,+1},{+1,+1,+1},{+1,+1,-1},{-1,+1,-1},{+1,-1,-1},{-1,-1,-1},{-1,-1,+1},{-1,+1,+1}};
    unsigned indices[] = { 0, 1, 2, 3, 4, 5, 6, 3, 7, 1, 6, 0, 4, 2 };
    sky.geometry = mesh_create("p3", 0,countof(vertices),vertices, countof(indices),indices, MESH_TRIANGLE_STRIP);
    // sky program
    sky.flags = flags ? flags : !!asset; // either cubemap or rayleigh
    sky.program = shader(vs_3_3_skybox,
        sky.flags ? fs_3_4_skybox : fs_3_4_skybox_rayleigh,
        "att_position", "fragcolor");
    // sky cubemap & SH
    if( asset ) {
        // a file with a size is treated as a panorama; otherwise assume a
        // directory of six face images
        int is_panorama = vfs_size( asset );
        if( is_panorama ) {
            stbi_hdr_to_ldr_gamma(1.2f);
            image_t panorama = image( asset, IMAGE_RGBA );
            sky.cubemap = cubemap( panorama, 0 ); // RGBA required
            image_destroy(&panorama);
        } else {
            image_t images[6] = {0};
            images[0] = image( stringf("%s/posx", asset), IMAGE_RGB ); // cubepx
            images[1] = image( stringf("%s/negx", asset), IMAGE_RGB ); // cubenx
            images[2] = image( stringf("%s/posy", asset), IMAGE_RGB ); // cubepy
            images[3] = image( stringf("%s/negy", asset), IMAGE_RGB ); // cubeny
            images[4] = image( stringf("%s/posz", asset), IMAGE_RGB ); // cubepz
            images[5] = image( stringf("%s/negz", asset), IMAGE_RGB ); // cubenz
            sky.cubemap = cubemap6( images, 0 );
            for( int i = 0; i < countof(images); ++i ) image_destroy(&images[i]);
        }
    }
    return sky;
}
// Bind the sky shader and GL state for skybox rendering, and publish this
// skybox's cubemap as the globally active one (see cubemap_get_active).
// Depth testing is disabled; depth func is LEQUAL for when it is re-enabled.
// Commented lines record earlier variants of this state setup.
int skybox_push_state(skybox_t *sky, mat44 proj, mat44 view) {
    last_cubemap = &sky->cubemap;
    //glClear(GL_DEPTH_BUFFER_BIT);
    //glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LEQUAL);
    //glDisable(GL_CULL_FACE);
    glDisable(GL_DEPTH_TEST);
    mat44 mvp; multiply44x2(mvp, proj, view);
    //glDepthMask(GL_FALSE);
    shader_bind(sky->program);
    shader_mat44("u_mvp", mvp);
    if( sky->flags ) {
        // cubemap mode only; rayleigh mode needs no texture
        shader_cubemap("u_cubemap", sky->cubemap.id);
    }
    return 0; // @fixme: return sortable hash here?
}
// Counterpart of skybox_push_state(). Currently a no-op: GL state is left
// as-is for the next renderer to configure.
int skybox_pop_state(skybox_t *sky) {
    //glDepthMask(GL_TRUE);
    //glClear(GL_DEPTH_BUFFER_BIT);
    return 0;
}
// Release everything the skybox owns: shader program, cubemap and cube mesh.
void skybox_destroy(skybox_t *sky) {
    glDeleteProgram(sky->program);
    cubemap_destroy(&sky->cubemap);
    mesh_destroy(&sky->geometry);
}
// -----------------------------------------------------------------------------
// meshes
// Convenience constructor: zero-initialize a mesh_t and upload the given
// vertex/index data through mesh_upgrade().
mesh_t mesh_create(const char *format, int vertex_stride,int vertex_count,const void *vertex_data, int index_count,const void *index_data, int flags) {
    mesh_t out = {0};
    mesh_upgrade(&out, format, vertex_stride,vertex_count,vertex_data, index_count,index_data, flags);
    return out;
}
// (Re)upload vertex/index buffers of mesh `m` and rebuild its VAO layout.
// `format` is a per-attribute mini-language, attributes separated by spaces:
//   '0'..'4' -> component count, 'f' -> float, 'u'/'i' -> unsigned int,
//   'b' -> unsigned byte (only when preceded by a digit), '*' -> flip the
//   attribute's normalized flag. Letters in "pntcwai" are semantic tags
//   (position/normal/...) accepted but otherwise ignored here.
// `vertex_stride` > 0 overrides the stride computed from `format`.
// MESH_STREAM in `flags` selects GL_STREAM_DRAW, else GL_STATIC_DRAW.
void mesh_upgrade(mesh_t *m, const char *format, int vertex_stride,int vertex_count,const void *vertex_data, int index_count,const void *index_data, int flags) {
    m->flags = flags;
    // setup
    unsigned sizeof_index = sizeof(GLuint);
    unsigned sizeof_vertex = 0;
    m->index_count = index_count;
    m->vertex_count = vertex_count;
    // iterate vertex attributes { position, normal + uv + tangent + bitangent + ... }
    struct vertex_descriptor {
        int vertex_type, num_attribute, num_components, alt_normalized;
        int stride, offset;
    } descriptor[16] = {0}, *dc = &descriptor[0];
    // note the `break; case X:` layout: the break precedes each label, so
    // there is no fallthrough except the deliberate ' ' -> '\0' one below,
    // which finalizes the attribute being parsed.
    do switch( *format ) {
        break; case '*': dc->alt_normalized = 1;
        break; case '0': dc->num_components = 0;
        break; case '1': dc->num_components = 1;
        break; case '2': dc->num_components = 2;
        break; case '3': dc->num_components = 3;
        break; case '4': dc->num_components = 4;
        break; case 'f': dc->vertex_type = GL_FLOAT;
        break; case 'u': case 'i': dc->vertex_type = GL_UNSIGNED_INT;
        break; case 'b': if(format[-1] >= '0' && format[-1] <= '9') dc->vertex_type = GL_UNSIGNED_BYTE; //else bitangent.
        break; case ' ': while (format[1] == ' ') format++; case '\0':
            // close current attribute: default to float, record offset, grow stride
            if (!dc->vertex_type) dc->vertex_type = GL_FLOAT;
            dc->offset = sizeof_vertex;
            sizeof_vertex += (dc->stride = dc->num_components * (dc->vertex_type == GL_UNSIGNED_BYTE ? 1 : 4));
            ++dc;
        break; default: if( !strchr("pntcwai", *format) ) PANIC("unsupported vertex type '%c'", *format);
    } while (*format++);
    if(vertex_stride > 0) sizeof_vertex = vertex_stride;
    // layout
    if(!m->vao) glGenVertexArrays(1, &m->vao);
    glBindVertexArray(m->vao);
    // index data
    if( index_data && index_count ) {
        m->index_count = index_count;
        if(!m->ibo) glGenBuffers(1, &m->ibo);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, m->ibo);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, m->index_count * sizeof_index, index_data, flags & MESH_STREAM ? GL_STREAM_DRAW : GL_STATIC_DRAW);
    }
    // vertex data
    if( vertex_data && vertex_count ) {
        m->vertex_count = vertex_count;
        if(!m->vbo) glGenBuffers(1, &m->vbo);
        glBindBuffer(GL_ARRAY_BUFFER, m->vbo);
        glBufferData(GL_ARRAY_BUFFER, m->vertex_count * sizeof_vertex, vertex_data, flags & MESH_STREAM ? GL_STREAM_DRAW : GL_STATIC_DRAW);
    }
    for( int i = 0; i < 8; ++i ) {
        // glDisableVertexAttribArray(i);
    }
    // vertex setup: iterate descriptors
    for( int i = 0; i < countof(descriptor); ++i ) {
        if( descriptor[i].num_components ) {
            glDisableVertexAttribArray(i);
            glVertexAttribPointer(i,
                // byte attributes are normalized by default; '*' flips either default
                descriptor[i].num_components, descriptor[i].vertex_type, (descriptor[i].vertex_type == GL_UNSIGNED_BYTE ? GL_TRUE : GL_FALSE) ^ (descriptor[i].alt_normalized ? GL_TRUE : GL_FALSE),
                sizeof_vertex, (GLchar*)NULL + descriptor[i].offset);
            glEnableVertexAttribArray(i);
        } else {
            glDisableVertexAttribArray(i);
        }
    }
    glBindVertexArray(0);
}
// Counterpart of mesh_push_state(). Currently a no-op: GL state is left as-is.
void mesh_pop_state(mesh_t *sm) {
}
// Bind shader `program` and the common uniforms (u_mvp, SH coefficients if a
// cubemap is active, u_texture2d) before mesh_render(). `billboard` bit flags
// lock individual model-view axes to a uniform scale so the mesh faces the
// camera: 4 locks the X row, 2 the Y row, 1 the Z row.
void mesh_push_state(mesh_t *sm, unsigned program, unsigned texture_id, float model[16], float view[16], float proj[16], unsigned billboard) {
    glEnable(GL_DEPTH_TEST);
    glDepthFunc(GL_LESS);
    glActiveTexture(GL_TEXTURE0);
    shader_bind(program);
    mat44 mv; multiply44x2(mv, view, model);
    if( billboard ) {
        // NOTE(review): scale `d` is taken from the main-diagonal entries only;
        // this assumes uniform scale and no shear -- confirm if non-uniform
        // scales are ever expected here.
        float d = sqrt(mv[4*0+0] * mv[4*0+0] + mv[4*1+1] * mv[4*1+1] + mv[4*2+2] * mv[4*2+2]);
        if(billboard & 4) mv[4*0+0] = d, mv[4*0+1] = 0, mv[4*0+2] = 0;
        if(billboard & 2) mv[4*1+0] = 0, mv[4*1+1] = d, mv[4*1+2] = 0;
        if(billboard & 1) mv[4*2+0] = 0, mv[4*2+1] = 0, mv[4*2+2] = d;
    }
    mat44 mvp; multiply44x2(mvp, proj, mv); // multiply44x3(mvp, proj, view, model);
    shader_mat44("u_mvp", mvp);
    if (cubemap_get_active()) {
        // fix: glGetUniformLocation returns a signed GLint (-1 when absent);
        // store it signed so the "not found" sentinel is preserved as-is.
        GLint uniform_loc = glGetUniformLocation(program, "u_coefficients_sh");
        glUniform3fv(uniform_loc, 9, &cubemap_get_active()->sh[0].x);
    }
    shader_texture("u_texture2d", texture_id, 0);
}
// Issue the draw call for `sm`: indexed when an index buffer exists, plain
// arrays otherwise. Primitive type is a strip or triangle list depending on
// the MESH_TRIANGLE_STRIP flag. Updates draw-call/triangle profiling stats.
void mesh_render(mesh_t *sm) {
    glBindVertexArray(sm->vao);
    if( sm->ibo ) { // with indices
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, sm->ibo); // <-- why intel?
        glDrawElements(sm->flags & MESH_TRIANGLE_STRIP ? GL_TRIANGLE_STRIP : GL_TRIANGLES, sm->index_count, GL_UNSIGNED_INT, (char*)0);
        profile_incstat("drawcalls", +1);
        profile_incstat("triangles", sm->index_count/3);
    } else { // with vertices only
        glDrawArrays(sm->flags & MESH_TRIANGLE_STRIP ? GL_TRIANGLE_STRIP : GL_TRIANGLES, 0, sm->vertex_count /* / 3 */);
        profile_incstat("drawcalls", +1);
        profile_incstat("triangles", sm->vertex_count/3);
    }
}
// Release the GL objects owned by the mesh (resolves the old @todo: the VAO,
// index buffer and vertex buffer were leaked before) and reset the counts so
// the mesh_t can be safely reused or re-upgraded.
void mesh_destroy(mesh_t *m) {
    if( !m ) return;
    if( m->vao ) glDeleteVertexArrays(1, &m->vao), m->vao = 0;
    if( m->ibo ) glDeleteBuffers(1, &m->ibo), m->ibo = 0;
    if( m->vbo ) glDeleteBuffers(1, &m->vbo), m->vbo = 0;
    m->index_count = 0;
    m->vertex_count = 0;
}
// -----------------------------------------------------------------------------
// screenshots
// Grab the backbuffer as raw 8-bit pixels into an internal, reused buffer
// (caller must NOT free it). `n` selects the format: 3 RGB, 4 RGBA, -3 BGR,
// -4 BGRA. NOTE(review): `n` is declared unsigned, so -3/-4 arrive as their
// wrapped bit patterns; the comparisons and abs(n) below rely on that
// round-trip. The async path lags NUM_PBOS frames behind the display.
void* screenshot( unsigned n ) { // 3 RGB, 4 RGBA, -3 BGR, -4 BGRA
    int w = window_width(), h = window_height();
    int mode = n == 3 ? GL_RGB : n == -3 ? GL_BGR : n == 4 ? GL_RGBA : GL_BGRA;
    static local uint8_t *pixels = 0;
    pixels = (uint8_t*)REALLOC(pixels, w * h * 4 );
#if 0
    // sync, 10 ms
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0); // disable any pbo, in case somebody did for us
    glPixelStorei(GL_PACK_ALIGNMENT, 1);
    glReadBuffer(GL_FRONT);
    glReadPixels(0, 0, w, h, mode, GL_UNSIGNED_BYTE, pixels);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    return pixels;
#else
    // async: round-robin pixel-pack buffers; reads complete NUM_PBOS frames later
    enum { NUM_PBOS = 16 };
    static local GLuint pbo[NUM_PBOS] = {0}, lastw, lasth;
    static local int frame = 0, bound = 0;
    // (re)create the PBO ring whenever the window size changes
    if( lastw != w || lasth != h ) {
        lastw = w, lasth = h;
        frame = 0;
        bound = 0;
        // @fixme: delete previous pbos
        for( int i = 0; i < NUM_PBOS; ++i ) {
            glGenBuffers(1, &pbo[i]);
            glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[i]);
            glBufferData(GL_PIXEL_PACK_BUFFER, w * h * 4, NULL, GL_STREAM_READ); // GL_STATIC_READ);
        }
    }
    // fix: `frame` is pinned negative once warm-up finishes (see below), so the
    // old `frame < NUM_PBOS` test was always true and the map/read branch was
    // unreachable -- the buffer never received any pixels after warm-up.
    if (frame >= 0 && frame < NUM_PBOS) {
        // warm-up: just queue async reads into each pbo of the ring
        glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[bound]);
        glReadPixels(0, 0, w, h, mode, GL_UNSIGNED_BYTE, (GLvoid*)((GLchar*)NULL+0));
    } else {
        // steady state: map the oldest pbo (bound NUM_PBOS frames ago), copy it
        // out, then queue the next async read into it
        glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo[bound]);
        void *ptr = glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY);
        memcpy(pixels, ptr, w * h * abs(n));
        glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
        // trigger next read
        glReadPixels(0, 0, w, h, mode, GL_UNSIGNED_BYTE, (GLvoid*)((GLchar*)NULL+0));
    }
    bound = (bound + 1) % NUM_PBOS;
    // advance the warm-up counter to NUM_PBOS, then pin it negative so the
    // steady-state branch runs from then on
    frame += frame >= 0 && frame < NUM_PBOS;
    frame *= frame == NUM_PBOS ? -1 : +1;
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
    return pixels;
#endif
}
// -----------------------------------------------------------------------------
// viewports
// Set the clear color from a vec3; alpha is fixed fully opaque.
void viewport_color(vec3 color3) {
    float r = color3.x, g = color3.y, b = color3.z;
    glClearColor(r, g, b, 1);
}
// Clear the selected buffers. Depth clears to 1 (far plane), stencil to 0.
void viewport_clear(bool color, bool depth) {
    GLbitfield buffers = 0;
    if( color ) buffers |= GL_COLOR_BUFFER_BIT;
    if( depth ) buffers |= GL_DEPTH_BUFFER_BIT;
    glClearDepthf(1);
    glClearStencil(0);
    glClear(buffers);
}
// Set viewport + scissor rectangle from two window-space corners (y-down);
// converts the origin to GL's bottom-left, y-up convention.
void viewport_clip(vec2 from, vec2 to) {
    float w = to.x - from.x;
    float h = to.y - from.y;
    float x = from.x;
    float y = window_height() - from.y - h;
    glViewport(x, y, w, h);
    glScissor(x, y, w, h);
}
// -----------------------------------------------------------------------------
// fbos
// Create a framebuffer object with optional color and depth texture
// attachments (pass 0 to skip one). Nonzero `flags` disables the draw/read
// buffers, for depth-only targets such as shadow maps. PANICs if the FBO is
// incomplete. Returns the GL framebuffer name; release via fbo_destroy().
unsigned fbo(unsigned color_texture_id, unsigned depth_texture_id, int flags) {
    GLuint fbo;
    glGenFramebuffers(1, &fbo);
    glBindFramebuffer(GL_FRAMEBUFFER, fbo);
    if( color_texture_id ) glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, color_texture_id, 0);
    if( depth_texture_id ) glFramebufferTexture2D(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_TEXTURE_2D, depth_texture_id, 0);
#if 0 // this is working; it's just not enabled for now
    else {
        // create a non-sampleable renderbuffer object for depth and stencil attachments
        unsigned int rbo;
        glGenRenderbuffers(1, &rbo);
        glBindRenderbuffer(GL_RENDERBUFFER, rbo);
        glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH24_STENCIL8, color.width, color.height); // use a single renderbuffer object for both a depth AND stencil buffer.
        glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_STENCIL_ATTACHMENT, GL_RENDERBUFFER, rbo); // now actually attach it
    }
#endif
    if(flags) glDrawBuffer(GL_NONE);
    if(flags) glReadBuffer(GL_NONE);
#if 1
    GLenum result = glCheckFramebufferStatus(GL_FRAMEBUFFER);
    if( GL_FRAMEBUFFER_COMPLETE != result ) {
        PANIC("ERROR: Framebuffer not complete.");
    }
#else
    // verbose diagnostics variant, disabled
    switch (glCheckFramebufferStatus(GL_FRAMEBUFFER)) {
    case GL_FRAMEBUFFER_COMPLETE: break;
    case GL_FRAMEBUFFER_UNDEFINED: PANIC("GL_FRAMEBUFFER_UNDEFINED");
    case GL_FRAMEBUFFER_UNSUPPORTED: PANIC("GL_FRAMEBUFFER_UNSUPPORTED");
    case GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT: PANIC("GL_FRAMEBUFFER_INCOMPLETE_ATTACHMENT");
    case GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER: PANIC("GL_FRAMEBUFFER_INCOMPLETE_DRAW_BUFFER");
    case GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER: PANIC("GL_FRAMEBUFFER_INCOMPLETE_READ_BUFFER");
    case GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE: PANIC("GL_FRAMEBUFFER_INCOMPLETE_MULTISAMPLE");
    // case GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT: PANIC("GL_FRAMEBUFFER_INCOMPLETE_FORMATS_EXT");
    case GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS: PANIC("GL_FRAMEBUFFER_INCOMPLETE_LAYER_TARGETS");
    // case GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT: PANIC("GL_FRAMEBUFFER_INCOMPLETE_DIMENSIONS_EXT");
    case GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT: PANIC("GL_FRAMEBUFFER_INCOMPLETE_MISSING_ATTACHMENT");
    default: PANIC("ERROR: Framebuffer not complete. glCheckFramebufferStatus returned %x", glCheckFramebufferStatus(GL_FRAMEBUFFER));
    }
#endif
    glBindFramebuffer (GL_FRAMEBUFFER, 0);
    return fbo;
}
// Bind framebuffer `id` for subsequent rendering (0 = default backbuffer).
void fbo_bind(unsigned id) {
    glBindFramebuffer(GL_FRAMEBUFFER, id);
}
void fbo_unbind() {
fbo_bind(0);
}
// Delete a framebuffer object. Textures attached to it are NOT deleted here.
void fbo_destroy(unsigned id) {
    // glDeleteRenderbuffers(1, &renderbuffer);
    glDeleteFramebuffers(1, &id);
}
// -----------------------------------------------------------------------------
// post-fxs swapchain
typedef struct passfx passfx;
typedef struct postfx postfx;
// Post-processing chain API; definitions follow below in this file.
void postfx_create(postfx *fx, int flags);
void postfx_destroy(postfx *fx);
bool postfx_load(postfx *fx, const char *name, const char *fragment);
bool postfx_begin(postfx *fx, int width, int height);
bool postfx_end(postfx *fx);
bool postfx_enabled(postfx *fx, int pass_number);
bool postfx_enable(postfx *fx, int pass_number, bool enabled);
// bool postfx_toggle(postfx *fx, int pass_number);
void postfx_clear(postfx *fx);
char* postfx_name(postfx *fx, int slot);
// One post-processing pass: a fullscreen-quad mesh (only its VAO is used),
// the compiled shader program and cached uniform locations, indexed by the
// u_* enum below (-1 marks "uniform absent in this shader").
struct passfx {
    mesh_t m;
    char *name;
    unsigned program;
    int uniforms[16];
};
// A post-fx chain: two ping-pong render targets plus up to 64 passes gated
// by a 64-bit enable mask.
struct postfx {
    // renderbuffers: color & depth textures
    unsigned fb[2];
    texture_t diffuse[2], depth[2];
    // shader passes
    passfx pass[64];
    uint64_t mask; // bit i set => pass i enabled
    // global enable flag
    bool enabled;
    // total passes registered so far (slot allocator; wraps at 64)
    int num_loaded;
};
// Indices into passfx.uniforms[]: cached glGetUniformLocation results.
enum {
    u_color,
    u_depth,
    u_time,
    u_frame,
    u_width, u_height,
    u_mousex, u_mousey,
    u_channelres0x, u_channelres0y,
    u_channelres1x, u_channelres1y,
};
// Reset a post-fx chain to a blank, globally-enabled state.
// `flags` is currently unused.
void postfx_create(postfx *fx, int flags) {
    postfx blank = {0};
    *fx = blank;
    fx->enabled = 1;
    (void)flags;
}
// Tear down the whole chain: per-pass heap name, shader program and quad VAO
// (the latter two were previously leaked -- postfx_load_from_mem creates one
// of each per pass), both ping-pong color/depth targets and both FBOs; then
// zero the struct so it can be recreated.
void postfx_destroy( postfx *fx ) {
    for( int i = 0; i < countof(fx->pass); ++i ) {
        FREE(fx->pass[i].name);
        if( fx->pass[i].program ) glDeleteProgram(fx->pass[i].program);
        if( fx->pass[i].m.vao ) glDeleteVertexArrays(1, &fx->pass[i].m.vao);
    }
    texture_destroy(&fx->diffuse[0]);
    texture_destroy(&fx->diffuse[1]);
    texture_destroy(&fx->depth[0]);
    texture_destroy(&fx->depth[1]);
    fbo_destroy(fx->fb[0]);
    fbo_destroy(fx->fb[1]);
    postfx z = {0};
    *fx = z;
}
// Return the registered name of pass `slot` (slots wrap at 64 entries).
char* postfx_name(postfx *fx, int slot) {
    passfx *p = &fx->pass[ slot & 63 ];
    return p->name;
}
// Register a new pass from fragment-shader source `fs`, allocated into the
// next slot (wrapping at 64). The source is prefixed with a preamble and,
// when it defines a shadertoy-style mainImage(), with a wrapper main().
// Uniform locations are resolved once here, trying several common aliases
// per slot. PANICs on empty source; otherwise returns true.
bool postfx_load_from_mem( postfx *fx, const char *name, const char *fs ) {
    if(!fs || !fs[0]) PANIC("!invalid fragment shader");
    int slot = fx->num_loaded++;
    passfx *p = &fx->pass[ slot & 63 ];
    p->name = STRDUP(name);
    const char *vs = vs_0_2_fullscreen_quad_B;
    // patch fragment
    char *fs2 = (char*)CALLOC(1, 64*1024); // fixed budget, no overflow check: sources must stay under 64 KiB
    strcat(fs2, fs_2_4_preamble);
    if( strstr(fs, "mainImage") ) {
        strcat(fs2, fs_main_shadertoy );
    }
    strcat(fs2, fs);
    p->program = shader(vs, fs2, "vtexcoord", "fragColor" );
    FREE(fs2);
    glUseProgram(p->program); // needed?
    // resolve uniforms; -1 means "not present in this shader"
    for( int i = 0; i < countof(p->uniforms); ++i ) p->uniforms[i] = -1;
    if( p->uniforms[u_time] == -1 ) p->uniforms[u_time] = glGetUniformLocation(p->program, "iTime");
    if( p->uniforms[u_frame] == -1 ) p->uniforms[u_frame] = glGetUniformLocation(p->program, "iFrame");
    if( p->uniforms[u_width] == -1 ) p->uniforms[u_width] = glGetUniformLocation(p->program, "iWidth");
    if( p->uniforms[u_height] == -1 ) p->uniforms[u_height] = glGetUniformLocation(p->program, "iHeight");
    if( p->uniforms[u_mousex] == -1 ) p->uniforms[u_mousex] = glGetUniformLocation(p->program, "iMousex");
    if( p->uniforms[u_mousey] == -1 ) p->uniforms[u_mousey] = glGetUniformLocation(p->program, "iMousey");
    // color/depth samplers accept several conventional spellings
    if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "tex");
    if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "tex0");
    if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "tColor");
    if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "tDiffuse");
    if( p->uniforms[u_color] == -1 ) p->uniforms[u_color] = glGetUniformLocation(p->program, "iChannel0");
    if( p->uniforms[u_depth] == -1 ) p->uniforms[u_depth] = glGetUniformLocation(p->program, "tex1");
    if( p->uniforms[u_depth] == -1 ) p->uniforms[u_depth] = glGetUniformLocation(p->program, "tDepth");
    if( p->uniforms[u_depth] == -1 ) p->uniforms[u_depth] = glGetUniformLocation(p->program, "iChannel1");
    if( p->uniforms[u_channelres0x] == -1 ) p->uniforms[u_channelres0x] = glGetUniformLocation(p->program, "iChannelRes0x");
    if( p->uniforms[u_channelres0y] == -1 ) p->uniforms[u_channelres0y] = glGetUniformLocation(p->program, "iChannelRes0y");
    if( p->uniforms[u_channelres1x] == -1 ) p->uniforms[u_channelres1x] = glGetUniformLocation(p->program, "iChannelRes1x");
    if( p->uniforms[u_channelres1y] == -1 ) p->uniforms[u_channelres1y] = glGetUniformLocation(p->program, "iChannelRes1y");
    // set quad
    glGenVertexArrays(1, &p->m.vao);
    return true;
}
// Population count: number of set bits in x.
// Kernighan's method: each iteration clears the lowest set bit, so the loop
// runs once per set bit (at most 64 times).
uint64_t postfx_count_ones(uint64_t x) {
    uint64_t count = 0;
    while( x ) {
        x &= x - 1;
        ++count;
    }
    return count;
}
// Set or clear the enable bit of `pass`; the chain's global enabled flag
// follows whether any pass remains enabled. Returns the new global flag.
bool postfx_enable(postfx *fx, int pass, bool enabled) {
    uint64_t bit = 1ull << pass;
    if( enabled ) fx->mask |= bit;
    else fx->mask &= ~bit;
    fx->enabled = !!postfx_count_ones(fx->mask);
    return fx->enabled;
}
// True when the enable bit of `pass` is set in the chain's mask.
bool postfx_enabled(postfx *fx, int pass) {
    return (fx->mask >> pass) & 1ull;
}
// Flip the enabled state of `pass`; returns the chain's new global flag.
bool postfx_toggle(postfx *fx, int pass) {
    bool now_on = !postfx_enabled(fx, pass);
    return postfx_enable(fx, pass, now_on);
}
// Disable every pass and the chain's global enable flag.
void postfx_clear(postfx *fx) {
    fx->enabled = 0;
    fx->mask = 0;
}
// Prepare the chain for a new frame at `width` x `height` (clamped to >= 1).
// Recreates the ping-pong color/depth targets and FBOs on resize. When the
// chain is active, leaves fb[0] bound and cleared so the scene renders into
// it and returns true; otherwise unbinds any FBO and returns false.
bool postfx_begin(postfx *fx, int width, int height) {
    width += !width;   // avoid zero-sized render targets
    height += !height;
    // resize if needed
    if( fx->diffuse[0].w != width || fx->diffuse[0].h != height ) {
        texture_destroy(&fx->diffuse[0]);
        texture_destroy(&fx->diffuse[1]);
        texture_destroy(&fx->depth[0]);
        texture_destroy(&fx->depth[1]);
        fbo_destroy(fx->fb[0]);
        fbo_destroy(fx->fb[1]);
        // create texture, set texture parameters and content
        fx->diffuse[0] = texture_create(width, height, 4, NULL, TEXTURE_RGBA);
        fx->depth[0] = texture_create(width, height, 1, NULL, TEXTURE_DEPTH|TEXTURE_FLOAT);
        fx->fb[0] = fbo(fx->diffuse[0].id, fx->depth[0].id, 0);
        // create texture, set texture parameters and content
        fx->diffuse[1] = texture_create(width, height, 4, NULL, TEXTURE_RGBA);
        fx->depth[1] = texture_create(width, height, 1, NULL, TEXTURE_DEPTH|TEXTURE_FLOAT);
        fx->fb[1] = fbo(fx->diffuse[1].id, fx->depth[1].id, 0);
    }
    uint64_t num_active_passes = postfx_count_ones(fx->mask);
    bool active = fx->enabled && num_active_passes;
    if( !active ) {
        fbo_unbind();
        return false;
    }
    // clear both targets; leave fb[0] bound for the caller's scene rendering
    fbo_bind(fx->fb[1]);
    viewport_clear(true, true);
    viewport_clip(vec2(0,0), vec2(width, height));
    fbo_bind(fx->fb[0]);
    viewport_clear(true, true);
    viewport_clip(vec2(0,0), vec2(width, height));
    return true;
}
// Run every enabled pass over the frame captured since postfx_begin().
// Passes ping-pong between the two diffuse/depth target pairs; the final
// enabled pass renders to the backbuffer. Returns false when the chain is
// inactive (nothing enabled).
bool postfx_end(postfx *fx) {
    uint64_t num_active_passes = postfx_count_ones(fx->mask);
    bool active = fx->enabled && num_active_passes;
    if( !active ) {
        return false;
    }
    fbo_unbind();
    // disable depth test in 2d rendering
    glDisable(GL_DEPTH_TEST);
    int frame = 0;                  // which of the two targets is the current source
    float t = time_ms() / 1000.f;
    float w = fx->diffuse[0].w;
    float h = fx->diffuse[0].h;
    float mx = input(MOUSE_X);
    float my = input(MOUSE_Y);
    for(int i = 0, e = countof(fx->pass); i < e; ++i) {
        if( fx->mask & (1ull << i) ) {
            passfx *pass = &fx->pass[i];
            if( !pass->program ) { --num_active_passes; continue; }
            glUseProgram(pass->program);
            // bind texture to texture unit 0
            // shader_texture(fx->diffuse[frame], 0);
            glActiveTexture(GL_TEXTURE0 + 0); glBindTexture(GL_TEXTURE_2D, fx->diffuse[frame].id);
            glUniform1i(pass->uniforms[u_color], 0);
            glUniform1f(pass->uniforms[u_channelres0x], fx->diffuse[frame].w);
            glUniform1f(pass->uniforms[u_channelres0y], fx->diffuse[frame].h);
            // bind depth to texture unit 1
            // shader_texture(fx->depth[frame], 1);
            glActiveTexture(GL_TEXTURE0 + 1); glBindTexture(GL_TEXTURE_2D, fx->depth[frame].id);
            glUniform1i(pass->uniforms[u_depth], 1);
            // bind uniforms
            static unsigned f = 0; ++f; // global frame counter across all chains
            glUniform1f(pass->uniforms[u_time], t);
            glUniform1f(pass->uniforms[u_frame], f-1);
            glUniform1f(pass->uniforms[u_width], w);
            glUniform1f(pass->uniforms[u_height], h);
            glUniform1f(pass->uniforms[u_mousex], mx);
            glUniform1f(pass->uniforms[u_mousey], my);
            // bind the vao
            // `bound` nonzero => more passes follow: render into the other target;
            // zero => this is the last pass: render to the backbuffer instead.
            int bound = --num_active_passes;
            if( bound ) fbo_bind(fx->fb[frame ^= 1]);
            // fullscreen quad
            glBindVertexArray(pass->m.vao);
            glDrawArrays(GL_TRIANGLES, 0, 6);
            profile_incstat("drawcalls", +1);
            profile_incstat("triangles", +2);
            glBindVertexArray(0);
            if( bound ) fbo_unbind();
            else glUseProgram(0);
        }
    }
    return true;
}
// Global post-fx chain shared by all the fx_* convenience wrappers.
static postfx fx;
// Compile `content` as a new pass named `nameid`; the global chain is
// lazily created on first use.
void fx_load_from_mem(const char *nameid, const char *content) {
    do_once postfx_create(&fx, 0);
    postfx_load_from_mem(&fx, nameid, content);
}
// Load a pass from a file in the virtual filesystem.
// Routed through fx_load_from_mem() so the global chain gets its one-time
// postfx_create() initialization even when fx_load() is the first fx_* call
// (it previously called postfx_load_from_mem directly and skipped the init).
void fx_load(const char *file) {
    fx_load_from_mem(file_name(file), vfs_read(file));
}
// Begin capturing the frame into the global chain, sized to the window.
void fx_begin() {
    postfx_begin(&fx, window_width(), window_height());
}
// Flush the captured frame through every enabled pass.
void fx_end() {
    postfx_end(&fx);
}
// Query whether pass `pass` of the global chain is enabled.
int fx_enabled(int pass) {
    return postfx_enabled(&fx, pass);
}
// Enable or disable a single pass of the global chain.
void fx_enable(int pass, int enabled) {
    postfx_enable(&fx, pass, enabled);
}
// Enable or disable every pass loaded so far.
void fx_enable_all(int enabled) {
    for( int i = 0; i < fx.num_loaded; ++i ) fx_enable(i, enabled);
}
// Name of pass `pass`, as registered at load time.
char *fx_name(int pass) {
    return postfx_name(&fx, pass);
}
// -----------------------------------------------------------------------------
// skeletal meshes (iqm)
// IQM (Inter-Quake Model) binary format, little-endian on disk.
#define IQM_MAGIC "INTERQUAKEMODEL"
#define IQM_VERSION 2
// File header: element counts plus byte offsets (ofs_*) of each data section.
struct iqmheader {
    char magic[16];
    unsigned version;
    unsigned filesize;
    unsigned flags;
    unsigned num_text, ofs_text;
    unsigned num_meshes, ofs_meshes;
    unsigned num_vertexarrays, num_vertexes, ofs_vertexarrays;
    unsigned num_triangles, ofs_triangles, ofs_adjacency;
    unsigned num_joints, ofs_joints;
    unsigned num_poses, ofs_poses;
    unsigned num_anims, ofs_anims;
    unsigned num_frames, num_framechannels, ofs_frames, ofs_bounds;
    unsigned num_comment, ofs_comment;
    unsigned num_extensions, ofs_extensions;
};
// One submesh: name/material are offsets into the text section; vertex and
// triangle ranges index into the global arrays.
struct iqmmesh {
    unsigned name;
    unsigned material;
    unsigned first_vertex, num_vertexes;
    unsigned first_triangle, num_triangles;
};
// Vertex-array semantic types.
enum {
    IQM_POSITION,
    IQM_TEXCOORD,
    IQM_NORMAL,
    IQM_TANGENT,
    IQM_BLENDINDEXES,
    IQM_BLENDWEIGHTS,
    IQM_COLOR,
    IQM_CUSTOM = 0x10
};
// Vertex-array component formats.
enum {
    IQM_BYTE,
    IQM_UBYTE,
    IQM_SHORT,
    IQM_USHORT,
    IQM_INT,
    IQM_UINT,
    IQM_HALF,
    IQM_FLOAT,
    IQM_DOUBLE,
};
struct iqmtriangle {
    unsigned vertex[3];
};
struct iqmadjacency {
    unsigned triangle[3];
};
// Skeleton joint: `parent` is -1 for roots; transform is TRS.
struct iqmjoint {
    unsigned name;
    int parent;
    float translate[3], rotate[4], scale[3];
};
// Per-joint animation channel description: `mask` flags which of the 10
// channels (3 translate, 4 rotate, 3 scale) are stored per frame; stored
// ushort values are decoded as offset + value * scale.
struct iqmpose {
    int parent;
    unsigned mask;
    float channeloffset[10];
    float channelscale[10];
};
// One named animation clip: a frame range plus playback rate and flags.
struct iqmanim {
    unsigned name;
    unsigned first_frame, num_frames;
    float framerate;
    unsigned flags;
};
enum {
    IQM_LOOP = 1<<0
};
// Raw on-disk vertex stream descriptor: where one attribute's data lives.
struct iqmvertexarray {
    unsigned type;
    unsigned flags;
    unsigned format;
    unsigned size;
    unsigned offset;
};
// Per-frame bounding volume (aliased views of the same 6 floats).
struct iqmbounds {
    union {
        struct { float bbmin[3], bbmax[3]; };
        struct { vec3 min3, max3; };
        aabb box;
    };
    float xyradius, radius;
};
// -----------------------------------------------------------------------------
// Interleaved vertex as uploaded to the VBO. Field order/offsets must match
// the glVertexAttribPointer calls in model_set_state().
typedef struct iqm_vertex {
    GLfloat position[3];
    GLfloat texcoord[2];
    GLfloat normal[3];
    GLfloat tangent[4];
    GLubyte blendindexes[4];
    GLubyte blendweights[4];
    GLubyte color[4];
} iqm_vertex;
// Runtime state of a loaded IQM model. The `meshes`/`joints`/`poses`/`anims`/
// `bounds`/`frames-source` pointers alias sections inside `buf` (the raw file
// bytes); `outframe`, `baseframe`, `inversebaseframe`, `frames` and
// `textures` are CALLOC'ed and owned by this struct.
typedef struct iqm_t {
    int nummeshes, numtris, numverts, numjoints, numframes, numanims;
    GLuint program;
    GLuint vao, ibo, vbo;
    GLuint *textures;
    uint8_t *buf, *meshdata, *animdata;
    struct iqmmesh *meshes;
    struct iqmjoint *joints;
    struct iqmpose *poses;
    struct iqmanim *anims;
    struct iqmbounds *bounds;
    mat34 *baseframe, *inversebaseframe, *outframe, *frames;
    GLint bonematsoffset;
} iqm_t;
// Convenience aliases: the model_* functions below declare `iqm_t *q` and use
// these macros to touch its fields as if they were globals. Beware: while in
// scope they shadow any identically-named identifier.
#define program (q->program)
#define meshdata (q->meshdata)
#define animdata (q->animdata)
#define nummeshes (q->nummeshes)
#define numtris (q->numtris)
#define numverts (q->numverts)
#define numjoints (q->numjoints)
#define numframes (q->numframes)
#define numanims (q->numanims)
#define meshes (q->meshes)
#define textures (q->textures)
#define joints (q->joints)
#define poses (q->poses)
#define anims (q->anims)
#define baseframe (q->baseframe)
#define inversebaseframe (q->inversebaseframe)
#define outframe (q->outframe)
#define frames (q->frames)
#define vao (q->vao)
#define ibo (q->ibo)
#define vbo (q->vbo)
#define bonematsoffset (q->bonematsoffset)
#define buf (q->buf)
#define bounds (q->bounds)
// Upload the matrices to `shader`, probing several conventional uniform
// spellings (MVP/u_mvp, M/model, V/view, P/proj). Also sets SKINNED and, for
// animated models, uploads the vsBoneMatrix 3x4 palette from q->outframe.
static
void model_set_uniforms(model_t m, int shader, mat44 proj, mat44 view, mat44 model) {
    if(!m.iqm) return;
    iqm_t *q = m.iqm;
    glUseProgram(shader);
    int loc;
    //if( (loc = glGetUniformLocation(shader, "M")) >= 0 ) glUniformMatrix4fv( loc, 1, GL_FALSE/*GL_TRUE*/, m); // RIM
    if( (loc = glGetUniformLocation(shader, "MVP")) >= 0 ) {
        mat44 mvp; multiply44x3(mvp, proj, view, model);
        glUniformMatrix4fv( loc, 1, GL_FALSE/*GL_TRUE*/, mvp);
    }
    else
    if( (loc = glGetUniformLocation(shader, "u_mvp")) >= 0 ) {
        mat44 mvp; multiply44x3(mvp, proj, view, model);
        glUniformMatrix4fv( loc, 1, GL_FALSE/*GL_TRUE*/, mvp);
    }
#if 0
    // @todo: mat44 projview
#endif
    if ((loc = glGetUniformLocation(shader, "M")) >= 0) {
        glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, model);
    }
    else
    if ((loc = glGetUniformLocation(shader, "model")) >= 0) {
        glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, model);
    }
    if ((loc = glGetUniformLocation(shader, "V")) >= 0) {
        glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, view);
    }
    else
    if ((loc = glGetUniformLocation(shader, "view")) >= 0) {
        glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, view);
    }
    if ((loc = glGetUniformLocation(shader, "P")) >= 0) {
        glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, proj);
    }
    else
    if ((loc = glGetUniformLocation(shader, "proj")) >= 0) {
        glUniformMatrix4fv(loc, 1, GL_FALSE/*GL_TRUE*/, proj);
    }
    if( (loc = glGetUniformLocation(shader, "SKINNED")) >= 0 ) glUniform1i( loc, numanims ? GL_TRUE : GL_FALSE);
    if( numanims )
    if( (loc = glGetUniformLocation(shader, "vsBoneMatrix")) >= 0 ) glUniformMatrix3x4fv( loc, numjoints, GL_FALSE, outframe[0]);
}
// Bind the model's VAO/IBO/VBO and (re)declare the iqm_vertex attribute
// layout. Skinning attributes (blend indexes/weights) are only enabled when
// animation frames are present.
static
void model_set_state(model_t m) {
    if(!m.iqm) return;
    iqm_t *q = m.iqm;
    glBindVertexArray( vao );
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    // attribute layout mirrors struct iqm_vertex
    glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex, position) );
    glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex, texcoord) );
    glVertexAttribPointer(2, 3, GL_FLOAT, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex, normal) );
    glVertexAttribPointer(3, 4, GL_FLOAT, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex, tangent) );
    glEnableVertexAttribArray(0);
    glEnableVertexAttribArray(1);
    glEnableVertexAttribArray(2);
    glEnableVertexAttribArray(3);
    if(numframes > 0) {
        // blend weights are normalized bytes; indexes are raw byte indices
        glVertexAttribPointer(4, 4, GL_UNSIGNED_BYTE, GL_FALSE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex,blendindexes) );
        glVertexAttribPointer(5, 4, GL_UNSIGNED_BYTE, GL_TRUE, sizeof(iqm_vertex), (GLvoid*)offsetof(iqm_vertex,blendweights) );
        glEnableVertexAttribArray(4);
        glEnableVertexAttribArray(5);
    }
    // 6 color
    // 7 bitangent? into texcoord.z?
    // [draw]
#if 0
    glDisableVertexAttribArray(1);
    if(numframes > 0) {
        glDisableVertexAttribArray(4);
        glDisableVertexAttribArray(5);
    }
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
#endif
    glBindVertexArray( 0 );
}
// Parse the geometry sections of an IQM file (raw bytes in q->buf) and upload
// them to GL: endian-fix each section, locate the per-vertex input streams,
// build per-joint base/inverse-base poses, interleave the streams into
// iqm_vertex, and upload VBO + IBO. Returns false if mesh data was already
// loaded; PANICs (and returns false) on malformed vertex arrays.
// Fix: the validation paths used `return PANIC("!"); false;` -- the `false;`
// was a dead statement and the function returned PANIC's value instead; the
// commented-out line below shows the intended comma-operator form, which is
// now used throughout.
static
bool model_load_meshes(iqm_t *q, const struct iqmheader *hdr) {
    if(meshdata) return false;
    // endian-fix the sections we are about to walk
    lil32p(&buf[hdr->ofs_vertexarrays], hdr->num_vertexarrays*sizeof(struct iqmvertexarray)/sizeof(uint32_t));
    lil32p(&buf[hdr->ofs_triangles], hdr->num_triangles*sizeof(struct iqmtriangle)/sizeof(uint32_t));
    lil32p(&buf[hdr->ofs_meshes], hdr->num_meshes*sizeof(struct iqmmesh)/sizeof(uint32_t));
    lil32p(&buf[hdr->ofs_joints], hdr->num_joints*sizeof(struct iqmjoint)/sizeof(uint32_t));
    meshdata = buf;
    nummeshes = hdr->num_meshes;
    numtris = hdr->num_triangles;
    numverts = hdr->num_vertexes;
    numjoints = hdr->num_joints;
    outframe = CALLOC(hdr->num_joints, sizeof(mat34));
    // locate the separate input vertex streams
    float *inposition = NULL, *innormal = NULL, *intangent = NULL, *intexcoord = NULL;
    uint8_t *inblendindex8 = NULL, *inblendweight8 = NULL;
    int *inblendindexi = NULL; float *inblendweightf = NULL;
    struct iqmvertexarray *vas = (struct iqmvertexarray *)&buf[hdr->ofs_vertexarrays];
    for(int i = 0; i < (int)hdr->num_vertexarrays; i++) {
        struct iqmvertexarray *va = &vas[i];
        switch(va->type) {
        default: continue; // return PANIC("unknown iqm vertex type (%d)", va->type), false;
        break; case IQM_POSITION: if(va->format != IQM_FLOAT || va->size != 3) return PANIC("!"), false; inposition = (float *)&buf[va->offset]; lil32pf(inposition, 3*hdr->num_vertexes);
        break; case IQM_NORMAL: if(va->format != IQM_FLOAT || va->size != 3) return PANIC("!"), false; innormal = (float *)&buf[va->offset]; lil32pf(innormal, 3*hdr->num_vertexes);
        break; case IQM_TANGENT: if(va->format != IQM_FLOAT || va->size != 4) return PANIC("!"), false; intangent = (float *)&buf[va->offset]; lil32pf(intangent, 4*hdr->num_vertexes);
        break; case IQM_TEXCOORD: if(va->format != IQM_FLOAT || va->size != 2) return PANIC("!"), false; intexcoord = (float *)&buf[va->offset]; lil32pf(intexcoord, 2*hdr->num_vertexes);
        break; case IQM_BLENDINDEXES: if(va->size != 4) return PANIC("!"), false; if(va->format != IQM_UBYTE && va->format != IQM_INT) return PANIC("!"), false;
            if(va->format == IQM_UBYTE) inblendindex8 = (uint8_t *)&buf[va->offset];
            else inblendindexi = (int *)&buf[va->offset];
        break; case IQM_BLENDWEIGHTS: if(va->size != 4) return PANIC("!"), false; if(va->format != IQM_UBYTE && va->format != IQM_FLOAT) return PANIC("!"), false;
            if(va->format == IQM_UBYTE) inblendweight8 = (uint8_t *)&buf[va->offset];
            else inblendweightf = (float *)&buf[va->offset];
        }
    }
    if (hdr->ofs_bounds) lil32p(buf + hdr->ofs_bounds, hdr->num_frames * sizeof(struct iqmbounds));
    if (hdr->ofs_bounds) bounds = (struct iqmbounds *) &buf[hdr->ofs_bounds];
    meshes = (struct iqmmesh *)&buf[hdr->ofs_meshes];
    joints = (struct iqmjoint *)&buf[hdr->ofs_joints];
    // base pose and inverse base pose, pre-concatenated down the joint hierarchy
    baseframe = CALLOC(hdr->num_joints, sizeof(mat34));
    inversebaseframe = CALLOC(hdr->num_joints, sizeof(mat34));
    for(int i = 0; i < (int)hdr->num_joints; i++) {
        struct iqmjoint *j = &joints[i];
        compose34(baseframe[i], ptr3(j->translate), normq(ptrq(j->rotate)), ptr3(j->scale));
        invert34(inversebaseframe[i], baseframe[i]);
        if(j->parent >= 0) {
            multiply34x2(baseframe[i], baseframe[j->parent], baseframe[i]);
            multiply34(inversebaseframe[i], inversebaseframe[j->parent]);
        }
    }
    // index buffer
    struct iqmtriangle *tris = (struct iqmtriangle *)&buf[hdr->ofs_triangles];
    glGenVertexArrays(1, &vao);
    glBindVertexArray(vao);
    if(!ibo) glGenBuffers(1, &ibo);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ibo);
    glBufferData(GL_ELEMENT_ARRAY_BUFFER, hdr->num_triangles*sizeof(struct iqmtriangle), tris, GL_STATIC_DRAW);
    glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
    // interleave the input streams into one iqm_vertex array and upload it
    iqm_vertex *verts = CALLOC(hdr->num_vertexes, sizeof(iqm_vertex));
    for(int i = 0; i < (int)hdr->num_vertexes; i++) {
        iqm_vertex *v = &verts[i];
        if(inposition) memcpy(v->position, &inposition[i*3], sizeof(v->position));
        if(innormal) memcpy(v->normal, &innormal[i*3], sizeof(v->normal));
        if(intangent) memcpy(v->tangent, &intangent[i*4], sizeof(v->tangent));
        if(intexcoord) memcpy(v->texcoord, &intexcoord[i*2], sizeof(v->texcoord));
        if(inblendindex8) memcpy(v->blendindexes, &inblendindex8[i*4], sizeof(v->blendindexes));
        if(inblendweight8) memcpy(v->blendweights, &inblendweight8[i*4], sizeof(v->blendweights));
        if(inblendindexi) {
            uint8_t conv[4] = { inblendindexi[i*4], inblendindexi[i*4+1], inblendindexi[i*4+2], inblendindexi[i*4+3] };
            memcpy(v->blendindexes, conv, sizeof(v->blendindexes));
        }
        if(inblendweightf) {
            // quantize float weights to normalized bytes
            uint8_t conv[4] = { inblendweightf[i*4] * 255, inblendweightf[i*4+1] * 255, inblendweightf[i*4+2] * 255, inblendweightf[i*4+3] * 255 };
            memcpy(v->blendweights, conv, sizeof(v->blendweights));
        }
    }
    if(!vbo) glGenBuffers(1, &vbo);
    glBindBuffer(GL_ARRAY_BUFFER, vbo);
    glBufferData(GL_ARRAY_BUFFER, hdr->num_vertexes*sizeof(iqm_vertex), verts, GL_STATIC_DRAW);
    glBindBuffer(GL_ARRAY_BUFFER, 0);
    FREE(verts);
    // default every submesh to the checkerboard placeholder texture
    textures = CALLOC(hdr->num_meshes, sizeof(GLuint));
    for(int i = 0; i < (int)hdr->num_meshes; i++) {
        int invalid = texture_checker().id;
        textures[i] = invalid;
    }
    const char *str = hdr->ofs_text ? (char *)&buf[hdr->ofs_text] : "";
    for(int i = 0; i < (int)hdr->num_meshes; i++) {
        struct iqmmesh *m = &meshes[i];
        PRINTF("loaded mesh: %s\n", &str[m->name]);
    }
    return true;
}
// Parse the animation sections of an IQM file: poses, clips and the packed
// 16-bit frame channel data. Each frame pose is pre-multiplied with the
// parent base pose and inverse base pose so runtime blending is a plain
// matrix concatenation (see the derivation comment below).
// Returns false when the pose count does not match the loaded skeleton.
static
bool model_load_anims(iqm_t *q, const struct iqmheader *hdr) {
    if((int)hdr->num_poses != numjoints) return false;
    // drop any previously loaded animation set
    if(animdata) {
        if(animdata != meshdata) FREE(animdata);
        FREE(frames);
        animdata = NULL;
        anims = NULL;
        frames = 0;
        numframes = 0;
        numanims = 0;
    }
    // endian-fix the sections we are about to walk
    lil32p(&buf[hdr->ofs_poses], hdr->num_poses*sizeof(struct iqmpose)/sizeof(uint32_t));
    lil32p(&buf[hdr->ofs_anims], hdr->num_anims*sizeof(struct iqmanim)/sizeof(uint32_t));
    lil16p((uint16_t *)&buf[hdr->ofs_frames], hdr->num_frames*hdr->num_framechannels);
    animdata = buf;
    numanims = hdr->num_anims;
    numframes = hdr->num_frames;
    anims = (struct iqmanim *)&buf[hdr->ofs_anims];
    poses = (struct iqmpose *)&buf[hdr->ofs_poses];
    frames = CALLOC(hdr->num_frames * hdr->num_poses, sizeof(mat34));
    uint16_t *framedata = (uint16_t *)&buf[hdr->ofs_frames];
    for(int i = 0; i < (int)hdr->num_frames; i++) {
        for(int j = 0; j < (int)hdr->num_poses; j++) {
            struct iqmpose *p = &poses[j];
            quat rotate;
            vec3 translate, scale;
            // each mask bit selects one stored channel, consumed in order;
            // decoded value = channeloffset + raw_ushort * channelscale
            translate.x = p->channeloffset[0]; if(p->mask&0x01) translate.x += *framedata++ * p->channelscale[0];
            translate.y = p->channeloffset[1]; if(p->mask&0x02) translate.y += *framedata++ * p->channelscale[1];
            translate.z = p->channeloffset[2]; if(p->mask&0x04) translate.z += *framedata++ * p->channelscale[2];
            rotate.x = p->channeloffset[3]; if(p->mask&0x08) rotate.x += *framedata++ * p->channelscale[3];
            rotate.y = p->channeloffset[4]; if(p->mask&0x10) rotate.y += *framedata++ * p->channelscale[4];
            rotate.z = p->channeloffset[5]; if(p->mask&0x20) rotate.z += *framedata++ * p->channelscale[5];
            rotate.w = p->channeloffset[6]; if(p->mask&0x40) rotate.w += *framedata++ * p->channelscale[6];
            scale.x = p->channeloffset[7]; if(p->mask&0x80) scale.x += *framedata++ * p->channelscale[7];
            scale.y = p->channeloffset[8]; if(p->mask&0x100) scale.y += *framedata++ * p->channelscale[8];
            scale.z = p->channeloffset[9]; if(p->mask&0x200) scale.z += *framedata++ * p->channelscale[9];
            // Concatenate each pose with the inverse base pose to avoid doing this at animation time.
            // If the joint has a parent, then it needs to be pre-concatenated with its parent's base pose.
            // Thus it all negates at animation time like so:
            // (parentPose * parentInverseBasePose) * (parentBasePose * childPose * childInverseBasePose) =>
            // parentPose * (parentInverseBasePose * parentBasePose) * childPose * childInverseBasePose =>
            // parentPose * childPose * childInverseBasePose
            mat34 m; compose34(m, translate, normq(rotate), scale);
            if(p->parent >= 0) multiply34x3(frames[i*hdr->num_poses + j], baseframe[p->parent], m, inversebaseframe[j]);
            else multiply34x2(frames[i*hdr->num_poses + j], m, inversebaseframe[j]);
        }
    }
    const char *str = hdr->ofs_text ? (char *)&buf[hdr->ofs_text] : "";
    for(int i = 0; i < (int)hdr->num_anims; i++) {
        struct iqmanim *a = &anims[i];
        PRINTF("loaded anim[%d]: %s\n", i, &str[a->name]);
    }
    return true;
}
// Resolve one GL texture per mesh by probing several variants of the material
// name stored in the IQM text blob. Falls back to the checkerboard placeholder
// when nothing matches. Macros (`textures`, `meshes`, `buf`) index into `q`.
static
bool model_load_textures(iqm_t *q, const struct iqmheader *hdr) {
    textures = textures ? textures : CALLOC(hdr->num_meshes, sizeof(GLuint));
    const char *str = hdr->ofs_text ? (char *)&buf[hdr->ofs_text] : "";
    for(int i = 0; i < (int)hdr->num_meshes; i++) {
        struct iqmmesh *m = &meshes[i];
        char* material_name;
        int flags = TEXTURE_MIPMAPS|TEXTURE_REPEAT; // LINEAR, NEAREST
        int invalid = texture_checker().id;
        textures[i] = invalid;
        // remove any material+name from materials (.fbx)
        // try left token first
        if( 1 ) {
            material_name = stringf("%s", &str[m->material]);
            char* plus = strrchr(material_name, '+');
            // NOTE(review): file_ext() presumably returns a pointer into
            // material_name itself; if so this strcpy copies between
            // overlapping regions of the same buffer — TODO confirm.
            if (plus) { strcpy(plus, file_ext(material_name)); }
            textures[i] = texture(material_name, flags).id;
        }
        // else try right token
        if (textures[i] == invalid) {
            material_name = file_normalize( stringf("%s", &str[m->material]) );
            char* plus = strrchr(material_name, '+'), *slash = strrchr(material_name, '/');
            if (plus) {
                // Overwrite everything after the last '/' (or the whole string)
                // with the token following '+'. Same in-buffer overlap caveat
                // as above applies to this strcpy.
                strcpy(slash ? slash + 1 : material_name, plus + 1);
                textures[i] = texture(material_name, flags).id;
            }
        }
        // else last resort
        // NOTE(review): retries with material_name as left by the branches
        // above (possibly already probed), so this lookup may be redundant —
        // the original author flagged it too ("needed?").
        if (textures[i] == invalid) {
            textures[i] = texture(material_name, flags).id; // needed?
        }
        if( textures[i] != invalid) {
            PRINTF("loaded material[%d]: %s\n", i, &str[m->material]);
        } else {
            PRINTF("fail: material[%d] not found: %s\n", i, &str[m->material]);
            PRINTF("warn: using placeholder material[%d]=texture_checker\n", i);
            textures[i] = texture_checker().id; // placeholder
        }
    }
    return true;
}
// Parse an IQM model from a memory blob and build a model_t.
// `flags` can suppress individual stages (MODEL_NO_MESHES / _TEXTURES /
// _ANIMATIONS). On any failure a zeroed model_t (m.iqm == NULL) is returned.
model_t model_from_mem(const void *mem, int len, int flags) {
    const char *ptr = (const char *)mem;
    // Shader program is compiled once and shared by every model.
    // NOTE(review): function-local static — not thread-safe if models are
    // loaded concurrently; confirm single-threaded loading is guaranteed.
    static int shaderprog = -1;
    if( shaderprog < 0 ) {
        const char *symbols[] = { "{{include-shadowmap}}", fs_0_0_shadowmap_lit }; // #define RIM
        shaderprog = shader(strlerp(1,symbols,vs_32344443_332_model), strlerp(1,symbols,fs_32_4_model), //fs,
            "att_position,att_texcoord,att_normal,att_tangent,att_indexes,att_weights,att_color,att_bitangent","fragColor");
    }

    iqm_t *q = CALLOC(1, sizeof(iqm_t));
    program = shaderprog;

    int error = 1;
    if( ptr && len ) {
        struct iqmheader hdr; memcpy(&hdr, ptr, sizeof(hdr)); ptr += sizeof(hdr);
        if( !memcmp(hdr.magic, IQM_MAGIC, sizeof(hdr.magic))) {
            // Header fields after the magic are little-endian; swap in place.
            lil32p(&hdr.version, (sizeof(hdr) - sizeof(hdr.magic))/sizeof(uint32_t));
            if(hdr.version == IQM_VERSION) {
                // Allocate a full file-sized buffer but copy only the payload
                // past the header; the first sizeof(hdr) bytes stay zeroed.
                // Section offsets (ofs_*) are relative to the file start, so
                // this only works while every ofs_* points past the header —
                // presumably guaranteed by the format; TODO confirm.
                buf = CALLOC(hdr.filesize, sizeof(uint8_t));
                memcpy(buf + sizeof(hdr), ptr, hdr.filesize - sizeof(hdr));
                error = 0;
                // Each loader contributes to the error bitmask on failure.
                if( hdr.num_meshes > 0 && !(flags & MODEL_NO_MESHES)     ) error |= !model_load_meshes(q, &hdr);
                if( hdr.num_meshes > 0 && !(flags & MODEL_NO_TEXTURES)   ) error |= !model_load_textures(q, &hdr);
                if( hdr.num_anims  > 0 && !(flags & MODEL_NO_ANIMATIONS) ) error |= !model_load_anims(q, &hdr);
                // The loaders may keep `buf` alive as meshdata/animdata;
                // only free it when nobody adopted it.
                if( buf != meshdata && buf != animdata ) FREE(buf);
            }
        }
    }

    model_t m = {0};
    if( error ) {
        PRINTF("Error: cannot load %s", "model");
        FREE(q), q = 0;
    } else {
        // m.boxes = bounds; // <@todo
        m.num_meshes = nummeshes;
        m.num_triangles = numtris;
        m.num_joints = numjoints;
        //m.num_poses = numposes;
        m.num_anims = numanims;
        m.num_frames = numframes;
        m.iqm = q;
        m.curframe = model_animate(m, 0);
        id44(m.pivot);
        model_set_state(m);
    }
    return m;
}
// Load a model from a (virtual filesystem) path.
// Thin convenience wrapper: reads the whole file, then defers to
// model_from_mem(). Returns a zeroed model on failure, like the callee.
model_t model(const char *filename, int flags) {
    int size; // vfs_pushd(filedir(filename))
    char *blob = vfs_load(filename, &size); // + vfs_popd
    return model_from_mem(blob, size, flags);
}
// Fetch the world-space endpoints of one bone for the *current* skinned pose:
// *pos  = translation of the joint's matrix in `outframe`,
// *from = translation of its parent's matrix (or the origin for roots).
// `curframe` is currently unused (the pose already baked into outframe by
// model_animate_clip is read instead). The author flagged this as buggy.
void model_get_bone_pose(model_t m, float curframe, int joint, vec3 *pos, vec3 *from) { // bugs?
    if(!m.iqm) return;
    iqm_t *q = m.iqm;

    // mat34 *mat = &frames[(int)curframe * numjoints];
    float *a = outframe[joint];

    #if 0
    mat34 m34 = {0};
    muladd34(m34, outframe[int(att_indexes.x)], att_weights.x);
    muladd34(m34, outframe[int(att_indexes.y)], att_weights.y);
    muladd34(m34, outframe[int(att_indexes.z)], att_weights.z);
    muladd34(m34, outframe[int(att_indexes.w)], att_weights.w);
    objPos = vec4(att_position, 1.0) * m34;
    #endif

    // Elements 12..14 of a mat34 hold the translation column.
    *pos = vec3(a[12], a[13], a[14]);
    if (joints[joint].parent >= 0) {
        float *b = outframe[joints[joint].parent];
        /*
        @fixme: do as above
        */
        *from = vec3(b[12], b[13], b[14]);
    } else {
        *from = vec3(0, 0, 0);
    }
}
// Advance/evaluate an animation over the frame range [minframe, maxframe].
// Interpolates between the two frames bracketing `curframe`, concatenates
// parent joint matrices, and writes the result into `outframe` (the skinning
// palette). Returns the resolved frame position, or -1 if the model has no
// IQM payload. With loop=true the range wraps; otherwise it clamps.
float model_animate_clip(model_t m, float curframe, int minframe, int maxframe, bool loop) {
    if(!m.iqm) return -1;
    iqm_t *q = m.iqm;

    float retframe = -1;
    if( numframes > 0 ) {
        int frame1 = (int)/*floor*/(curframe);
        // frame2 steps forward or backward depending on playback direction,
        // inferred by comparing against the model's last evaluated frame.
        int frame2 = frame1 + (curframe >= m.curframe ? 1 : -1);
        float frameoffset = curframe - frame1;

        if( loop ) {
            // Wrap both frames back into [minframe, maxframe).
            int distance = (maxframe - minframe);
            frame1 = frame1 >= maxframe ? minframe : frame1 < minframe ? maxframe - clampf(minframe - frame1, 0, distance) : frame1;
            frame2 = frame2 >= maxframe ? minframe : frame2 < minframe ? maxframe - clampf(minframe - frame2, 0, distance) : frame2;
            retframe = frame1 + frameoffset;
        } else {
            frame1 = clampf(frame1, minframe, maxframe);
            frame2 = clampf(frame2, minframe, maxframe);
            retframe = minf(frame1 + frameoffset, maxframe); // clamp to maxframe
        }

        // Each frame stores numjoints mat34s, prebaked with the inverse base
        // pose at load time (see model_load_anims).
        mat34 *mat1 = &frames[frame1 * numjoints];
        mat34 *mat2 = &frames[frame2 * numjoints];

        // @todo: add animation blending and inter-frame blending here
        // Interpolate matrixes between the two closest frames and concatenate with
        // parent matrix if necessary. Concatenate the result with the inverse of the base pose.
        for(int i = 0; i < numjoints; i++) {
            mat34 mat; lerp34(mat, mat1[i], mat2[i], frameoffset);
            // Parents are processed before children (joints are stored in
            // hierarchical order), so outframe[parent] is already final here.
            if(joints[i].parent >= 0) multiply34x2(outframe[i], outframe[joints[i].parent], mat);
            else copy34(outframe[i], mat);
        }

        // model_render_skeleton (debug visualization, disabled)
        if(0)
        for( int i = 0; i < numjoints; i++ ) {
            vec3 pos, from;
            model_get_bone_pose(m, curframe, i, &pos, &from);
            ddraw_line(pos, from);
        }
    }

    return retframe;
}
// Evaluate the model's full animation range [0, numframes-1], looping.
// Convenience front-end for model_animate_clip(); returns -1 when the model
// carries no IQM payload.
float model_animate(model_t m, float curframe) {
    iqm_t *q = m.iqm;
    if( !q ) return -1;
    return model_animate_clip(m, curframe, 0, numframes-1, true);
}
// Issue one glDrawElements per mesh of the model, binding that mesh's diffuse
// texture to unit 0. Assumes the VAO (with its element buffer) was set up by
// the loader; uniforms must already be bound by the caller.
static
void model_draw_call(model_t m) {
    if(!m.iqm) return;
    iqm_t *q = m.iqm;

    glBindVertexArray( vao );

    // `tris` stays NULL on purpose: &tris[first_triangle] merely computes the
    // byte offset first_triangle*sizeof(iqmtriangle), which glDrawElements
    // interprets as an offset into the bound element array buffer.
    struct iqmtriangle *tris = NULL;
    for(int i = 0; i < nummeshes; i++) {
        struct iqmmesh *m = &meshes[i]; // shadows the model_t param on purpose

        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, textures[i] );
        glUniform1i(glGetUniformLocation(program, "fsDiffTex"), 0 /*<-- unit!*/ );

        glDrawElements(GL_TRIANGLES, 3*m->num_triangles, GL_UNSIGNED_INT, &tris[m->first_triangle]);
        profile_incstat("drawcalls", +1);
        profile_incstat("triangles", +m->num_triangles);
    }

    glBindVertexArray( 0 );
}
// Render the model with an explicit shader program. Passing shader == 0
// falls back to the program compiled for the model at load time.
void model_render2(model_t m, mat44 proj, mat44 view, mat44 model, int shader) {
    iqm_t *q = m.iqm;
    if( !q ) return;
    int active = shader ? shader : program;
    model_set_uniforms(m, active, proj, view, model);
    model_draw_call(m);
}
// Render the model with its own load-time shader program
// (use model_render2() to override the shader).
void model_render(model_t m, mat44 proj, mat44 view, mat44 model) {
    model_render2(m, proj, view, model, 0);
}
// Transform an axis-aligned bounding box by an affine 4x4 matrix, producing
// the AABB of the transformed box.
// Based on "Transforming Axis-Aligned Bounding Boxes" by Jim Arvo, 1990.
static
aabb aabb_transform( aabb A, mat44 M) {
    // Start both corners at the matrix translation (M[12..14]).
    aabb B = { {M[12],M[13],M[14]}, {M[12],M[13],M[14]} };

    // Treat min/max corners as float[3] so the rotation part (upper 3x3 of M,
    // row-major) can be applied componentwise.
    const float *amin = &A.min.x;
    const float *amax = &A.max.x;
    float *bmin = &B.min.x;
    float *bmax = &B.max.x;

    for( int i = 0; i < 3; i++ ) {
        for( int j = 0; j < 3; j++ ) {
            float lo = M[i*4+j] * amin[j];
            float hi = M[i*4+j] * amax[j];
            // A negative matrix entry swaps which product is the smaller one.
            if( lo > hi ) { float t = lo; lo = hi; hi = t; }
            bmin[i] += lo;
            bmax[i] += hi;
        }
    }
    return B;
}
// Return the model's bounding box at its current animation frame, transformed
// into world space. Degrades to a zero-sized box when the model has no IQM
// payload or the file carried no per-frame bounds.
aabb model_aabb(model_t m, mat44 transform) {
    iqm_t *q = m.iqm;
    if( !q || !bounds )
        return aabb(vec3(0,0,0),vec3(0,0,0));
    // `numframes + !numframes` avoids a division by zero for static models.
    int frame = ( (int)m.curframe ) % (numframes + !numframes);
    vec3 lo = ptr3(bounds[frame].bbmin);
    vec3 hi = ptr3(bounds[frame].bbmax);
    return aabb_transform(aabb(lo,hi), transform);
}
// Free all heap allocations owned by a model's iqm_t context.
void model_destroy(model_t m) {
    iqm_t *q = m.iqm;
    // if(m.mesh) mesh_destroy(m.mesh);
    FREE(outframe);
    FREE(textures);
    FREE(baseframe);
    FREE(inversebaseframe);
    if(animdata != meshdata) FREE(animdata);
    //FREE(meshdata);
    // NOTE(review): model_from_mem sets animdata = buf when animations load;
    // if meshdata differs from animdata, the line above plus FREE(buf) below
    // would free the same pointer twice. Presumably meshdata == buf in
    // practice (set by model_load_meshes, not visible here) — TODO confirm.
    FREE(frames);
    FREE(buf);
    FREE(q);
}
#undef program
#undef meshdata
#undef animdata
#undef nummeshes
#undef numtris
#undef numverts
#undef numjoints
#undef numframes
#undef numanims
#undef meshes
#undef textures
#undef joints
#undef poses
#undef anims
#undef baseframe
#undef inversebaseframe
#undef outframe
#undef frames
#undef vao
#undef ibo
#undef vbo
#undef bonematsoffset
#undef buf
#undef bounds
#endif // RENDER_C
|
irbuilder_for_unsigned.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=45 -x c++ -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// Worksharing loop with an *unsigned* induction variable; exercises the
// OpenMP IRBuilder's unsigned trip-count computation and logical-iteration
// mapping (checked by the autogenerated __captured_stmt CHECK lines below).
extern "C" void workshareloop_unsigned(float *a, float *b, float *c, float *d) {
#pragma omp for
  for (unsigned i = 33; i < 32000000; i += 7) {
    a[i] = b[i] * c[i] * d[i];
  }
}
#endif // HEADER
// CHECK-LABEL: define {{[^@]+}}@workshareloop_unsigned
// CHECK-SAME: (float* noundef [[A:%.*]], float* noundef [[B:%.*]], float* noundef [[C:%.*]], float* noundef [[D:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[A_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[B_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[C_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[D_ADDR:%.*]] = alloca float*, align 8
// CHECK-NEXT: [[I:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[AGG_CAPTURED:%.*]] = alloca [[STRUCT_ANON:%.*]], align 8
// CHECK-NEXT: [[AGG_CAPTURED1:%.*]] = alloca [[STRUCT_ANON_0:%.*]], align 4
// CHECK-NEXT: [[DOTCOUNT_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LASTITER:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_LOWERBOUND:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_UPPERBOUND:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[P_STRIDE:%.*]] = alloca i32, align 4
// CHECK-NEXT: store float* [[A]], float** [[A_ADDR]], align 8
// CHECK-NEXT: store float* [[B]], float** [[B_ADDR]], align 8
// CHECK-NEXT: store float* [[C]], float** [[C_ADDR]], align 8
// CHECK-NEXT: store float* [[D]], float** [[D_ADDR]], align 8
// CHECK-NEXT: store i32 33, i32* [[I]], align 4
// CHECK-NEXT: [[TMP0:%.*]] = getelementptr inbounds [[STRUCT_ANON]], %struct.anon* [[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* [[I]], i32** [[TMP0]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0]], %struct.anon.0* [[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: store i32 [[TMP2]], i32* [[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* [[DOTCOUNT_ADDR]], %struct.anon* [[AGG_CAPTURED]])
// CHECK-NEXT: [[DOTCOUNT:%.*]] = load i32, i32* [[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_PREHEADER:%.*]]
// CHECK: omp_loop.preheader:
// CHECK-NEXT: store i32 0, i32* [[P_LOWERBOUND]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = sub i32 [[DOTCOUNT]], 1
// CHECK-NEXT: store i32 [[TMP3]], i32* [[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* [[P_STRIDE]], align 4
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* [[P_LASTITER]], i32* [[P_LOWERBOUND]], i32* [[P_UPPERBOUND]], i32* [[P_STRIDE]], i32 1, i32 0)
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[P_LOWERBOUND]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[P_UPPERBOUND]], align 4
// CHECK-NEXT: [[TMP6:%.*]] = sub i32 [[TMP5]], [[TMP4]]
// CHECK-NEXT: [[TMP7:%.*]] = add i32 [[TMP6]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER:%.*]]
// CHECK: omp_loop.header:
// CHECK-NEXT: [[OMP_LOOP_IV:%.*]] = phi i32 [ 0, [[OMP_LOOP_PREHEADER]] ], [ [[OMP_LOOP_NEXT:%.*]], [[OMP_LOOP_INC:%.*]] ]
// CHECK-NEXT: br label [[OMP_LOOP_COND:%.*]]
// CHECK: omp_loop.cond:
// CHECK-NEXT: [[OMP_LOOP_CMP:%.*]] = icmp ult i32 [[OMP_LOOP_IV]], [[TMP7]]
// CHECK-NEXT: br i1 [[OMP_LOOP_CMP]], label [[OMP_LOOP_BODY:%.*]], label [[OMP_LOOP_EXIT:%.*]]
// CHECK: omp_loop.body:
// CHECK-NEXT: [[TMP8:%.*]] = add i32 [[OMP_LOOP_IV]], [[TMP4]]
// CHECK-NEXT: call void @__captured_stmt.1(i32* [[I]], i32 [[TMP8]], %struct.anon.0* [[AGG_CAPTURED1]])
// CHECK-NEXT: [[TMP9:%.*]] = load float*, float** [[B_ADDR]], align 8
// CHECK-NEXT: [[TMP10:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM:%.*]] = zext i32 [[TMP10]] to i64
// CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds float, float* [[TMP9]], i64 [[IDXPROM]]
// CHECK-NEXT: [[TMP11:%.*]] = load float, float* [[ARRAYIDX]], align 4
// CHECK-NEXT: [[TMP12:%.*]] = load float*, float** [[C_ADDR]], align 8
// CHECK-NEXT: [[TMP13:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM2:%.*]] = zext i32 [[TMP13]] to i64
// CHECK-NEXT: [[ARRAYIDX3:%.*]] = getelementptr inbounds float, float* [[TMP12]], i64 [[IDXPROM2]]
// CHECK-NEXT: [[TMP14:%.*]] = load float, float* [[ARRAYIDX3]], align 4
// CHECK-NEXT: [[MUL:%.*]] = fmul float [[TMP11]], [[TMP14]]
// CHECK-NEXT: [[TMP15:%.*]] = load float*, float** [[D_ADDR]], align 8
// CHECK-NEXT: [[TMP16:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM4:%.*]] = zext i32 [[TMP16]] to i64
// CHECK-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds float, float* [[TMP15]], i64 [[IDXPROM4]]
// CHECK-NEXT: [[TMP17:%.*]] = load float, float* [[ARRAYIDX5]], align 4
// CHECK-NEXT: [[MUL6:%.*]] = fmul float [[MUL]], [[TMP17]]
// CHECK-NEXT: [[TMP18:%.*]] = load float*, float** [[A_ADDR]], align 8
// CHECK-NEXT: [[TMP19:%.*]] = load i32, i32* [[I]], align 4
// CHECK-NEXT: [[IDXPROM7:%.*]] = zext i32 [[TMP19]] to i64
// CHECK-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds float, float* [[TMP18]], i64 [[IDXPROM7]]
// CHECK-NEXT: store float [[MUL6]], float* [[ARRAYIDX8]], align 4
// CHECK-NEXT: br label [[OMP_LOOP_INC]]
// CHECK: omp_loop.inc:
// CHECK-NEXT: [[OMP_LOOP_NEXT]] = add nuw i32 [[OMP_LOOP_IV]], 1
// CHECK-NEXT: br label [[OMP_LOOP_HEADER]]
// CHECK: omp_loop.exit:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @[[GLOB1]], i32 [[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: [[OMP_GLOBAL_THREAD_NUM9:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @[[GLOB2:[0-9]+]], i32 [[OMP_GLOBAL_THREAD_NUM9]])
// CHECK-NEXT: br label [[OMP_LOOP_AFTER:%.*]]
// CHECK: omp_loop.after:
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__captured_stmt
// CHECK-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[DISTANCE:%.*]], %struct.anon* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[DISTANCE_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon*, align 8
// CHECK-NEXT: [[DOTSTART:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTSTOP:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[DOTSTEP:%.*]] = alloca i32, align 4
// CHECK-NEXT: store i32* [[DISTANCE]], i32** [[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* [[__CONTEXT]], %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon*, %struct.anon** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON:%.*]], %struct.anon* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[TMP2]], align 4
// CHECK-NEXT: store i32 [[TMP3]], i32* [[DOTSTART]], align 4
// CHECK-NEXT: store i32 32000000, i32* [[DOTSTOP]], align 4
// CHECK-NEXT: store i32 7, i32* [[DOTSTEP]], align 4
// CHECK-NEXT: [[TMP4:%.*]] = load i32, i32* [[DOTSTART]], align 4
// CHECK-NEXT: [[TMP5:%.*]] = load i32, i32* [[DOTSTOP]], align 4
// CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[TMP4]], [[TMP5]]
// CHECK-NEXT: br i1 [[CMP]], label [[COND_TRUE:%.*]], label [[COND_FALSE:%.*]]
// CHECK: cond.true:
// CHECK-NEXT: [[TMP6:%.*]] = load i32, i32* [[DOTSTOP]], align 4
// CHECK-NEXT: [[TMP7:%.*]] = load i32, i32* [[DOTSTART]], align 4
// CHECK-NEXT: [[SUB:%.*]] = sub i32 [[TMP6]], [[TMP7]]
// CHECK-NEXT: [[TMP8:%.*]] = load i32, i32* [[DOTSTEP]], align 4
// CHECK-NEXT: [[SUB1:%.*]] = sub i32 [[TMP8]], 1
// CHECK-NEXT: [[ADD:%.*]] = add i32 [[SUB]], [[SUB1]]
// CHECK-NEXT: [[TMP9:%.*]] = load i32, i32* [[DOTSTEP]], align 4
// CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[ADD]], [[TMP9]]
// CHECK-NEXT: br label [[COND_END:%.*]]
// CHECK: cond.false:
// CHECK-NEXT: br label [[COND_END]]
// CHECK: cond.end:
// CHECK-NEXT: [[COND:%.*]] = phi i32 [ [[DIV]], [[COND_TRUE]] ], [ 0, [[COND_FALSE]] ]
// CHECK-NEXT: [[TMP10:%.*]] = load i32*, i32** [[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 [[COND]], i32* [[TMP10]], align 4
// CHECK-NEXT: ret void
//
//
// CHECK-LABEL: define {{[^@]+}}@__captured_stmt.1
// CHECK-SAME: (i32* noundef nonnull align 4 dereferenceable(4) [[LOOPVAR:%.*]], i32 noundef [[LOGICAL:%.*]], %struct.anon.0* noalias noundef [[__CONTEXT:%.*]]) #[[ATTR1]] {
// CHECK-NEXT: entry:
// CHECK-NEXT: [[LOOPVAR_ADDR:%.*]] = alloca i32*, align 8
// CHECK-NEXT: [[LOGICAL_ADDR:%.*]] = alloca i32, align 4
// CHECK-NEXT: [[__CONTEXT_ADDR:%.*]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* [[LOOPVAR]], i32** [[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 [[LOGICAL]], i32* [[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* [[__CONTEXT]], %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP0:%.*]] = load %struct.anon.0*, %struct.anon.0** [[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [[STRUCT_ANON_0:%.*]], %struct.anon.0* [[TMP0]], i32 0, i32 0
// CHECK-NEXT: [[TMP2:%.*]] = load i32, i32* [[TMP1]], align 4
// CHECK-NEXT: [[TMP3:%.*]] = load i32, i32* [[LOGICAL_ADDR]], align 4
// CHECK-NEXT: [[MUL:%.*]] = mul i32 7, [[TMP3]]
// CHECK-NEXT: [[ADD:%.*]] = add i32 [[TMP2]], [[MUL]]
// CHECK-NEXT: [[TMP4:%.*]] = load i32*, i32** [[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 [[ADD]], i32* [[TMP4]], align 4
// CHECK-NEXT: ret void
//
|
zero_length_array_section.c | // RUN: %libomptarget-compile-generic -fopenmp-version=51
// RUN: %libomptarget-run-fail-generic 2>&1 \
// RUN: | %fcheck-generic
#include <stdio.h>
int main() {
  // Regression test for zero-length array sections (arr[0:0]) with the
  // 'present' map-type modifier: a zero-length section must reuse an existing
  // device mapping, but must never create one by itself.
  int arr[5];

  // CHECK: addr=0x[[#%x,HOST_ADDR:]]
  fprintf(stderr, "addr=%p\n", arr);

  // First pair: the enclosing data region maps arr[0:5], so the zero-length
  // 'present' section inside is satisfied and the target region runs.
  // CHECK-NOT: Libomptarget
  #pragma omp target data map(alloc: arr[0:5])
  #pragma omp target map(present, alloc: arr[0:0])
  ;

  // CHECK: arr is present
  fprintf(stderr, "arr is present\n");

  // arr[0:0] doesn't create an actual mapping in the first directive.
  //
  // Second pair: the data region itself uses a zero-length section, so no
  // mapping exists and the 'present' modifier must make the runtime abort.
  // CHECK: Libomptarget message: device mapping required by 'present' map type modifier does not exist for host address 0x{{0*}}[[#HOST_ADDR]] (0 bytes)
  // CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer ('present' map type modifier).
  // CHECK: Libomptarget error: Call to targetDataBegin failed, abort target.
  // CHECK: Libomptarget error: Failed to process data before launching the kernel.
  // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory
  #pragma omp target data map(alloc: arr[0:0])
  #pragma omp target map(present, alloc: arr[0:0])
  ;

  // Unreachable: the runtime aborts above, so this must not appear.
  // CHECK-NOT: arr is present
  fprintf(stderr, "arr is present\n");

  return 0;
}
|
convolution_1x1_int8.h | // BUG1989 is pleased to support the open source community by supporting ncnn available.
//
// author:BUG1989 (https://github.com/BUG1989/) Long-term support.
// author:FuGuangping (https://github.com/fu1899) Implemented the first version of INT8 quantization on ARMv7.
//
// Copyright (C) 2019 BUG1989. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __aarch64__
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
kernel_tm.create(outch, inch, (size_t)1u);
const int8_t *a = _kernel;
int8_t *sa = kernel_tm;
reorder_a((int8_t*)a, sa, outch, inch, inch);
}
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
const size_t n = bottom_blob.w * bottom_blob.h;
const size_t k = bottom_blob.c;
const size_t m = top_blob.c;
ncnn::Mat bottom_tm(k * n, (size_t)1u, opt.workspace_allocator);
{
const int8_t* pData = bottom_blob;
int8_t *pReorder = bottom_tm;
reorder_b(pData, pReorder, k, n, bottom_blob.cstep);
}
// GEMM
int32_t *pc = top_blob;
const int8_t *pa = kernel;
const int8_t *pb = bottom_tm;
const size_t ldc = top_blob.cstep;
int8kernel((void*)pc, pa, pb, m, k, n, ldc, nullptr, nullptr, opt);
}
// 1x1 stride-1 convolution as an int8 GEMM with fused requantization
// (aarch64 path). Per output channel c, scales_requant holds the pair
// (scale_requant_in, scale_requant_out) at indices [2c] and [2c+1].
//
//   outptr0[0] = float2int8(((float)sum0 * scale_requant_in + bias0) * scale_requant_out);
// the equation could convert to:
//   out = float2int8( (float)sum * (scale_requant_in * scale_requant_out) + (bias * scale_requant_out) )
// so both factors are prefolded below before calling the GEMM kernel.
//
// `scales_requant` is now taken by const reference: it is read-only here and
// copying the vector on every call is wasted work (call sites are unchanged).
static void conv1x1s1_sgemm_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel, const Mat &_bias, const std::vector<float> &scales_requant, const Option& opt)
{
    const size_t n = bottom_blob.w * bottom_blob.h; // spatial pixels (GEMM cols)
    const size_t k = bottom_blob.c;                 // input channels (GEMM depth)
    const size_t m = top_blob.c;                    // output channels (GEMM rows)

    ncnn::Mat scales_tm(m);
    ncnn::Mat bias_tm(m);
    float* scales = scales_tm;
    const float* bias = _bias;

    // Prefold (scale_requant_in * scale_requant_out) per output channel.
    for (size_t i = 0; i < m; ++i)
    {
        scales_tm[i] = scales_requant[2*i] * scales_requant[2*i + 1];
    }

    // Fold scale_requant_out into the bias as well, when a bias exists;
    // otherwise `bias` stays pointing at the (empty) _bias data.
    if (!_bias.empty())
    {
        for (size_t i = 0; i < m; ++i)
        {
            bias_tm[i] = bias[i] * scales_requant[2*i + 1];
        }
        bias = bias_tm;
    }

    // Repack the input so the GEMM kernel reads it contiguously.
    ncnn::Mat bottom_tm(k * n, (size_t)1u, opt.workspace_allocator);
    {
        const int8_t *pData = bottom_blob;
        int8_t *pReorder = bottom_tm;
        reorder_b(pData, pReorder, k, n, bottom_blob.cstep);
    }

    // GEMM with fused requantize: passing scales/bias selects int8 output.
    int8_t *pc = top_blob;
    const int8_t *pa = kernel;
    const int8_t *pb = bottom_tm;
    const size_t ldc = top_blob.cstep;
    int8kernel((void*)pc, pa, pb, m, k, n, ldc, scales, (float*)bias, opt);
}
#else
// Round to nearest (halves away from zero) and clamp into the symmetric
// int8 range [-127, 127]. -128 is deliberately excluded, as is common for
// symmetric int8 quantization.
static inline signed char float2int8(float v)
{
    const int r = (int)round(v);
    if (r > 127)  return 127;
    if (r < -127) return -127;
    return (signed char)r;
}
// Repack the 1x1 convolution kernel for the generic (non-aarch64) sgemm path:
// output channels are processed in groups of 4, and within a group the four
// kernels' weights are interleaved per input channel (k0,k1,k2,k3 for q=0,
// then for q=1, ...). Leftover channels are stored flat, one per channel slot.
static void conv1x1s1_sgemm_transform_kernel_int8_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch)
{
    const signed char* kernel = _kernel;

    // outch/4 four-wide groups plus outch%4 single channels; each group row
    // holds 4*4 interleaved weights per (padded) inch entry.
    kernel_tm.create(4*4, inch/4 + inch%4, outch/4 + outch%4, (size_t)1u);

    int p = 0;
    for (; p+3<outch; p+=4)
    {
        const signed char* kernel0 = kernel + (p+0)*inch;
        const signed char* kernel1 = kernel + (p+1)*inch;
        const signed char* kernel2 = kernel + (p+2)*inch;
        const signed char* kernel3 = kernel + (p+3)*inch;

        signed char* ktmp = kernel_tm.channel(p/4);

        for (int q=0; q<inch; q++)
        {
            // kernel0...3 0
            ktmp[0] = kernel0[0];
            ktmp[1] = kernel1[0];
            ktmp[2] = kernel2[0];
            ktmp[3] = kernel3[0];
            ktmp += 4;

            kernel0 += 1;
            kernel1 += 1;
            kernel2 += 1;
            kernel3 += 1;
        }
    }
    // Remaining channels: p/4 skips past the 4-wide groups, p%4 indexes the
    // single-channel slots after them (matches the create() call above).
    for (; p<outch; p++)
    {
        const signed char* kernel0 = kernel + p*inch;

        signed char* ktmp = kernel_tm.channel(p/4 + p%4);

        for (int q=0; q<inch; q++)
        {
            ktmp[0] = kernel0[0];
            ktmp++;
            kernel0++;
        }
    }
}
/*
* Convolution 1x1 quantized with sgemm int8
*/
static void conv1x1s1_sgemm_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
// interleave
Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8);
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0"
);
img0 += bottom_blob.cstep;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
tmpptr += 8;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
int* outptr0 = top_blob.channel(p);
int* outptr1 = top_blob.channel(p+1);
int* outptr2 = top_blob.channel(p+2);
int* outptr3 = top_blob.channel(p+3);
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4-d7}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q8, d4, d0[1] \n"// sum1 = (a00-a07) * k10
"vmlal.s16 q9, d5, d0[1] \n"
"vmlal.s16 q10, d4, d0[2] \n"// sum2 = (a00-a07) * k20
"vmlal.s16 q11, d5, d0[2] \n"
"vmlal.s16 q12, d4, d0[3] \n"// sum3 = (a00-a07) * k30
"vmlal.s16 q13, d5, d0[3] \n"
"vmlal.s16 q6, d6, d1[0] \n"// sum0 += (a10-a17) * k01
"vmlal.s16 q7, d7, d1[0] \n"
"vmlal.s16 q8, d6, d1[1] \n"// sum1 += (a10-a17) * k11
"vmlal.s16 q9, d7, d1[1] \n"
"vmlal.s16 q10, d6, d1[2] \n"// sum2 += (a10-a17) * k21
"vmlal.s16 q11, d7, d1[2] \n"
"vmlal.s16 q12, d6, d1[3] \n"// sum3 += (a10-a17) * k31
"vmlal.s16 q13, d7, d1[3] \n"
"vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02
"vmlal.s16 q7, d9, d2[0] \n"
"vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12
"vmlal.s16 q9, d9, d2[1] \n"
"vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22
"vmlal.s16 q11, d9, d2[2] \n"
"vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32
"vmlal.s16 q13, d9, d2[3] \n"
"vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03
"vmlal.s16 q7, d11, d3[0] \n"
"vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13
"vmlal.s16 q9, d11, d3[1] \n"
"vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23
"vmlal.s16 q11, d11, d3[2] \n"
"vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33
"vmlal.s16 q13, d11, d3[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10
"vmlal.s16 q9, d3, d0[1] \n"
"vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20
"vmlal.s16 q11, d3, d0[2] \n"
"vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30
"vmlal.s16 q13, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0]! \n"
"vst1.s32 {d16-d19}, [%1]! \n"
"vst1.s32 {d20-d23}, [%2]! \n"
"vst1.s32 {d24-d27}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum0_4 = 0;
int sum0_5 = 0;
int sum0_6 = 0;
int sum0_7 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum1_4 = 0;
int sum1_5 = 0;
int sum1_6 = 0;
int sum1_7 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum2_4 = 0;
int sum2_5 = 0;
int sum2_6 = 0;
int sum2_7 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int sum3_4 = 0;
int sum3_5 = 0;
int sum3_6 = 0;
int sum3_7 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum0_4 += tmpptr[4] * kptr[0];
sum0_5 += tmpptr[5] * kptr[0];
sum0_6 += tmpptr[6] * kptr[0];
sum0_7 += tmpptr[7] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum1_4 += tmpptr[4] * kptr[1];
sum1_5 += tmpptr[5] * kptr[1];
sum1_6 += tmpptr[6] * kptr[1];
sum1_7 += tmpptr[7] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum2_4 += tmpptr[4] * kptr[2];
sum2_5 += tmpptr[5] * kptr[2];
sum2_6 += tmpptr[6] * kptr[2];
sum2_7 += tmpptr[7] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
sum3_4 += tmpptr[4] * kptr[3];
sum3_5 += tmpptr[5] * kptr[3];
sum3_6 += tmpptr[6] * kptr[3];
sum3_7 += tmpptr[7] * kptr[3];
tmpptr += 8;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr0[4] = sum0_4;
outptr0[5] = sum0_5;
outptr0[6] = sum0_6;
outptr0[7] = sum0_7;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr1[4] = sum1_4;
outptr1[5] = sum1_5;
outptr1[6] = sum1_6;
outptr1[7] = sum1_7;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr2[4] = sum2_4;
outptr2[5] = sum2_5;
outptr2[6] = sum2_6;
outptr2[7] = sum2_7;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr3[4] = sum3_4;
outptr3[5] = sum3_5;
outptr3[6] = sum3_6;
outptr3[7] = sum3_7;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4-d5}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a04,a10-a14
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d4, d0[0] \n"// sum0 = (a00-a03) * k00
"vmlal.s16 q7, d4, d0[1] \n"// sum1 = (a00-a03) * k10
"vmlal.s16 q8, d4, d0[2] \n"// sum2 = (a00-a03) * k20
"vmlal.s16 q9, d4, d0[3] \n"// sum3 = (a00-a03) * k30
"vmlal.s16 q6, d5, d1[0] \n"// sum0 += (a10-a13) * k01
"vmlal.s16 q7, d5, d1[1] \n"// sum1 += (a10-a13) * k11
"vmlal.s16 q8, d5, d1[2] \n"// sum2 += (a10-a13) * k21
"vmlal.s16 q9, d5, d1[3] \n"// sum3 += (a10-a13) * k31
"vmlal.s16 q6, d6, d2[0] \n"// sum0 += (a20-a23) * k02
"vmlal.s16 q7, d6, d2[1] \n"// sum1 += (a20-a23) * k12
"vmlal.s16 q8, d6, d2[2] \n"// sum2 += (a20-a23) * k22
"vmlal.s16 q9, d6, d2[3] \n"// sum3 += (a20-a23) * k32
"vmlal.s16 q6, d7, d3[0] \n"// sum0 += (a30-a33) * k03
"vmlal.s16 q7, d7, d3[1] \n"// sum1 += (a30-a33) * k13
"vmlal.s16 q8, d7, d3[2] \n"// sum2 += (a30-a33) * k23
"vmlal.s16 q9, d7, d3[3] \n"// sum3 += (a30-a33) * k33
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00
"vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10
"vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20
"vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d13}, [%0]! \n"
"vst1.s32 {d14-d15}, [%1]! \n"
"vst1.s32 {d16-d17}, [%2]! \n"
"vst1.s32 {d18-d19}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"vmov.s32 q10, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data)
"add %4, #4 \n"
"vmovl.s8 q2, d4 \n"// a00,a10,a20,a30
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00
"vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10
"vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20
"vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q10, q6, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q10, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d20[0]}, [%0]! \n"
"vst1.s32 {d20[1]}, [%1]! \n"
"vst1.s32 {d21[0]}, [%2]! \n"
"vst1.s32 {d21[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch) // %12
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
int* outptr0 = out0;
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d15}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
sum4 += tmpptr[4] * kptr[0];
sum5 += tmpptr[5] * kptr[0];
sum6 += tmpptr[6] * kptr[0];
sum7 += tmpptr[7] * kptr[0];
tmpptr += 8;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%2, #128] \n"
"vld1.s8 {d4-d5}, [%1]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a03,a10-a13
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00
"vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01
"vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02
"vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %1, #4 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vst1.s32 {d12-d13}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch) // %6
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4 + p%4);
int q = 0;
int sum0 = 0;
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
// // NOTE sgemm int8
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// int* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// int sum = 0;
//
// const signed char* kptr = _kernel.channel(p/8 + p%8);
//
// for (int q=0; q<inch; q++)
// {
// const signed char* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
static void conv1x1s1_sgemm_int8_requant_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &kernel, const Mat &_bias, std::vector<float> scales_requant, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
int outch = top_blob.c;
const int size = w * h;
const float* bias = _bias;
// interleave
Mat tmp(8*4, inch/4+inch%4, size/8 + (size%8)/4 + size%4, 1u, opt.workspace_allocator);
{
int nn_size = size >> 3;
int remain_size_start = nn_size << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = ii * 8;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8);
for (int q=0; q<inch; q++)
{
#if __ARM_NEON
asm volatile(
"pld [%0, #64] \n"
"vld1.s8 {d0}, [%0] \n"
"vst1.s8 {d0}, [%1]! \n"
: "=r"(img0), // %0
"=r"(tmpptr) // %1
: "0"(img0),
"1"(tmpptr)
: "memory", "d0"
);
img0 += bottom_blob.cstep;
#else
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr[4] = img0[4];
tmpptr[5] = img0[5];
tmpptr[6] = img0[6];
tmpptr[7] = img0[7];
tmpptr += 8;
img0 += bottom_blob.cstep;
#endif // __ARM_NEON
}
}
nn_size = (size - remain_size_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int ii=0; ii<nn_size; ii++)
{
int i = remain_size_start + ii * 4;
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr[1] = img0[1];
tmpptr[2] = img0[2];
tmpptr[3] = img0[3];
tmpptr += 4;
img0 += bottom_blob.cstep;
}
}
remain_size_start += nn_size << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int i=remain_size_start; i<size; i++)
{
const signed char* img0 = bottom_blob.channel(0);
img0 += i;
signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
for (int q=0; q<inch; q++)
{
tmpptr[0] = img0[0];
tmpptr++;
img0 += bottom_blob.cstep;
}
}
}
// sgemm process
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
signed char* outptr0 = top_blob.channel(p);
signed char* outptr1 = top_blob.channel(p+1);
signed char* outptr2 = top_blob.channel(p+2);
signed char* outptr3 = top_blob.channel(p+3);
const float bias0 = bias ? bias[p] : 0.f;
const float bias1 = bias ? bias[p+1] : 0.f;
const float bias2 = bias ? bias[p+2] : 0.f;
const float bias3 = bias ? bias[p+3] : 0.f;
const float scale_requant_in0 = scales_requant[2*p];
const float scale_requant_out0 = scales_requant[2*p+1];
const float scale_requant_in1 = scales_requant[2*(p+1)];
const float scale_requant_out1 = scales_requant[2*(p+1)+1];
const float scale_requant_in2 = scales_requant[2*(p+2)];
const float scale_requant_out2 = scales_requant[2*(p+2)+1];
const float scale_requant_in3 = scales_requant[2*(p+3)];
const float scale_requant_out3 = scales_requant[2*(p+3)+1];
#if __ARM_NEON
float32x4_t _bias03, _scale_in03, _scale_out03;
_bias03[0] = bias0;
_bias03[1] = bias1;
_bias03[2] = bias2;
_bias03[3] = bias3;
_scale_in03[0] = scale_requant_in0;
_scale_in03[1] = scale_requant_in1;
_scale_in03[2] = scale_requant_in2;
_scale_in03[3] = scale_requant_in3;
_scale_out03[0] = scale_requant_out0;
_scale_out03[1] = scale_requant_out1;
_scale_out03[2] = scale_requant_out2;
_scale_out03[3] = scale_requant_out3;
#endif // __ARM_NEON
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"vmov.s32 q10, #0 \n"
"vmov.s32 q11, #0 \n"
"vmov.s32 q12, #0 \n"
"vmov.s32 q13, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d28-d31}, [%4]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d31 \n"// a30-a37
"vmovl.s8 q4, d30 \n"// a20-a27
"vmovl.s8 q15, d29 \n"// a10-a17
"vmovl.s8 q14, d28 \n"// a00-a07
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d28, d0[0] \n"// sum0 = (a00-a07) * k00
"vmlal.s16 q7, d29, d0[0] \n"
"vmlal.s16 q8, d28, d0[1] \n"// sum1 = (a00-a07) * k10
"vmlal.s16 q9, d29, d0[1] \n"
"vmlal.s16 q10, d28, d0[2] \n"// sum2 = (a00-a07) * k20
"vmlal.s16 q11, d29, d0[2] \n"
"vmlal.s16 q12, d28, d0[3] \n"// sum3 = (a00-a07) * k30
"vmlal.s16 q13, d29, d0[3] \n"
"vmlal.s16 q6, d30, d1[0] \n"// sum0 += (a10-a17) * k01
"vmlal.s16 q7, d31, d1[0] \n"
"vmlal.s16 q8, d30, d1[1] \n"// sum1 += (a10-a17) * k11
"vmlal.s16 q9, d31, d1[1] \n"
"vmlal.s16 q10, d30, d1[2] \n"// sum2 += (a10-a17) * k21
"vmlal.s16 q11, d31, d1[2] \n"
"vmlal.s16 q12, d30, d1[3] \n"// sum3 += (a10-a17) * k31
"vmlal.s16 q13, d31, d1[3] \n"
"vmlal.s16 q6, d8, d2[0] \n"// sum0 += (a20-a27) * k02
"vmlal.s16 q7, d9, d2[0] \n"
"vmlal.s16 q8, d8, d2[1] \n"// sum1 += (a20-a27) * k12
"vmlal.s16 q9, d9, d2[1] \n"
"vmlal.s16 q10, d8, d2[2] \n"// sum2 += (a20-a27) * k22
"vmlal.s16 q11, d9, d2[2] \n"
"vmlal.s16 q12, d8, d2[3] \n"// sum3 += (a20-a27) * k32
"vmlal.s16 q13, d9, d2[3] \n"
"vmlal.s16 q6, d10, d3[0] \n"// sum0 += (a30-a37) * k03
"vmlal.s16 q7, d11, d3[0] \n"
"vmlal.s16 q8, d10, d3[1] \n"// sum1 += (a30-a37) * k13
"vmlal.s16 q9, d11, d3[1] \n"
"vmlal.s16 q10, d10, d3[2] \n"// sum2 += (a30-a37) * k23
"vmlal.s16 q11, d11, d3[2] \n"
"vmlal.s16 q12, d10, d3[3] \n"// sum3 += (a30-a37) * k33
"vmlal.s16 q13, d11, d3[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"vmlal.s16 q8, d2, d0[1] \n"// sum1 += (a00-a07) * k10
"vmlal.s16 q9, d3, d0[1] \n"
"vmlal.s16 q10, d2, d0[2] \n"// sum2 += (a00-a07) * k20
"vmlal.s16 q11, d3, d0[2] \n"
"vmlal.s16 q12, d2, d0[3] \n"// sum3 += (a00-a07) * k30
"vmlal.s16 q13, d3, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vdup.f32 q14, %13 \n" // bias
"vdup.f32 q15, %14 \n" // bias
"vdup.f32 q4, %15 \n" // bias
"vdup.f32 q5, %16 \n" // bias
// sum0
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
"vcvt.f32.s32 q9, q9 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q6, q6, %e17[0] \n"
"vmul.f32 q7, q7, %e17[0] \n"
"vmul.f32 q8, q8, %e17[1] \n"
"vmul.f32 q9, q9, %e17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q14 \n"
"vadd.f32 q8, q8, q15 \n"
"vadd.f32 q9, q9, q15 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %e18[0] \n"
"vmul.f32 q1, q7, %e18[0] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12}, [%0]! \n"
// sum1
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q8, %e18[1] \n"
"vmul.f32 q1, q9, %e18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d16, q0 \n"
"vqmovn.s32 d17, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d16, q8 \n"
// save top_s8
"vst1.8 {d16}, [%1]! \n"
// sum2
// top_s32 -> top_f32
"vcvt.f32.s32 q10, q10 \n"
"vcvt.f32.s32 q11, q11 \n"
"vcvt.f32.s32 q12, q12 \n"
"vcvt.f32.s32 q13, q13 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q10, q10, %f17[0] \n"
"vmul.f32 q11, q11, %f17[0] \n"
"vmul.f32 q12, q12, %f17[1] \n"
"vmul.f32 q13, q13, %f17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q10, q10, q4 \n"
"vadd.f32 q11, q11, q4 \n"
"vadd.f32 q12, q12, q5 \n"
"vadd.f32 q13, q13, q5 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q10, %f18[0] \n"
"vmul.f32 q1, q11, %f18[0] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d20, q0 \n"
"vqmovn.s32 d21, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d20, q10 \n"
// save top_s8
"vst1.8 {d20}, [%2]! \n"
// sum3
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q12, %f18[1] \n"
"vmul.f32 q1, q13, %f18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d24, q0 \n"
"vqmovn.s32 d25, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d24, q12 \n"
// save top_s8
"vst1.8 {d24}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"r"(bias0), // %13
"r"(bias1), // %14
"r"(bias2), // %15
"r"(bias3), // %16
"w"(_scale_in03), // %17
"w"(_scale_out03) // %18
: "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13" ,"q14" ,"q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum0_4 = 0;
int sum0_5 = 0;
int sum0_6 = 0;
int sum0_7 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum1_4 = 0;
int sum1_5 = 0;
int sum1_6 = 0;
int sum1_7 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum2_4 = 0;
int sum2_5 = 0;
int sum2_6 = 0;
int sum2_7 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
int sum3_4 = 0;
int sum3_5 = 0;
int sum3_6 = 0;
int sum3_7 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum0_4 += tmpptr[4] * kptr[0];
sum0_5 += tmpptr[5] * kptr[0];
sum0_6 += tmpptr[6] * kptr[0];
sum0_7 += tmpptr[7] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum1_4 += tmpptr[4] * kptr[1];
sum1_5 += tmpptr[5] * kptr[1];
sum1_6 += tmpptr[6] * kptr[1];
sum1_7 += tmpptr[7] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum2_4 += tmpptr[4] * kptr[2];
sum2_5 += tmpptr[5] * kptr[2];
sum2_6 += tmpptr[6] * kptr[2];
sum2_7 += tmpptr[7] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
sum3_4 += tmpptr[4] * kptr[3];
sum3_5 += tmpptr[5] * kptr[3];
sum3_6 += tmpptr[6] * kptr[3];
sum3_7 += tmpptr[7] * kptr[3];
tmpptr += 8;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr0[4] = sum0_4;
outptr0[5] = sum0_5;
outptr0[6] = sum0_6;
outptr0[7] = sum0_7;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr1[4] = sum1_4;
outptr1[5] = sum1_5;
outptr1[6] = sum1_6;
outptr1[7] = sum1_7;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr2[4] = sum2_4;
outptr2[5] = sum2_5;
outptr2[6] = sum2_6;
outptr2[7] = sum2_7;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr3[4] = sum3_4;
outptr3[5] = sum3_5;
outptr3[6] = sum3_6;
outptr3[7] = sum3_7;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"vmov.s32 q8, #0 \n"
"vmov.s32 q9, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d28-d29}, [%4]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q15, d29 \n"// a20-a23,a30-a33
"vmovl.s8 q14, d28 \n"// a00-a04,a10-a14
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d28, d0[0] \n"// sum0 = (a00-a03) * k00
"vmlal.s16 q7, d28, d0[1] \n"// sum1 = (a00-a03) * k10
"vmlal.s16 q8, d28, d0[2] \n"// sum2 = (a00-a03) * k20
"vmlal.s16 q9, d28, d0[3] \n"// sum3 = (a00-a03) * k30
"vmlal.s16 q6, d29, d1[0] \n"// sum0 += (a10-a13) * k01
"vmlal.s16 q7, d29, d1[1] \n"// sum1 += (a10-a13) * k11
"vmlal.s16 q8, d29, d1[2] \n"// sum2 += (a10-a13) * k21
"vmlal.s16 q9, d29, d1[3] \n"// sum3 += (a10-a13) * k31
"vmlal.s16 q6, d30, d2[0] \n"// sum0 += (a20-a23) * k02
"vmlal.s16 q7, d30, d2[1] \n"// sum1 += (a20-a23) * k12
"vmlal.s16 q8, d30, d2[2] \n"// sum2 += (a20-a23) * k22
"vmlal.s16 q9, d30, d2[3] \n"// sum3 += (a20-a23) * k32
"vmlal.s16 q6, d31, d3[0] \n"// sum0 += (a30-a33) * k03
"vmlal.s16 q7, d31, d3[1] \n"// sum1 += (a30-a33) * k13
"vmlal.s16 q8, d31, d3[2] \n"// sum2 += (a30-a33) * k23
"vmlal.s16 q9, d31, d3[3] \n"// sum3 += (a30-a33) * k33
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #4 \n"
"add %5, #4 \n"
"vmlal.s16 q6, d2, d0[0] \n"// sum0 += (a00-a03) * k00
"vmlal.s16 q7, d2, d0[1] \n"// sum1 += (a00-a03) * k10
"vmlal.s16 q8, d2, d0[2] \n"// sum2 += (a00-a03) * k20
"vmlal.s16 q9, d2, d0[3] \n"// sum3 += (a00-a03) * k30
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
"vdup.f32 q14, %13 \n" // bias
"vdup.f32 q15, %14 \n" // bias
"vdup.f32 q4, %15 \n" // bias
"vdup.f32 q5, %16 \n" // bias
// sum0-1
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
"vcvt.f32.s32 q8, q8 \n"
"vcvt.f32.s32 q9, q9 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q6, q6, %e17[0] \n"
"vmul.f32 q7, q7, %e17[1] \n"
"vmul.f32 q8, q8, %f17[0] \n"
"vmul.f32 q9, q9, %f17[1] \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, q14 \n"
"vadd.f32 q7, q7, q15 \n"
"vadd.f32 q8, q8, q4 \n"
"vadd.f32 q9, q9, q5 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %e18[0] \n"
"vmul.f32 q1, q7, %e18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.s32 {d12[0]}, [%0]! \n"
"vst1.s32 {d12[1]}, [%1]! \n"
// sum1-2
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q8, %f18[0] \n"
"vmul.f32 q1, q9, %f18[1] \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d16, q0 \n"
"vqmovn.s32 d17, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d16, q8 \n"
// save top_s8
"vst1.s32 {d16[0]}, [%2]! \n"
"vst1.s32 {d16[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"r"(bias0), // %13
"r"(bias1), // %14
"r"(bias2), // %15
"r"(bias3), // %16
"w"(_scale_in03), // %17
"w"(_scale_out03) // %18
: "cc", "memory", "r4", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#else
int sum0_0 = 0;
int sum0_1 = 0;
int sum0_2 = 0;
int sum0_3 = 0;
int sum1_0 = 0;
int sum1_1 = 0;
int sum1_2 = 0;
int sum1_3 = 0;
int sum2_0 = 0;
int sum2_1 = 0;
int sum2_2 = 0;
int sum2_3 = 0;
int sum3_0 = 0;
int sum3_1 = 0;
int sum3_2 = 0;
int sum3_3 = 0;
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4);
#if __ARM_NEON
asm volatile(
// inch loop
"veor q6, q6, q6 \n"
"veor q7, q7, q7 \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"vmov.s32 q10, #0 \n"
"lsr r4, %12, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%4, #128] \n"
"vld1.s8 {d4}, [%4] \n"// tmpr a00,a10,a20,a30 a(inch)(data)
"add %4, #4 \n"
"vmovl.s8 q2, d4 \n"// a00,a10,a20,a30
"vld1.s8 {d0-d1}, [%5]! \n"// kptr k00-k30,k01-k31,k02-k32,k03-k33 k(outch)(inch)
"vmovl.s8 q1, d1 \n"// k02-k32,k03-k33
"vmovl.s8 q0, d0 \n"// k00-k30,k01-k31
"vmlal.s16 q6, d0, d4[0] \n"// (k00-k30) * a00
"vmlal.s16 q7, d1, d4[1] \n"// (k01-k31) * a10
"vmlal.s16 q8, d2, d4[2] \n"// (k02-k32) * a20
"vmlal.s16 q9, d3, d4[3] \n"// (k03-k33) * a30
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"vadd.s32 q6, q6, q7 \n"
"vadd.s32 q9, q9, q8 \n"
"vadd.s32 q10, q6, q9 \n"
"1: \n"
// remain loop
"and r4, %12, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%4] \n"// tmpr a00 a(inch)(data)
"vld1.s8 {d0}, [%5] \n"// kptr k00-k30 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %4, #1 \n"
"add %5, #4 \n"
"vmlal.s16 q10, d0, d2[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q10, q10 \n"
// top_f32 = top_f32 * scale_int
"vmul.f32 q10, q10, %q14 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q10, q10, %q13 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q10, %q15 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12[0]}, [%0]! \n"
"vst1.8 {d12[1]}, [%1]! \n"
"vst1.8 {d12[2]}, [%2]! \n"
"vst1.8 {d12[3]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(inch), // %12
"w"(_bias03), // %13
"w"(_scale_in03), // %14
"w"(_scale_out03) // %15
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
signed char* outptr0 = out0;
const float bias0 = bias ? bias[p] : 0.f;
const float scale_requant_in = scales_requant[2*p];
const float scale_requant_out = scales_requant[2*p+1];
#if __ARM_NEON
float32x4_t _bias0 = vdupq_n_f32(bias0);
float32x4_t _scale_in = vdupq_n_f32(scale_requant_in);
float32x4_t _scale_out = vdupq_n_f32(scale_requant_out);
#endif // __ARM_NEON
int i = 0;
for (; i+7<size; i+=8)
{
const signed char* tmpptr = tmp.channel(i/8);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"vmov.s32 q7, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%1, #128] \n"
"vld1.s8 {d4-d7}, [%1]! \n"// tmpr a00-a07,a10-a17,a20-a27,a30-a37 a(inch)(data)
"vmovl.s8 q5, d7 \n"// a30-a37
"vmovl.s8 q4, d6 \n"// a20-a27
"vmovl.s8 q3, d5 \n"// a10-a17
"vmovl.s8 q2, d4 \n"// a00-a07
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d5, d0[0] \n"
"vmlal.s16 q6, d6, d0[1] \n"// (a10-a17) * k01
"vmlal.s16 q7, d7, d0[1] \n"
"vmlal.s16 q6, d8, d0[2] \n"// (a20-a27) * k02
"vmlal.s16 q7, d9, d0[2] \n"
"vmlal.s16 q6, d10, d0[3] \n"// (a30-a37) * k03
"vmlal.s16 q7, d11, d0[3] \n"
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1]! \n"// tmpr a00-a07 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a07) * k00
"vmlal.s16 q7, d3, d0[0] \n"
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
"vcvt.f32.s32 q7, q7 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q6, q6, %q8 \n"
"vmul.f32 q7, q7, %q8 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, %q7 \n"
"vadd.f32 q7, q7, %q7 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %q9 \n"
"vmul.f32 q1, q7, %q9 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
"vcvtr.s32.f32 s4, s4 \n"
"vcvtr.s32.f32 s5, s5 \n"
"vcvtr.s32.f32 s6, s6 \n"
"vcvtr.s32.f32 s7, s7 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
"vqmovn.s32 d13, q1 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
// save top_s8
"vst1.8 {d12}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch), // %6
"w"(_bias0), // %7
"w"(_scale_in), // %8
"w"(_scale_out) // %9
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
int sum4 = 0;
int sum5 = 0;
int sum6 = 0;
int sum7 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
sum4 += tmpptr[4] * kptr[0];
sum5 += tmpptr[5] * kptr[0];
sum6 += tmpptr[6] * kptr[0];
sum7 += tmpptr[7] * kptr[0];
tmpptr += 8;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4);
const signed char* kptr = kernel.channel(p/4 + p%4);
#if __ARM_NEON
asm volatile(
// inch loop
"vmov.s32 q6, #0 \n"
"lsr r4, %6, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"// for(; nn != 0; nn--)
"pld [%2, #128] \n"
"vld1.s8 {d4-d5}, [%1]! \n"// tmpr a00-a03,a10-a13,a20-a23,a30-a33 a(inch)(data)
"vmovl.s8 q3, d5 \n"// a20-a23,a30-a33
"vmovl.s8 q2, d4 \n"// a00-a03,a10-a13
"vld1.s8 {d0}, [%2] \n"// kptr k00,k01,k02,k03 k(outch)(inch)
"vmovl.s8 q0, d0 \n"// k00,k01,k02,k03
"add %2, #4 \n"
"vmlal.s16 q6, d4, d0[0] \n"// (a00-a03) * k00
"vmlal.s16 q6, d5, d0[1] \n"// (a10-a13) * k01
"vmlal.s16 q6, d6, d0[2] \n"// (a20-a23) * k02
"vmlal.s16 q6, d7, d0[3] \n"// (a30-a33) * k03
"subs r4, r4, #1 \n"
"bne 0b \n"// end for
"1: \n"
// remain loop
"and r4, %6, #3 \n"// r4 = remain = inch & 3
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"// for(; remain != 0; remain--)
"vld1.s8 {d2}, [%1] \n"// tmpr a00-a03 a(inch)(data)
"vld1.s8 {d0}, [%2] \n"// kptr k00 k(outch)(inch)
"vmovl.s8 q1, d2 \n"
"vmovl.s8 q0, d0 \n"
"add %1, #4 \n"
"add %2, #1 \n"
"vmlal.s16 q6, d2, d0[0] \n"// (a00-a03) * k00
"subs r4, r4, #1 \n"
"bne 2b \n"
"3: \n"// store the result to memory
// top_s32 -> top_f32
"vcvt.f32.s32 q6, q6 \n"
// top_f32 = top_f32 * scale_in
"vmul.f32 q6, q6, %q8 \n"
// top_f32 = top_f32 + bias
"vadd.f32 q6, q6, %q7 \n"
// top_f32 = top_f32 * scale_out
"vmul.f32 q0, q6, %q9 \n"
// top_f32 -> top_s32
"vcvtr.s32.f32 s0, s0 \n"
"vcvtr.s32.f32 s1, s1 \n"
"vcvtr.s32.f32 s2, s2 \n"
"vcvtr.s32.f32 s3, s3 \n"
// top_s32 -> top_s16
"vqmovn.s32 d12, q0 \n"
// top_s16 -> top_s8
"vqmovn.s16 d12, q6 \n"
"vst1.s32 {d12[0]}, [%0]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(inch), // %6
"w"(_bias0), // %7
"w"(_scale_in), // %8
"w"(_scale_out) // %9
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6"
);
#else
int sum0 = 0;
int sum1 = 0;
int sum2 = 0;
int sum3 = 0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const signed char* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const signed char* kptr = kernel.channel(p/4 + p%4);
int q = 0;
int sum0 = 0;
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
}
#endif
static void conv1x1s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    // A 1x1 convolution with unit stride is exactly an im2col GEMM with a
    // 1x1 kernel window and stride 1 in both dimensions.
    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, /*kernel_w*/ 1, /*kernel_h*/ 1, /*stride_w*/ 1, /*stride_h*/ 1, opt);
}
static void conv1x1s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
    // A 1x1 convolution with stride 2 maps onto the same im2col GEMM path,
    // sampling every second pixel in each dimension.
    conv_im2col_sgemm_int8_neon(bottom_blob, top_blob, _kernel, /*kernel_w*/ 1, /*kernel_h*/ 1, /*stride_w*/ 2, /*stride_h*/ 2, opt);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M %
% T R R A A NN N SS F O O R R MM MM %
% T RRRR AAAAA N N N SSS FFF O O RRRR M M M %
% T R R A A N NN SS F O O R R M M %
% T R R A A N N SSSSS F OOO R R M M %
% %
% %
% MagickCore Image Transform Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile-private.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/resize.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o O r i e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoOrientImage() adjusts an image so that its orientation is suitable for
% viewing (i.e. top-left orientation).
%
% The format of the AutoOrientImage method is:
%
% Image *AutoOrientImage(const Image *image,
% const OrientationType orientation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image.
%
% o orientation: Current image orientation.
%
% o exception: Return any errors or warnings in this structure.
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *oriented;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Map each orientation onto the transform that restores a top-left-origin
    image; unknown or already-top-left orientations yield a plain clone.
  */
  oriented=(Image *) NULL;
  switch (orientation)
  {
    case TopRightOrientation:
    {
      oriented=FlopImage(image,exception);
      break;
    }
    case BottomRightOrientation:
    {
      oriented=RotateImage(image,180.0,exception);
      break;
    }
    case BottomLeftOrientation:
    {
      oriented=FlipImage(image,exception);
      break;
    }
    case LeftTopOrientation:
    {
      oriented=TransposeImage(image,exception);
      break;
    }
    case RightTopOrientation:
    {
      oriented=RotateImage(image,90.0,exception);
      break;
    }
    case RightBottomOrientation:
    {
      oriented=TransverseImage(image,exception);
      break;
    }
    case LeftBottomOrientation:
    {
      oriented=RotateImage(image,270.0,exception);
      break;
    }
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
    {
      oriented=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
  }
  /*
    The result, if any, is now in viewing orientation.
  */
  if (oriented != (Image *) NULL)
    oriented->orientation=TopLeftOrientation;
  return(oriented);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C h o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ChopImage() removes a region of an image and collapses the image to occupy
% the removed portion.
%
% The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o chop_info: Define the region of the image to chop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Clamp the chop rectangle to the image bounds; a negative origin shrinks
    the rectangle instead of reaching outside the image.
  */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /*
    The chopped image is smaller by exactly the extent of the removed region.
  */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Copy the rows above the chop region, skipping the chopped columns.
    (The shared() clause now lists progress in both loops: progress is
    mutated under "#pragma omp atomic", matching the second loop's pragma.)
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        Keep only pixels outside the chopped column range.
      */
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Copy the rows below the chop region, again skipping the chopped columns.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,chop_image,image->rows-(extent.y+extent.height),1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    /*
      Source rows start below the chop region; destination rows continue at
      extent.y, directly after the rows copied by the first loop.
    */
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          register ssize_t
            i;

          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (chop_traits == UndefinedPixelTrait))
              continue;
            SetPixelChannel(chop_image,channel,p[i],q);
          }
          q+=GetPixelChannels(chop_image);
        }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ChopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C M Y K I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImages() consolidates separate C, M, Y, and K planes into a
%  single image.
%
%  The format of the ConsolidateCMYKImages method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o images: the image sequence.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    j;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (j=0; j < (ssize_t) GetImageListLength(images); j+=4)
  {
    register ssize_t
      i;

    assert(images != (Image *) NULL);
    cmyk_image=CloneImage(images,0,0,MagickTrue,
      exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse)
      {
        /*
          Destroy the clone before bailing out so a failed storage-class
          promotion does not leak the image.
        */
        cmyk_image=DestroyImage(cmyk_image);
        break;
      }
    (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception);
    /*
      Each of the next four images in the list supplies one plane:
      C, M, Y, then K.
    */
    for (i=0; i < 4; i++)
    {
      image_view=AcquireVirtualCacheView(images,exception);
      cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
      for (y=0; y < (ssize_t) images->rows; y++)
      {
        register const Quantum
          *magick_restrict p;

        register ssize_t
          x;

        register Quantum
          *magick_restrict q;

        p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
        q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
          exception);
        if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
          break;
        for (x=0; x < (ssize_t) images->columns; x++)
        {
          Quantum
            pixel;

          /*
            CMYK is subtractive: a bright plane pixel means little ink.
          */
          pixel=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
          switch (i)
          {
            case 0: SetPixelCyan(cmyk_image,pixel,q); break;
            case 1: SetPixelMagenta(cmyk_image,pixel,q); break;
            case 2: SetPixelYellow(cmyk_image,pixel,q); break;
            case 3: SetPixelBlack(cmyk_image,pixel,q); break;
            default: break;
          }
          p+=GetPixelChannels(images);
          q+=GetPixelChannels(cmyk_image);
        }
        if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
          break;
      }
      cmyk_view=DestroyCacheView(cmyk_view);
      image_view=DestroyCacheView(image_view);
      images=GetNextImageInList(images);
      if (images == (Image *) NULL)
        break;
    }
    AppendImageToList(&cmyk_images,cmyk_image);
  }
  return(cmyk_images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImage() extracts a region of the image starting at the offset defined
% by geometry. Region must be fully defined, and no special handling of
% geometry flags is performed.
%
% The format of the CropImage method is:
%
% Image *CropImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to crop with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  OffsetInfo
    offset;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The page (virtual canvas) is the coordinate frame for the crop; fall back
    to the pixel bounds when the image has no page geometry set.
  */
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha_trait=BlendPixelTrait;
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=bounding_box;
      /* NOTE(review): the (-1,-1) page offset appears to flag the
         placeholder pixel as off-canvas -- confirm against callers. */
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /*
    Translate the requested region into pixel coordinates, clipping it to
    the intersection of the virtual canvas and the actual pixel data.
  */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  /*
    Clip to the pixel bounds, then honor the caller's explicit width/height
    if it is smaller still.
  */
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  /*
    Keep the original canvas size unless the crop region extends past it.
  */
  offset.x=(ssize_t) (bounding_box.x+bounding_box.width);
  offset.y=(ssize_t) (bounding_box.y+bounding_box.height);
  if ((offset.x > (ssize_t) image->page.width) ||
      (offset.y > (ssize_t) image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read a source row at the crop offset and write it at the top-left of
      the destination.
    */
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) crop_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy each channel that is defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (crop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(crop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(crop_image);
    }
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,CropImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C r o p I m a g e T o T i l e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CropImageToTiles() crops a single image, into a possible list of tiles.
% This may include a single sub-region of the image. This basically applies
% all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%        const char *crop_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double ConstrainPixelOffset(double x)
{
  /*
    Clamp x to a range that converts safely to ssize_t; the 512-pixel
    headroom below SSIZE_MAX leaves room for later offset arithmetic.
  */
  const double
    limit = (double) (SSIZE_MAX-512);

  if (x < -limit)
    return(-limit);
  if (x > limit)
    return(limit);
  return(x);
}
static inline ssize_t PixelRoundOffset(double x)
{
  /*
    Round x to the nearest integer pixel offset.  Exact halves round up
    (toward ceil), matching the original comparison's tie behavior.
  */
  double
    down,
    up;

  down=x-floor(x);
  up=ceil(x)-x;
  if (down < up)
    return((ssize_t) floor(ConstrainPixelOffset(x)));
  return((ssize_t) ceil(ConstrainPixelOffset(x)));
}
MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag).
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      /*
        The x/y offsets shrink (default) or grow (AspectValue, '!') the area
        to be divided into tiles.
      */
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      /*
        Tile sizes are fractional; each boundary is rounded independently so
        the tiles together exactly cover the area.  A tile is never smaller
        than one pixel.
      */
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y; /* increment now to find width */
            crop.height=(size_t) PixelRoundOffset((double) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=PixelRoundOffset((double) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y; /* increment now to find width */
            /* NOTE(review): this branch tests geometry.y < -1 while the
               matching x-branch below tests < 0 -- confirm the asymmetry
               is intentional. */
            crop.height=(size_t) PixelRoundOffset((double)
              (offset.y+(geometry.y < -1 ? geometry.y : 0)));
          }
        /*
          crop.height currently holds the tile's bottom edge; convert it to
          a height, then shift the origin onto the virtual canvas.
        */
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=PixelRoundOffset((double) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x; /* increment now to find height */
              crop.width=(size_t) PixelRoundOffset((double) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          /*
            As above: right edge -> width, then shift onto the canvas.
          */
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      /*
        Partial failures are tolerated: discard any warnings raised while
        cropping individual tiles.
      */
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /*
    The requested geometry covers the whole image: return a clone.
  */
  return(CloneImage(image,0,0,MagickTrue,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x c e r p t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns an excerpt of the image as defined by the geometry.
%
% The format of the ExcerptImage method is:
%
% Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Read a source row at the excerpt offset; virtual pixels satisfy any
      part of the region outside the image.
    */
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    /* NOTE(review): sibling functions in this file use
       QueueCacheViewAuthenticPixels when fully overwriting a destination
       row; Get is used here -- confirm whether reading the existing
       destination pixels is required. */
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy each channel that is defined in both source and destination.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ExcerptImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E x t e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExtentImage() extends the image as defined by the geometry, gravity, and
% image background color. Set the (x,y) offset of the geometry to move the
% original image relative to the extended image.
%
% The format of the ExtentImage method is:
%
% Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to extend with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  const StringInfo
    *clip_profile;

  Image
    *extent_image;

  MagickBooleanType
    composite_status;

  /*
    Build the extended canvas: clone the image at the requested geometry,
    fill it with the background color, then composite the source onto it at
    the negated geometry offset.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageBackgroundColor(extent_image,exception) == MagickFalse)
    {
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  composite_status=CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  if (composite_status != MagickFalse)
    {
      /*
        Keep any embedded 8BIM clip path in sync with the new canvas.
      */
      clip_profile=GetImageProfile(extent_image,"8bim");
      if (clip_profile != (const StringInfo *) NULL)
        Update8BIMClipPath(clip_profile,image->columns,image->rows,geometry);
    }
  return(extent_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlipImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis.
%
% The format of the FlipImage method is:
%
% Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  /*
    Create a vertical mirror image: row y of the source is written to row
    (rows-y-1) of the clone.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flip_image,flip_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* a prior row failed; drain remaining parallel iterations cheaply */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    /* destination row is the vertical mirror of the source row */
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy every channel the two images have in common.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlipImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /*
    Mirror the page offset vertically as well so the virtual canvas placement
    follows the flipped pixels.
  */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F l o p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FlopImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis.
%
% The format of the FlopImage method is:
%
% Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  /*
    Create a horizontal mirror image: each row is copied into the same row of
    the clone but with the pixel order reversed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,0,0,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,flop_image,flop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    /* a prior row failed; drain remaining parallel iterations cheaply */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      q starts one pixel past the end of the destination row and is
      decremented before each write, so the row is filled right-to-left while
      p walks the source left-to-right.
    */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(flop_image);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,FlopImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /*
    Mirror the page offset horizontally so the virtual canvas placement
    follows the flopped pixels.
  */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R o l l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RollImage() offsets an image as defined by x_offset and y_offset.
%
% The format of the RollImage method is:
%
% Image *RollImage(const Image *image,const ssize_t x_offset,
% const ssize_t y_offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x_offset: the number of columns to roll in the horizontal direction.
%
% o y_offset: the number of rows to roll in the vertical direction.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() copies a columns x rows block of pixels from (sx,sy) of
  source to (dx,dy) of destination.  It is the workhorse for RollImage(),
  which assembles the rolled image from four such quadrant copies.  Returns
  MagickTrue on success; a zero-width region is trivially successful.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy every channel the two images have in common.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    shift;

  ssize_t
    remaining_columns,
    remaining_rows;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,0,0,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the roll offsets into [0,columns) x [0,rows).
  */
  shift.x=x_offset;
  shift.y=y_offset;
  while (shift.x < 0)
    shift.x+=(ssize_t) image->columns;
  while (shift.x >= (ssize_t) image->columns)
    shift.x-=(ssize_t) image->columns;
  while (shift.y < 0)
    shift.y+=(ssize_t) image->rows;
  while (shift.y >= (ssize_t) image->rows)
    shift.y-=(ssize_t) image->rows;
  remaining_columns=(ssize_t) image->columns-shift.x;
  remaining_rows=(ssize_t) image->rows-shift.y;
  /*
    Roll image: the four source quadrants split at (remaining_columns,
    remaining_rows) wrap around to the opposite corners of the destination.
  */
  status=CopyImageRegion(roll_image,image,(size_t) shift.x,
    (size_t) shift.y,remaining_columns,remaining_rows,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,(size_t) remaining_columns,
    (size_t) shift.y,0,remaining_rows,shift.x,0,exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) shift.x,
    (size_t) remaining_rows,remaining_columns,0,0,shift.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,(size_t) remaining_columns,
    (size_t) remaining_rows,0,0,shift.x,shift.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h a v e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShaveImage() shaves pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the ShaveImage method is:
%
% Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o shave_image: Method ShaveImage returns a pointer to the shaved
% image. A null image is returned if there is a memory shortage or
% if the image width or height is zero.
%
% o image: the image.
%
% o shave_info: Specifies a pointer to a RectangleInfo which defines the
% region of the image to crop.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  /*
    Shave shave_info->width columns from the left and right edges and
    shave_info->height rows from the top and bottom by cropping the interior
    region of the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(shave_info != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the page canvas to match the shaved pixels.  page.width/height are
    unsigned; guard the subtraction so a page smaller than the shaved border
    (e.g. an unset 0x0 page) cannot wrap around to a huge canvas size.
  */
  if (shave_image->page.width > 2*shave_info->width)
    shave_image->page.width-=2*shave_info->width;
  else
    shave_image->page.width=0;
  if (shave_image->page.height > 2*shave_info->height)
    shave_image->page.height-=2*shave_info->height;
  else
    shave_image->page.height=0;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S p l i c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SpliceImage() splices a solid color into the image as defined by the
% geometry.
%
% The format of the SpliceImage method is:
%
% Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o geometry: Define the region of the image to splice with members
% x, y, width, and height.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define SpliceImageTag "Splice/Image"

  CacheView
    *image_view,
    *splice_view;

  Image
    *splice_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    splice_geometry;

  ssize_t
    columns,
    y;

  /*
    Allocate splice image: the source grown by geometry->width columns and
    geometry->height rows, with the inserted band filled with the background
    color at the gravity-adjusted (x,y) position.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  splice_geometry=(*geometry);
  splice_image=CloneImage(image,image->columns+splice_geometry.width,
    image->rows+splice_geometry.height,MagickTrue,exception);
  if (splice_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse)
    {
      splice_image=DestroyImage(splice_image);
      return((Image *) NULL);
    }
  /*
    A non-gray background forces the image out of a gray colorspace; an alpha
    background enables the alpha channel so the splice band honors it.
  */
  if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) &&
      (IsGrayColorspace(splice_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(splice_image,sRGBColorspace,exception);
  if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) &&
      (splice_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(splice_image,OpaqueAlpha,exception);
  (void) SetImageBackgroundColor(splice_image,exception);
  /*
    Respect image geometry: shift the splice position according to the
    image's gravity (x pairs with width, y with height).
  */
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /* vertical centering must offset y by height/2 (was width/2 — bug) */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case CenterGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image: first copy the rows above the spliced band.  Within each
    row, pixels left of the band are copied, the band itself is skipped
    (left as background), then the remaining pixels are copied.
  */
  status=MagickTrue;
  progress=0;
  columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  splice_view=AcquireAuthenticCacheView(splice_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_geometry.y,1)
#endif
  for (y=0; y < (ssize_t) splice_geometry.y; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1,
      exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      /* core RGBA channels are copied unconditionally, regardless of traits */
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    /* skip the spliced band; it keeps the background color */
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpliceImageTag,progress,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Now copy the rows below the spliced band; the source row is offset by the
    spliced height.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,splice_image,splice_image->rows,2)
#endif
  for (y=(ssize_t) (splice_geometry.y+splice_geometry.height);
       y < (ssize_t) splice_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    if ((y < 0) || (y >= (ssize_t)splice_image->rows))
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height,
      splice_image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++)
      q+=GetPixelChannels(splice_image);
    for ( ; x < (ssize_t) splice_image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (splice_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(splice_image,channel,p[i],q);
      }
      SetPixelRed(splice_image,GetPixelRed(image,p),q);
      SetPixelGreen(splice_image,GetPixelGreen(image,p),q);
      SetPixelBlue(splice_image,GetPixelBlue(image,p),q);
      SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(splice_image);
    }
    if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,SpliceImageTag,progress,
          splice_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  splice_view=DestroyCacheView(splice_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    splice_image=DestroyImage(splice_image);
  return(splice_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f o r m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransformImage() is a convenience method that behaves like ResizeImage() or
% CropImage() but accepts scaling and/or cropping information as a region
% geometry specification. If the operation fails, the original image handle
% is left as is.
%
% This should only be used for single images.
%
% This function destroys what it assumes to be a single image list.
% If the input image is part of a larger list, all other images in that list
% will be simply 'lost', not destroyed.
%
% Also if the crop generates a list of images only the first image is resized.
% And finally if the crop succeeds and the resize failed, you will get a
% cropped image, as well as a 'false' or 'failed' report.
%
%  This function should probably be deprecated in favor of direct calls
%  to CropImageToTiles() or ResizeImage(), as appropriate.
%
% The format of the TransformImage method is:
%
% MagickBooleanType TransformImage(Image **image,const char *crop_geometry,
% const char *image_geometry,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image The transformed image is returned as this parameter.
%
% o crop_geometry: A crop geometry string. This geometry defines a
% subregion of the image to crop.
%
% o image_geometry: An image geometry string. This geometry defines the
% final size of the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType TransformImage(Image **image,
  const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception)
{
  Image
    *current,
    *resized;

  RectangleInfo
    region;

  assert(image != (Image **) NULL);
  assert((*image)->signature == MagickCoreSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  current=(*image);
  if (crop_geometry != (const char *) NULL)
    {
      Image
        *cropped;

      /*
        Crop image to a user specified size.  On success the input image is
        destroyed and replaced by the first image of the crop list; on
        failure a plain clone is substituted instead.
      */
      cropped=CropImageToTiles(*image,crop_geometry,exception);
      if (cropped != (Image *) NULL)
        {
          current=DestroyImage(current);
          current=GetFirstImageInList(cropped);
        }
      else
        current=CloneImage(*image,0,0,MagickTrue,exception);
      *image=current;
    }
  if (image_geometry == (const char *) NULL)
    return(MagickTrue);
  /*
    Scale image to a user specified size; a no-op when the geometry already
    matches the current dimensions.
  */
  (void) ParseRegionGeometry(current,image_geometry,&region,exception);
  if ((current->columns == region.width) && (current->rows == region.height))
    return(MagickTrue);
  resized=ResizeImage(current,region.width,region.height,current->filter,
    exception);
  if (resized == (Image *) NULL)
    return(MagickFalse);
  current=DestroyImage(current);
  *image=resized;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s p o s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransposeImage() creates a horizontal mirror image by reflecting the pixels
% around the central y-axis while rotating them by 90 degrees.
%
% The format of the TransposeImage method is:
%
% Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The transposed image swaps dimensions: rows x columns.
  */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image: source row (rows-y-1) becomes destination column
    (rows-y-1), written top-to-bottom as a 1-pixel-wide column.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Copy every channel the two images have in common.
      */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transpose_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transpose_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(transpose_image);
    }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransposeImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /*
    Swap the page geometry to follow the transposed axes.
  */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s v e r s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TransverseImage() creates a vertical mirror image by reflecting the pixels
% around the central x-axis while rotating them by 270 degrees.
%
% The format of the TransverseImage method is:
%
% Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  /*
    Validate arguments before any work is done.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    The transverse image has swapped dimensions: columns <-> rows.
  */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image: source row y becomes destination column (rows-y-1),
    written bottom-to-top (see the backwards-stepping q below).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another iteration already failed; skip remaining rows */
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Start one pixel past the end of the destination column and step q
      backwards so the row is mirrored as it is transposed.
    */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(transverse_image);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        /* Copy only channels defined in both source and destination. */
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,TransverseImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /*
    Update the page geometry: swap extents and offsets, then re-anchor the
    offsets so the region lands in its transversed position on the canvas.
  */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r i m I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% TrimImage() trims pixels from the image edges. It allocates the memory
% necessary for the new Image structure and returns a pointer to the new
% image.
%
% The format of the TrimImage method is:
%
% Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;

  /*
    Validate arguments, consistent with the other transform methods (the
    original asserted image but dereferenced exception unchecked).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  geometry=GetImageBoundingBox(image,exception);
  if ((geometry.width == 0) || (geometry.height == 0))
    {
      Image
        *crop_image;

      /*
        Nothing to trim (uniform image): return a 1x1 transparent image with
        an off-canvas page offset as the canonical "empty" result.
      */
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.alpha_trait=BlendPixelTrait;
      crop_image->background_color.alpha=(MagickRealType) TransparentAlpha;
      (void) SetImageBackgroundColor(crop_image,exception);
      crop_image->page=image->page;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      return(crop_image);
    }
  /*
    Crop to the bounding box, expressed in page (canvas) coordinates.
  */
  geometry.x+=image->page.x;
  geometry.y+=image->page.y;
  return(CropImage(image,&geometry,exception));
}
|
relax.c | /*BHEADER**********************************************************************
* Copyright (c) 2006 The Regents of the University of California.
* Produced at the Lawrence Livermore National Laboratory.
* Written by the HYPRE team. UCRL-CODE-222953.
* All rights reserved.
*
* This file is part of HYPRE (see http://www.llnl.gov/CASC/hypre/).
* Please see the COPYRIGHT_and_LICENSE file for the copyright notice,
* disclaimer, contact information and the GNU Lesser General Public License.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License (as published by the Free Software
* Foundation) version 2.1 dated February 1999.
*
* HYPRE is distributed in the hope that it will be useful, but WITHOUT ANY
* WARRANTY; without even the IMPLIED WARRANTY OF MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the terms and conditions of the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* $Revision: 2.8 $
***********************************************************************EHEADER*/
/******************************************************************************
*
* Relaxation scheme
*
*****************************************************************************/
#include "headers.h"
#include "omp.h"
/*--------------------------------------------------------------------------
* hypre_BoomerAMGSeqRelax
*--------------------------------------------------------------------------*/
int hypre_BoomerAMGSeqRelax( hypre_CSRMatrix *A,
                             hypre_Vector *f,
                             hypre_Vector *u)
{
   /*
    * Hybrid smoother for A u = f: rows are split into one contiguous block
    * per thread.  Within a block the sweep is Gauss-Seidel (uses freshly
    * updated u_data); couplings to rows in other blocks read the pre-sweep
    * snapshot tmp_data (Jacobi across blocks).  Assumes the diagonal entry
    * of row i is stored first, at A_diag_data[A_diag_i[i]].
    *
    * Returns relax_error (0 on success).
    */
   double *A_diag_data = hypre_CSRMatrixData(A);
   int *A_diag_i = hypre_CSRMatrixI(A);
   int *A_diag_j = hypre_CSRMatrixJ(A);
   int n = hypre_CSRMatrixNumRows(A);
   double *u_data = hypre_VectorData(u);
   double *f_data = hypre_VectorData(f);
   double *tmp_data;
   double res;
   int i, j;
   int ii, jj;
   int ns, ne, size, rest;
   int chunk;
   int relax_error = 0;
   int num_threads;

   num_threads = hypre_NumThreads();

   if (1) /* parallel sweep; the else branch keeps the serial sweep for debugging */
   {
      tmp_data = hypre_CTAlloc(double,n);
      /* schedule(dynamic, chunk_size) requires a positive chunk size; the
         previous n/16 underflowed to 0 for n < 16, which is non-conforming. */
      chunk = (n >= 16) ? (n/16) : 1;
#pragma omp parallel private(num_threads)
      {
         num_threads = omp_get_num_threads();
         /* Snapshot u so inter-block couplings read pre-sweep values; the
            implicit barrier at the end of this loop orders it before the
            relaxation loop below. */
#pragma omp for private(i) schedule(dynamic, chunk)
         for (i = 0; i < n; i++)
            tmp_data[i] = u_data[i];
#pragma omp for private(i,ii,j,jj,ns,ne,res,rest,size) schedule(dynamic,1)
         for (j = 0; j < num_threads; j++)
         {
            /* Rows [ns,ne) form block j; the first `rest` blocks take one
               extra row when n is not divisible by num_threads. */
            size = n/num_threads;
            rest = n - size*num_threads;
            if (j < rest)
            {
               ns = j*size+j;
               ne = (j+1)*size+j+1;
            }
            else
            {
               ns = j*size+rest;
               ne = (j+1)*size+rest;
            }
            for (i = ns; i < ne; i++)
            {
               /*-----------------------------------------------------------
                * If diagonal is nonzero, relax point i; otherwise, skip it.
                *-----------------------------------------------------------*/
               if ( A_diag_data[A_diag_i[i]] != 0.0)
               {
                  res = f_data[i];
                  for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
                  {
                     ii = A_diag_j[jj];
                     if (ii >= ns && ii < ne)
                        res -= A_diag_data[jj] * u_data[ii];   /* in-block: Gauss-Seidel */
                     else
                        res -= A_diag_data[jj] * tmp_data[ii]; /* off-block: Jacobi */
                  }
                  u_data[i] = res / A_diag_data[A_diag_i[i]];
               }
            }
         }
      }
      hypre_TFree(tmp_data);
   }
   else
   {
      /* Serial forward Gauss-Seidel sweep over all points. */
      for (i = 0; i < n; i++)
      {
         /*-----------------------------------------------------------
          * If diagonal is nonzero, relax point i; otherwise, skip it.
          *-----------------------------------------------------------*/
         if ( A_diag_data[A_diag_i[i]] != 0.0)
         {
            res = f_data[i];
            for (jj = A_diag_i[i]+1; jj < A_diag_i[i+1]; jj++)
            {
               ii = A_diag_j[jj];
               res -= A_diag_data[jj] * u_data[ii];
            }
            u_data[i] = res / A_diag_data[A_diag_i[i]];
         }
      }
   }
   return(relax_error);
}
|
mesh.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include "utils.h"
// Set the OpenMP thread count; non-positive requests are ignored.
void set_num_threads(int num_threads)
{
  if (num_threads <= 0) return;
  omp_set_num_threads(num_threads);
}
// Count the threads in an OpenMP parallel team: each member contributes
// exactly 1 to the reduction. Returns 1 when OpenMP is disabled.
int get_num_threads()
{
  int count = 0;
  #pragma omp parallel reduction(+:count)
  count += 1;
  return count;
}
int assign_cic(FLOAT* mesh, const int* nmesh, const FLOAT* positions, const FLOAT* weights, size_t npositions) {
  // Cloud-in-cell (CIC) deposit of weighted particles onto the mesh.
  // Assumes periodic boundaries; positions must lie in [0,nmesh-1] per axis.
  // Returns 0 on success, -1 as soon as a position is out of range.
  const size_t stride_y = nmesh[2];           // flat-index step per y cell
  const size_t stride_x = nmesh[2]*nmesh[1];  // flat-index step per x cell
  for (size_t n=0; n<npositions; n++) {
    const FLOAT w = weights[n];
    const FLOAT *p = &(positions[n*NDIM]);
    int i0 = (int) p[0];
    int j0 = (int) p[1];
    int k0 = (int) p[2];
    const int inside = (i0>=0 && i0<nmesh[0] && j0>=0 && j0<nmesh[1] && k0>=0 && k0<nmesh[2]);
    if (!inside) {
      printf("Index out of range: (ix,iy,iz) = (%d,%d,%d) for (%.3f,%.3f,%.3f)\n",i0,j0,k0,p[0],p[1],p[2]);
      return -1;
    }
    // Fractional offsets inside the cell.
    FLOAT dx = p[0] - i0;
    FLOAT dy = p[1] - j0;
    FLOAT dz = p[2] - k0;
    // Periodic +1 neighbors, pre-multiplied by their strides.
    size_t i1 = stride_x*((i0+1) % nmesh[0]);
    size_t j1 = stride_y*((j0+1) % nmesh[1]);
    size_t k1 = (k0+1) % nmesh[2];
    i0 *= stride_x;
    j0 *= stride_y;
    // Spread the weight over the 8 surrounding grid points.
    mesh[i0+j0+k0] += (1-dx)*(1-dy)*(1-dz)*w;
    mesh[i0+j0+k1] += (1-dx)*(1-dy)*dz*w;
    mesh[i0+j1+k0] += (1-dx)*dy*(1-dz)*w;
    mesh[i0+j1+k1] += (1-dx)*dy*dz*w;
    mesh[i1+j0+k0] += dx*(1-dy)*(1-dz)*w;
    mesh[i1+j0+k1] += dx*(1-dy)*dz*w;
    mesh[i1+j1+k0] += dx*dy*(1-dz)*w;
    mesh[i1+j1+k1] += dx*dy*dz*w;
  }
  return 0;
}
int convolve(FLOAT* mesh, const int* nmesh, const FLOAT* kernel, const int* nkernel) {
  // Brute-force convolution of mesh with kernel (e.g. Gaussian smoothing).
  // Assumes periodic boundaries. Kernel dimensions must be odd so the
  // stencil is centered. Returns 0 on success, -1 on error.
  FLOAT sumw = 0;
  size_t size = nkernel[0]*nkernel[1]*nkernel[2];
  const size_t nkernelz = nkernel[2];
  const size_t nkernelyz = nkernel[2]*nkernel[1];
  // Kernel sum, used to normalize so the mesh mean is conserved.
  for (size_t ii=0; ii<size; ii++) sumw += kernel[ii];
  // Take a copy of the data we're smoothing.
  size = nmesh[0]*nmesh[1]*nmesh[2];
  const size_t nmeshz = nmesh[2];
  const size_t nmeshyz = nmesh[2]*nmesh[1];
  FLOAT *ss = (FLOAT *) malloc(size*sizeof(FLOAT));
  if (ss == NULL) return -1;  // previously dereferenced unchecked
  for (size_t ii=0; ii<size; ii++) ss[ii] = mesh[ii];
  // Kernel half-widths. These are integral by construction; the previous
  // FLOAT declaration forced pointless int<->float conversions in the loop
  // bounds below and was inconsistent with smooth_gaussian()'s int rad.
  int rad[NDIM];
  for (int idim=0; idim<NDIM; idim++) {
    rad[idim] = nkernel[idim] / 2;
    if (nkernel[idim] % 2 == 0) {
      printf("Kernel size must be odd");
      free(ss);
      return -1;
    }
  }
#pragma omp parallel for shared(mesh,ss,kernel)
  for (int ix=0; ix<nmesh[0]; ix++) {
    for (int iy=0; iy<nmesh[1]; iy++) {
      for (int iz=0; iz<nmesh[2]; iz++) {
        FLOAT sumd = 0;
        for (int dx=-rad[0]; dx<=rad[0]; dx++) {
          size_t iix = nmeshyz*((ix+dx+nmesh[0]) % nmesh[0]);
          size_t jjx = nkernelyz*(rad[0]+dx);
          for (int dy=-rad[1]; dy<=rad[1]; dy++) {
            size_t iiy = nmeshz*((iy+dy+nmesh[1]) % nmesh[1]);
            size_t jjy = nkernelz*(rad[1]+dy);
            for (int dz=-rad[2]; dz<=rad[2]; dz++) {
              int iiz = (iz+dz+nmesh[2]) % nmesh[2];
              size_t ii = iix+iiy+iiz;
              size_t jj = jjx+jjy+rad[2]+dz;
              sumd += kernel[jj]*ss[ii];
            }
          }
        }
        mesh[nmeshyz*ix+nmeshz*iy+iz] = sumd/sumw;
      }
    }
  }
  free(ss);
  return 0;
}
int smooth_gaussian(FLOAT* mesh, const int* nmesh, const FLOAT* smoothing_radius, const FLOAT nsigmas) {
  // Gaussian smoothing by brute-force convolution.
  // smoothing_radius is in box units; the stencil covers at least
  // nsigmas * smoothing_radius grid points in each direction.
  // Returns 0 on success, -1 on error.
  int rad[NDIM], nkernel[NDIM];
  FLOAT fact[NDIM];
  for (int idim=0; idim<NDIM; idim++) {
    rad[idim] = (size_t) (nsigmas*smoothing_radius[idim]*nmesh[idim] + 1.0);
    nkernel[idim] = 2*rad[idim] + 1;  // always odd, as convolve() requires
    // 1/sigma^2 with sigma expressed in grid units.
    fact[idim] = 1.0/(nmesh[idim]*smoothing_radius[idim])/(nmesh[idim]*smoothing_radius[idim]);
  }
  FLOAT *kernel = (FLOAT *) malloc(nkernel[0]*nkernel[1]*nkernel[2]*sizeof(FLOAT));
  if (kernel == NULL) return -1;
#pragma omp parallel for shared(kernel)
  for (int dx=-rad[0]; dx<=rad[0]; dx++) {
    for (int dy=-rad[1]; dy<=rad[1]; dy++) {
      for (int dz=-rad[2]; dz<=rad[2]; dz++) {
        // Flat kernel index. The strides must match how convolve() reads the
        // kernel: nkernel[1]*nkernel[2] per x step and nkernel[2] per y step.
        // The y stride was previously nkernel[0], scrambling the kernel
        // whenever nkernel[0] != nkernel[2].
        size_t ii = nkernel[1]*nkernel[2]*(rad[0]+dx)+nkernel[2]*(dy+rad[1])+(dz+rad[2]);
        FLOAT r2 = fact[0]*dx*dx+fact[1]*dy*dy+fact[2]*dz*dz;
        // Truncate the Gaussian beyond nsigmas to keep the kernel compact.
        if (r2 < nsigmas*nsigmas) kernel[ii] = exp(-r2/2);
        else kernel[ii] = 0;
      }
    }
  }
  int status = convolve(mesh,nmesh,kernel,nkernel);
  free(kernel);
  return status;
}
/*
void smooth_fft_gaussian(FLOAT *mesh, const int* nmesh, const FLOAT* smoothing_radius) {
// Performs a Gaussian smoothing using the FFTW library (v3), assumed to be
// installed already. Rf is assumed to be in box units.
// Make temporary vectors. FFTW uses double precision.
size_t size = nmesh[0]*nmesh[1]*nmesh[2];
fftw_complex * meshk = (fftw_complex *) malloc(nmesh[0]*nmesh[1]*(nmesh[2]/2+1)*sizeof(fftw_complex));
// Generate the FFTW plan files.
fftw_init_threads();
fftw_plan_with_nthreads(omp_get_max_threads());
fftw_plan fplan = fftw_plan_dft_r2c_3d(nmesh[0],nmesh[1],nmesh[2],mesh,meshk,FFTW_ESTIMATE);
fftw_plan iplan = fftw_plan_dft_c2r_3d(nmesh[0],nmesh[1],nmesh[2],meshk,mesh,FFTW_ESTIMATE);
fftw_execute(fplan);
// Now multiply by the smoothing filter.
FLOAT fact[NDIM];
for (int idim=0; idim<NDIM; idim++) fact[idim] = 0.5*smoothing_radius[idim]*smoothing_radius[idim]*(2*M_PI)*(2*M_PI);
#pragma omp parallel for shared(meshk)
for (int ix=0; ix<nmesh[0]; ix++) {
int iix = (ix<=nmesh[0]/2) ? ix : ix-nmesh[0];
for (int iy=0; iy<nmesh[1]; iy++) {
int iiy = (iy<=nmesh[1]/2) ? iy : iy-nmesh[1];
for (int iz=0; iz<nmesh[2]/2+1; iz++) {
int iiz = iz;
size_t ip = nmesh[1]*(nmesh[2]/2+1)*ix+(nmesh[2]/2+1)*iy+iz;
FLOAT smth = exp(-(fact[0]*iix*iix+fact[1]*iiy*iiy+fact[2]*iiz*iiz));
meshk[ip][0] *= smth;
meshk[ip][1] *= smth;
}
}
}
meshk[0][0] = meshk[0][1] = 0; // Set the mean to zero.
fftw_execute(iplan);
#pragma omp parallel for shared(mesh)
for (size_t ii=0; ii<size; ii++) mesh[ii] /= size;
fftw_destroy_plan(fplan);
fftw_destroy_plan(iplan);
fftw_cleanup_threads();
free(meshk);
}
*/
int read_finite_difference_cic(const FLOAT* mesh, const int* nmesh, const FLOAT* boxsize, const FLOAT* positions, FLOAT* shifts, size_t npositions) {
  // Computes the displacement field from mesh using second-order accurate
  // finite difference and shifts the data and randoms.
  // The displacements are pulled from the grid onto the positions of the
  // particles using CIC.
  // Positions must be in [0,nmesh-1]; boundaries are periodic.
  // Output is in boxsize unit. Returns 0 on success, -1 on a range error.
  const size_t nmeshz = nmesh[2];            // flat-index step per y cell
  const size_t nmeshyz = nmesh[2]*nmesh[1];  // flat-index step per x cell
  FLOAT cell[NDIM];
  // Central-difference denominator 2h per axis, h = boxsize/nmesh.
  for (int idim=0; idim<NDIM; idim++) cell[idim] = 2.0*boxsize[idim]/nmesh[idim];
  int flag = 0;
  // NOTE(review): flag is read/written by all threads without
  // synchronization; every writer stores the same value (1), but this is
  // formally a data race — consider an atomic write. Verify intent.
#pragma omp parallel for shared(mesh,positions,shifts)
  for (size_t ii=0; ii<npositions; ii++) {
    if (flag) continue;  // best-effort early out once an error was seen
    // This is written out in gory detail both to make it easier to
    // see what's going on and to encourage the compiler to optimize
    // and vectorize the code as much as possible.
    const FLOAT *pos = &(positions[ii*NDIM]);
    int ix0 = (int) pos[0];
    int iy0 = (int) pos[1];
    int iz0 = (int) pos[2];
    if (ix0<0 || ix0>=nmesh[0] || iy0<0 || iy0>=nmesh[1] || iz0<0 || iz0>=nmesh[2]) {
      printf("Index out of range: (ix,iy,iz) = (%d,%d,%d) for (%.3f,%.3f,%.3f)\n",ix0,iy0,iz0,pos[0],pos[1],pos[2]);
      flag = 1;
      continue;
    }
    // CIC fractions within the host cell.
    FLOAT dx = pos[0] - ix0;
    FLOAT dy = pos[1] - iy0;
    FLOAT dz = pos[2] - iz0;
    // Periodic neighbor indices pre-multiplied by their strides:
    // m = -1, p = +1, pp = +2 grid steps along each axis.
    size_t ixp = nmeshyz*((ix0+1) % nmesh[0]);
    size_t ixpp = nmeshyz*((ix0+2) % nmesh[0]);
    size_t ixm = nmeshyz*((ix0-1+nmesh[0]) % nmesh[0]);
    size_t iyp = nmeshz*((iy0+1) % nmesh[1]);
    size_t iypp = nmeshz*((iy0+2) % nmesh[1]);
    size_t iym = nmeshz*((iy0-1+nmesh[1]) % nmesh[1]);
    size_t izp = (iz0+1) % nmesh[2];
    size_t izpp = (iz0+2) % nmesh[2];
    size_t izm = (iz0-1+nmesh[2]) % nmesh[2];
    ix0 *= nmeshyz;
    iy0 *= nmeshz;
    // Accumulate the central-difference gradient evaluated at each of the
    // 8 cell corners, weighted by that corner's CIC coefficient wt.
    FLOAT px,py,pz,wt;
    wt = (1-dx)*(1-dy)*(1-dz);
    px = (mesh[ixp+iy0+iz0]-mesh[ixm+iy0+iz0])*wt;
    py = (mesh[ix0+iyp+iz0]-mesh[ix0+iym+iz0])*wt;
    pz = (mesh[ix0+iy0+izp]-mesh[ix0+iy0+izm])*wt;
    wt = dx*(1-dy)*(1-dz);
    px += (mesh[ixpp+iy0+iz0]-mesh[ix0+iy0+iz0])*wt;
    py += (mesh[ixp+iyp+iz0]-mesh[ixp+iym+iz0])*wt;
    pz += (mesh[ixp+iy0+izp]-mesh[ixp+iy0+izm])*wt;
    wt = (1-dx)*dy*(1-dz);
    px += (mesh[ixp+iyp+iz0]-mesh[ixm+iyp+iz0])*wt;
    py += (mesh[ix0+iypp+iz0]-mesh[ix0+iy0+iz0])*wt;
    pz += (mesh[ix0+iyp+izp]-mesh[ix0+iyp+izm])*wt;
    wt = (1-dx)*(1-dy)*dz;
    px += (mesh[ixp+iy0+izp]-mesh[ixm+iy0+izp])*wt;
    py += (mesh[ix0+iyp+izp]-mesh[ix0+iym+izp])*wt;
    pz += (mesh[ix0+iy0+izpp]-mesh[ix0+iy0+iz0])*wt;
    wt = dx*dy*(1-dz);
    px += (mesh[ixpp+iyp+iz0]-mesh[ix0+iyp+iz0])*wt;
    py += (mesh[ixp+iypp+iz0]-mesh[ixp+iy0+iz0])*wt;
    pz += (mesh[ixp+iyp+izp]-mesh[ixp+iyp+izm])*wt;
    wt = dx*(1-dy)*dz;
    px += (mesh[ixpp+iy0+izp]-mesh[ix0+iy0+izp])*wt;
    py += (mesh[ixp+iyp+izp]-mesh[ixp+iym+izp])*wt;
    pz += (mesh[ixp+iy0+izpp]-mesh[ixp+iy0+iz0])*wt;
    wt = (1-dx)*dy*dz;
    px += (mesh[ixp+iyp+izp]-mesh[ixm+iyp+izp])*wt;
    py += (mesh[ix0+iypp+izp]-mesh[ix0+iy0+izp])*wt;
    pz += (mesh[ix0+iyp+izpp]-mesh[ix0+iyp+iz0])*wt;
    wt = dx*dy*dz;
    px += (mesh[ixpp+iyp+izp]-mesh[ix0+iyp+izp])*wt;
    py += (mesh[ixp+iypp+izp]-mesh[ixp+iy0+izp])*wt;
    pz += (mesh[ixp+iyp+izpp]-mesh[ixp+iyp+iz0])*wt;
    FLOAT *sh = &(shifts[ii*NDIM]);
    //px *= boxsize[0]*boxsize[0];
    //py *= boxsize[0]*boxsize[0];
    //pz *= boxsize[0]*boxsize[0];
    // Divide by 2h to turn the differences into derivatives in box units.
    sh[0] = px/cell[0];
    sh[1] = py/cell[1];
    sh[2] = pz/cell[2];
  }
  if (flag) return -1;
  return 0;
}
int read_cic(const FLOAT* mesh, const int* nmesh, const FLOAT* positions, FLOAT* shifts, size_t npositions) {
  // Trilinear (CIC) interpolation of mesh values onto particle positions.
  // Positions must be in [0,nmesh-1]; boundaries are periodic.
  // Returns 0 on success, -1 if any position is out of range.
  const size_t stride_y = nmesh[2];
  const size_t stride_x = nmesh[2]*nmesh[1];
  int bad = 0;
#pragma omp parallel for shared(mesh,positions,shifts,bad)
  for (size_t n=0; n<npositions; n++) {
    if (bad) continue;  // best-effort early out once an error was seen
    const FLOAT *p = &(positions[n*NDIM]);
    int i0 = (int) p[0];
    int j0 = (int) p[1];
    int k0 = (int) p[2];
    if (i0<0 || i0>=nmesh[0] || j0<0 || j0>=nmesh[1] || k0<0 || k0>=nmesh[2]) {
      printf("Index out of range: (ix,iy,iz) = (%d,%d,%d) for (%.3f,%.3f,%.3f)\n",i0,j0,k0,p[0],p[1],p[2]);
      bad = 1;
      continue;
    }
    FLOAT dx = p[0] - i0;
    FLOAT dy = p[1] - j0;
    FLOAT dz = p[2] - k0;
    size_t i1 = stride_x*((i0+1) % nmesh[0]);
    size_t j1 = stride_y*((j0+1) % nmesh[1]);
    size_t k1 = (k0+1) % nmesh[2];
    i0 *= stride_x;
    j0 *= stride_y;
    // Gather from the 8 surrounding grid points with trilinear weights.
    FLOAT val;
    val = mesh[i0+j0+k0]*(1-dx)*(1-dy)*(1-dz);
    val += mesh[i0+j0+k1]*(1-dx)*(1-dy)*dz;
    val += mesh[i0+j1+k0]*(1-dx)*dy*(1-dz);
    val += mesh[i0+j1+k1]*(1-dx)*dy*dz;
    val += mesh[i1+j0+k0]*dx*(1-dy)*(1-dz);
    val += mesh[i1+j0+k1]*dx*(1-dy)*dz;
    val += mesh[i1+j1+k0]*dx*dy*(1-dz);
    val += mesh[i1+j1+k1]*dx*dy*dz;
    shifts[n] = val;
  }
  if (bad) return -1;
  return 0;
}
/*
int copy(FLOAT* input_array, FLOAT* output_array, const size_t size) {
#pragma omp parallel for schedule(dynamic) shared(input_array, output_array)
for (size_t ii=0; ii<size; ii++) output_array[ii] = input_array[ii];
return 0;
}
*/
// Parallel element-wise copy of input_array into output_array.
// Large static chunks keep scheduling overhead low for big arrays.
int copy(FLOAT* input_array, FLOAT* output_array, const size_t size) {
  const int chunksize = 100000;
#pragma omp parallel for schedule(static, chunksize) shared(input_array, output_array)
  for (size_t idx=0; idx<size; idx++)
    output_array[idx] = input_array[idx];
  return 0;
}
int prod_sum(FLOAT* mesh, const int* nmesh, const FLOAT* coords, const int exp) {
  // In-place update: mesh[ix,iy,iz] op= (cz[iz] + cy[iy] + cx[ix])^exp.
  // coords packs the per-axis arrays back-to-back in z, y, x order
  // (lengths nmesh[2], nmesh[1], nmesh[0] respectively).
  // exp == -1 and exp == 1 are hand-expanded fast paths; any other
  // exponent falls back to POW. We expand everything to help the compiler;
  // slightly faster than a numpy code. Returns 0.
  const size_t nmeshz = nmesh[2];               // offset of the y array
  const size_t nmeshypz = nmesh[1] + nmesh[2];  // offset of the x array
  const size_t nmeshyz = nmesh[2]*nmesh[1];
  if (exp == -1) {
#pragma omp parallel for shared(mesh)
    for (int ix=0; ix<nmesh[0]; ix++) {
      for (int iy=0; iy<nmesh[1]; iy++) {
        FLOAT xy = coords[nmeshz + iy] + coords[nmeshypz + ix];
        size_t ixy = nmeshyz*ix + nmeshz*iy;
        for (int iz=0; iz<nmesh[2]; iz++) mesh[ixy + iz] /= (xy + coords[iz]);
      }
    }
  }
  else if (exp == 1) {
#pragma omp parallel for shared(mesh)
    for (int ix=0; ix<nmesh[0]; ix++) {
      for (int iy=0; iy<nmesh[1]; iy++) {
        FLOAT xy = coords[nmeshz + iy] + coords[nmeshypz + ix];
        size_t ixy = nmeshyz*ix + nmeshz*iy;
        for (int iz=0; iz<nmesh[2]; iz++) mesh[ixy + iz] *= (xy + coords[iz]);
      }
    }
  }
  else {
#pragma omp parallel for shared(mesh)
    for (int ix=0; ix<nmesh[0]; ix++) {
      for (int iy=0; iy<nmesh[1]; iy++) {
        FLOAT xy = coords[nmeshz + iy] + coords[nmeshypz + ix];
        size_t ixy = nmeshyz*ix + nmeshz*iy;
        for (int iz=0; iz<nmesh[2]; iz++) mesh[ixy + iz] *= POW((xy + coords[iz]), exp);
      }
    }
  }
  // Was `return 0.;` — a double constant implicitly converted to int.
  return 0;
}
|
base.h | #include "callback.h"
#include <omp.h>
int main()
{
  unsigned int i;

  // Four-thread parallel loop whose OMPT events (parallel / implicit-task /
  // loop begin+end) are captured by the callback tool from callback.h.
  // SCHEDULE is injected by the test harness at compile time; the CHECK
  // lines below are FileCheck patterns verifying the event sequence for
  // the master thread and the three workers.
#pragma omp parallel for num_threads(4) schedule(SCHEDULE)
  for (i = 0; i < 4; i++) {
  }

  // CHECK: 0: NULL_POINTER=[[NULL:.*$]]
  // CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id={{[0-9]+}}, parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker={{.*}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_begin: parallel_id=[[PARALLEL_ID]], parent_task_id=[[IMPLICIT_TASK_ID]], workshare_function=0x{{[0-f]+}}
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_loop_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  // CHECK: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
  return 0;
}
|
task_dep1.c | // Task dependencies are part of OpenMP 4.0, which is only supported in GCC 4.9 or newer.
#include <stdio.h>
int main()
{
  int x = 1;
  #pragma omp parallel
  #pragma omp single
  {
    // The first task writes x (depend(out)), the second reads it
    // (depend(in)); the dependency forces the runtime to run them in
    // order, so this prints "x = 2".
    #pragma omp task shared(x) depend(out: x)
    x = 2;
    #pragma omp task shared(x) depend(in: x)
    printf("x = %d\n", x);
  }
  return 0;
}
|
opencl_blockchain_fmt_plug.c | /* blockchain "My Wallet" cracker patch for JtR. Hacked together during June of
* 2013 by Dhiru Kholia <dhiru at openwall.com>.
*
* See https://blockchain.info/wallet/wallet-format
* This software is Copyright (c) 2012 Lukas Odzioba <ukasz at openwall.net>
* and Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com>, and it is
* hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*
* Improved detection, added iteration count and handle v2 hashes, Feb, 2015, JimF.
*/
#ifdef HAVE_OPENCL
#if FMT_EXTERNS_H
extern struct fmt_main fmt_opencl_blockchain;
#elif FMT_REGISTERS_H
john_register_one(&fmt_opencl_blockchain);
#else
#include <stdint.h>
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "arch.h"
#include "aes.h"
#include "formats.h"
#include "common.h"
#include "jumbo.h"
#include "common-opencl.h"
#include "options.h"
#include "blockchain_common.h"
#define FORMAT_LABEL "blockchain-opencl"
#define FORMAT_NAME "blockchain My Wallet"
#define ALGORITHM_NAME "PBKDF2-SHA1 OpenCL AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#define BINARY_SIZE 0
#define PLAINTEXT_LENGTH 64
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_ALIGN 4
#define BIG_ENOUGH (8192 * 32)
// increase me (in multiples of 16) to increase the decrypted and search area
#define SAFETY_FACTOR 160
/* Host-side mirror of the kernel's input record: one password candidate. */
typedef struct {
	uint32_t length;
	uint8_t v[PLAINTEXT_LENGTH];
} blockchain_password;

/* 256-bit derived key written back by the PBKDF2 kernel. */
typedef struct {
	uint32_t v[32/4];
} blockchain_hash;

/* PBKDF2 parameters uploaded to the device once per salt. */
typedef struct {
	uint32_t iterations;
	uint32_t outlen;
	uint32_t skip_bytes;
	uint8_t length;
	uint8_t salt[64];
} blockchain_salt;

/* Per-candidate crack flags (cracked) plus the current batch's host and
   device buffers. */
static int *cracked;
static int any_cracked;
static struct custom_salt *cur_salt;
static cl_int cl_error;
static blockchain_password *inbuffer;
static blockchain_hash *outbuffer;
static blockchain_salt currentsalt;
static cl_mem mem_in, mem_out, mem_setting;
static struct fmt_main *self;
size_t insize, outsize, settingsize, cracked_size;

#define STEP 0
#define SEED 256

// This file contains auto-tuning routine(s). Has to be included after formats definitions.
#include "opencl_autotune.h"
#include "memdbg.h"

/* Labels used by the auto-tuner when reporting per-phase timings. */
static const char * warn[] = {
	"xfer: ", ", crypt: ", ", xfer: "
};
/* ------- Helper functions ------- */
/* Upper bound on the local work size for the crypt kernel on this device. */
static size_t get_task_max_work_group_size()
{
	return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel);
}
/* Allocate host and device buffers sized for gws (global work size)
   candidates, and bind the device buffers to the kernel arguments once. */
static void create_clobj(size_t gws, struct fmt_main *self)
{
	insize = sizeof(blockchain_password) * gws;
	outsize = sizeof(blockchain_hash) * gws;
	settingsize = sizeof(blockchain_salt);
	cracked_size = sizeof(*cracked) * gws;

	inbuffer = mem_calloc(1, insize);
	outbuffer = mem_alloc(outsize);
	cracked = mem_calloc(1, cracked_size);

	/// Allocate memory
	mem_in =
		clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL,
		&cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem in");
	mem_setting =
		clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize,
		NULL, &cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem setting");
	mem_out =
		clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL,
		&cl_error);
	HANDLE_CLERROR(cl_error, "Error allocating mem out");

	/* Kernel arguments are stable across launches, so set them here. */
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in),
		&mem_in), "Error while setting mem_in kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out),
		&mem_out), "Error while setting mem_out kernel argument");
	HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting),
		&mem_setting), "Error while setting mem_salt kernel argument");
}
/* Release the buffers created by create_clobj(); `cracked` doubles as the
   "buffers are allocated" flag. */
static void release_clobj(void)
{
	if (cracked) {
		HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in");
		HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting");
		HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out");

		MEM_FREE(inbuffer);
		MEM_FREE(outbuffer);
		MEM_FREE(cracked);
	}
}
/* Tear down the kernel/program built by reset(); `autotuned` tracks whether
   a tuned build is live. */
static void done(void)
{
	if (autotuned) {
		release_clobj();
		HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel");
		HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program");
		autotuned--;
	}
}
/* Format init: remember the format descriptor and prepare the OpenCL
   device; kernel build and tuning are deferred to reset(). */
static void init(struct fmt_main *_self)
{
	self = _self;
	opencl_prepare_dev(gpu_id);
}
/* Build the PBKDF2-SHA1 kernel (once) and auto-tune work sizes. */
static void reset(struct db_main *db)
{
	if (!autotuned) {
		char build_opts[64];

		/* Compile-time constants baked into the kernel source. */
		snprintf(build_opts, sizeof(build_opts),
			"-DKEYLEN=%d -DSALTLEN=%d -DOUTLEN=%d",
			PLAINTEXT_LENGTH,
			(int)sizeof(currentsalt.salt),
			(int)sizeof(outbuffer->v));
		opencl_init("$JOHN/kernels/pbkdf2_hmac_sha1_unsplit_kernel.cl",
			gpu_id, build_opts);

		crypt_kernel = clCreateKernel(program[gpu_id], "derive_key", &cl_error);
		HANDLE_CLERROR(cl_error, "Error creating kernel");

		// Initialize openCL tuning (library) for this format.
		opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self,
			create_clobj, release_clobj,
			sizeof(blockchain_password), 0, db);

		// Auto tune execution from shared/included code.
		autotune_run(self, 1, 0, 1000);
	}
}
/* Stage the salt for the next crypt_all() batch: the first 16 bytes of the
   wallet data serve as the PBKDF2 salt, deriving a 32-byte key with the
   stored iteration count. */
static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
	memcpy((char*)currentsalt.salt, cur_salt->data, 16);
	currentsalt.length = 16;
	currentsalt.iterations = cur_salt->iter;
	currentsalt.outlen = 32;
	currentsalt.skip_bytes = 0;

	/* Non-blocking (CL_FALSE) enqueue; the same command queue orders this
	   write before the subsequent kernel launch. */
	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting,
		CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL),
	    "Copy salt to gpu");
}
#undef set_key
/* Store one password candidate in the host input buffer, truncating it to
   PLAINTEXT_LENGTH bytes. */
static void set_key(char *key, int index)
{
	size_t length = strlen(key);

	/* size_t then clamp: the previous `uint8_t length = strlen(key)` wrapped
	   modulo 256 before the clamp, so a 256-byte candidate became empty. */
	if (length > PLAINTEXT_LENGTH)
		length = PLAINTEXT_LENGTH;
	inbuffer[index].length = length;
	memcpy(inbuffer[index].v, key, length);
}
/* Return a NUL-terminated copy of the stored candidate at `index`.
   Note: the returned pointer refers to a static buffer that is
   overwritten by the next call. */
static char *get_key(int index)
{
	static char out[PLAINTEXT_LENGTH + 1];
	const uint8_t len = inbuffer[index].length;

	memcpy(out, inbuffer[index].v, len);
	out[len] = '\0';
	return out;
}
/* Derive one PBKDF2 key per candidate on the GPU, then finish on the CPU
   by attempting to decrypt the wallet with each key. Returns count. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int index;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	/* Clear results from the previous batch. */
	if (any_cracked) {
		memset(cracked, 0, cracked_size);
		any_cracked = 0;
	}

	/// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		insize, inbuffer, 0, NULL, multi_profilingEvent[0]),
		"Copy data to gpu");

	/// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	/// Read the result back (CL_TRUE: blocks until the kernel is done)
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back");

	if (ocl_autotune_running)
		return count;

	/* CPU post-processing: blockchain_decrypt() returns 0 on success. */
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (index = 0; index < count; index++)
		if (!blockchain_decrypt((unsigned char*)outbuffer[index].v, cur_salt->data))
		{
			cracked[index] = 1;
#ifdef _OPENMP
#pragma omp atomic
#endif
			any_cracked |= 1;
		}
	return count;
}
/* Did any candidate in the last batch crack? */
static int cmp_all(void *binary, int count)
{
	return any_cracked;
}

/* Did candidate `index` crack? */
static int cmp_one(void *binary, int index)
{
	return cracked[index];
}

/* crypt_all()'s decrypt check is already a full verification. */
static int cmp_exact(char *source, int index)
{
	return 1;
}
/* Format descriptor registered with John the Ripper: static parameters
   first, then the method table. */
struct fmt_main fmt_opencl_blockchain = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		blockchain_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		blockchain_common_valid,
		fmt_default_split,
		fmt_default_binary,
		blockchain_common_get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};
#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
|
tnlDirectEikonalMethodBase3D_impl.h | #pragma once
template< typename Real,
          typename Device,
          typename Index >
void
tnlDirectEikonalMethodsBase< Meshes::Grid< 3, Real, Device, Index > >::
initInterface( const MeshFunctionPointer& _input,
               MeshFunctionPointer& _output,
               InterfaceMapPointer& _interfaceMap,
               StaticVector vLower, StaticVector vUpper )
{
   // Seeds the eikonal solver from the level-set function _input:
   //  - _output is set to +max()/-max() according to the sign of the input,
   //  - cells with a sign change towards a neighbour get a first-order
   //    distance estimate and are flagged in _interfaceMap.
   // vLower/vUpper give margins to skip at the domain edges (presumably the
   // distributed-grid overlap widths -- confirm against the caller).
   if( std::is_same< Device, Devices::Cuda >::value )
   {
#ifdef HAVE_CUDA
      const MeshType& mesh = _input->getMesh();
      const int cudaBlockSize( 8 );
      int numBlocksX = Cuda::getNumberOfBlocks( mesh.getDimensions().x(), cudaBlockSize );
      int numBlocksY = Cuda::getNumberOfBlocks( mesh.getDimensions().y(), cudaBlockSize );
      int numBlocksZ = Cuda::getNumberOfBlocks( mesh.getDimensions().z(), cudaBlockSize );
      // NOTE(review): this only warns and then launches anyway -- consider
      // aborting when the CUDA grid/block limits are exceeded.
      if( cudaBlockSize * cudaBlockSize * cudaBlockSize > 1024 || numBlocksX > 1024 || numBlocksY > 1024 || numBlocksZ > 64 )
         std::cout << "Invalid kernel call. Dimensions of grid are max: [1024,1024,64], and maximum threads per block are 1024!" << std::endl;
      dim3 blockSize( cudaBlockSize, cudaBlockSize, cudaBlockSize );
      dim3 gridSize( numBlocksX, numBlocksY, numBlocksZ );
      Pointers::synchronizeSmartPointersOnDevice< Devices::Cuda >();
      CudaInitCaller3d<<< gridSize, blockSize >>>( _input.template getData< Device >(),
                                                   _output.template modifyData< Device >(),
                                                   _interfaceMap.template modifyData< Device >(), vLower, vUpper );
      cudaDeviceSynchronize();
      TNL_CHECK_CUDA_DEVICE;
#endif
   }
   if( std::is_same< Device, Devices::Host >::value )
   {
      const MeshFunctionType& input = _input.getData();
      MeshFunctionType& output = _output.modifyData();
      InterfaceMapType& interfaceMap = _interfaceMap.modifyData();
      const MeshType& mesh = input.getMesh();
      typedef typename MeshType::Cell Cell;
      Cell cell( mesh );
      // Pass 1: initialize every cell to +/- "infinity" by the input's sign
      // and clear the interface map.
      for( cell.getCoordinates().z() = 0;
           cell.getCoordinates().z() < mesh.getDimensions().z();
           cell.getCoordinates().z() ++ )
         for( cell.getCoordinates().y() = 0;
              cell.getCoordinates().y() < mesh.getDimensions().y();
              cell.getCoordinates().y() ++ )
            for( cell.getCoordinates().x() = 0;
                 cell.getCoordinates().x() < mesh.getDimensions().x();
                 cell.getCoordinates().x() ++ )
            {
               cell.refresh();
               output[ cell.getIndex() ] =
                  input( cell ) > 0 ? std::numeric_limits< RealType >::max() :
                                    - std::numeric_limits< RealType >::max();
               interfaceMap[ cell.getIndex() ] = false;
            }
      const RealType& hx = mesh.getSpaceSteps().x();
      const RealType& hy = mesh.getSpaceSteps().y();
      const RealType& hz = mesh.getSpaceSteps().z();
      // Pass 2: for every interior cell, look at the east (e), north (n) and
      // top (t) neighbour; a sign change means the zero level set crosses
      // between the two cells.  Both sides then get the linearly
      // interpolated signed distance and are marked as interface cells.
      for( cell.getCoordinates().z() = 0 + vLower[2];
           cell.getCoordinates().z() < mesh.getDimensions().z() - vUpper[2];
           cell.getCoordinates().z() ++ )
         for( cell.getCoordinates().y() = 0 + vLower[1];
              cell.getCoordinates().y() < mesh.getDimensions().y() - vUpper[1];
              cell.getCoordinates().y() ++ )
            for( cell.getCoordinates().x() = 0 + vLower[0];
                 cell.getCoordinates().x() < mesh.getDimensions().x() - vUpper[0];
                 cell.getCoordinates().x() ++ )
            {
               cell.refresh();
               const RealType& c = input( cell );
               if( ! cell.isBoundaryEntity() )
               {
                  auto neighbors = cell.getNeighborEntities();
                  Real pom = 0;
                  const IndexType e = neighbors.template getEntityIndex< 1, 0, 0 >();
                  const IndexType n = neighbors.template getEntityIndex< 0, 1, 0 >();
                  const IndexType t = neighbors.template getEntityIndex< 0, 0, 1 >();
                  if( c * input[ n ] <= 0 )
                  {
                     // Distance from this cell to the zero crossing along y;
                     // keep it only if closer than the current estimate.
                     pom = TNL::sign( c )*( hy * c )/( c - input[ n ]);
                     if( TNL::abs( output[ cell.getIndex() ] ) > TNL::abs( pom ) )
                        output[ cell.getIndex() ] = pom;
                     // Remainder of the step belongs to the neighbour side.
                     pom = pom - TNL::sign( c )*hy;
                     if( TNL::abs( output[ n ] ) > TNL::abs( pom ) )
                        output[ n ] = pom; //( hy * c )/( c - input[ n ]) - hy;
                     interfaceMap[ cell.getIndex() ] = true;
                     interfaceMap[ n ] = true;
                  }
                  if( c * input[ e ] <= 0 )
                  {
                     // Same update along the x direction.
                     pom = TNL::sign( c )*( hx * c )/( c - input[ e ]);
                     if( TNL::abs( output[ cell.getIndex() ] ) > TNL::abs( pom ) )
                        output[ cell.getIndex() ] = pom;
                     pom = pom - TNL::sign( c )*hx;
                     if( TNL::abs( output[ e ] ) > TNL::abs( pom ) )
                        output[ e ] = pom;
                     interfaceMap[ cell.getIndex() ] = true;
                     interfaceMap[ e ] = true;
                  }
                  if( c * input[ t ] <= 0 )
                  {
                     // Same update along the z direction.
                     pom = TNL::sign( c )*( hz * c )/( c - input[ t ]);
                     if( TNL::abs( output[ cell.getIndex() ] ) > TNL::abs( pom ) )
                        output[ cell.getIndex() ] = pom;
                     pom = pom - TNL::sign( c )*hz;
                     if( TNL::abs( output[ t ] ) > TNL::abs( pom ) )
                        output[ t ] = pom;
                     interfaceMap[ cell.getIndex() ] = true;
                     interfaceMap[ t ] = true;
                  }
               }
            }
   }
}
template< typename Real,
          typename Device,
          typename Index >
template< typename MeshEntity >
__cuda_callable__
bool
tnlDirectEikonalMethodsBase< Meshes::Grid< 3, Real, Device, Index > >::
updateCell( MeshFunctionType& u,
            const MeshEntity& cell,
            const RealType v )
{
   // One local relaxation step of the eikonal update: gather the
   // smaller-in-magnitude neighbour value along each axis, solve the local
   // quadratic via getNewValue() and write the result back into u.
   // Returns true when the cell value moved by more than the tolerance.
   const auto& nb = cell.template getNeighborEntities< 3 >();
   const MeshType& mesh = cell.getMesh();
   const RealType stepX = mesh.getSpaceSteps().x();
   const RealType stepY = mesh.getSpaceSteps().y();
   const RealType stepZ = mesh.getSpaceSteps().z();
   const RealType oldValue = u( cell );
   const RealType farAway = std::numeric_limits< RealType >::max();

   // x-direction minimum (one-sided at the domain boundary).
   RealType xMin;
   if( cell.getCoordinates().x() == 0 )
      xMin = u[ nb.template getEntityIndex< 1, 0, 0 >() ];
   else if( cell.getCoordinates().x() == mesh.getDimensions().x() - 1 )
      xMin = u[ nb.template getEntityIndex< -1, 0, 0 >() ];
   else
      xMin = TNL::argAbsMin( u[ nb.template getEntityIndex< -1, 0, 0 >() ],
                             u[ nb.template getEntityIndex< 1, 0, 0 >() ] );

   // y-direction minimum.
   RealType yMin;
   if( cell.getCoordinates().y() == 0 )
      yMin = u[ nb.template getEntityIndex< 0, 1, 0 >() ];
   else if( cell.getCoordinates().y() == mesh.getDimensions().y() - 1 )
      yMin = u[ nb.template getEntityIndex< 0, -1, 0 >() ];
   else
      yMin = TNL::argAbsMin( u[ nb.template getEntityIndex< 0, -1, 0 >() ],
                             u[ nb.template getEntityIndex< 0, 1, 0 >() ] );

   // z-direction minimum.
   RealType zMin;
   if( cell.getCoordinates().z() == 0 )
      zMin = u[ nb.template getEntityIndex< 0, 0, 1 >() ];
   else if( cell.getCoordinates().z() == mesh.getDimensions().z() - 1 )
      zMin = u[ nb.template getEntityIndex< 0, 0, -1 >() ];
   else
      zMin = TNL::argAbsMin( u[ nb.template getEntityIndex< 0, 0, -1 >() ],
                             u[ nb.template getEntityIndex< 0, 0, 1 >() ] );

   // No informed neighbour in any direction -- nothing to update from yet.
   if( fabs( xMin ) == farAway && fabs( yMin ) == farAway && fabs( zMin ) == farAway )
      return false;

   RealType valuesAndSteps[ 6 ] = { xMin, yMin, zMin, stepX, stepY, stepZ };
   u[ cell.getIndex() ] = getNewValue( valuesAndSteps, oldValue, v );
   // "Changed" means the value moved by more than 0.1% of the x step.
   return fabs( oldValue - u[ cell.getIndex() ] ) > 0.001 * stepX;
}
template< typename Real,
          typename Device,
          typename Index >
template< int sizeSArray >
__cuda_callable__
bool
tnlDirectEikonalMethodsBase< Meshes::Grid< 3, Real, Device, Index > >::
updateCell( volatile Real *sArray, int thri, int thrj, int thrk,
            const Real hx, const Real hy, const Real hz, const Real v )
{
   // Buffer variant of the eikonal cell update: the cell lives at
   // (thri, thrj, thrk) inside a sizeSArray^3 scratch array that already
   // carries a one-cell halo, so no boundary special cases are needed.
   const int plane = sizeSArray * sizeSArray;
   const int center = thrk * plane + thrj * sizeSArray + thri;
   const RealType oldValue = sArray[ center ];
   const RealType farAway = std::numeric_limits< RealType >::max();

   // Smaller-in-magnitude neighbour along each axis.
   const RealType zMin = TNL::argAbsMin( sArray[ center + plane ], sArray[ center - plane ] );
   const RealType yMin = TNL::argAbsMin( sArray[ center + sizeSArray ], sArray[ center - sizeSArray ] );
   const RealType xMin = TNL::argAbsMin( sArray[ center + 1 ], sArray[ center - 1 ] );

   // All neighbours still uninitialized -- nothing to propagate from.
   if( fabs( xMin ) == farAway && fabs( yMin ) == farAway && fabs( zMin ) == farAway )
      return false;

   RealType valuesAndSteps[ 6 ] = { xMin, yMin, zMin, hx, hy, hz };
   sArray[ center ] = getNewValue( valuesAndSteps, oldValue, v );
   // Report a change only when it exceeds 0.1% of the x step.
   return fabs( oldValue - sArray[ center ] ) > 0.001 * hx;
}
template< typename Real,
          typename Device,
          typename Index >
__cuda_callable__
Real
tnlDirectEikonalMethodsBase< Meshes::Grid< 3, Real, Device, Index > >::
getNewValue( RealType valuesAndSteps[], const RealType originalValue, const RealType v )
{
   // Solves the local eikonal quadratic (update formula from Zhao's fast
   // sweeping method).  valuesAndSteps[0..2] are the axis minima and
   // [3..5] the matching grid steps; sortMinims() orders the pairs by
   // ascending magnitude of the minima.  The 1D, 2D and full 3D candidate
   // solutions are tried in turn; a candidate is accepted when it does not
   // reach the next unused minimum.  argAbsMin keeps whichever of the
   // original value and the candidate is closer to zero.
   sortMinims( valuesAndSteps );
   const RealType m0 = valuesAndSteps[ 0 ], m1 = valuesAndSteps[ 1 ], m2 = valuesAndSteps[ 2 ];
   const RealType h0 = valuesAndSteps[ 3 ], h1 = valuesAndSteps[ 4 ], h2 = valuesAndSteps[ 5 ];
   const RealType sgn = TNL::sign( originalValue );

   // 1D update: only the closest neighbour contributes.
   RealType candidate = m0 + sgn * h0 / v;
   if( fabs( candidate ) < fabs( m1 ) )
      return argAbsMin( originalValue, candidate );

   // 2D update from the two closest neighbours.
   candidate = ( h0 * h0 * m1 + h1 * h1 * m0 +
                 sgn * h0 * h1 * TNL::sqrt( ( h0 * h0 + h1 * h1 )/( v * v ) -
                 ( m1 - m0 ) * ( m1 - m0 ) ) )/( h0 * h0 + h1 * h1 );
   if( fabs( candidate ) < fabs( m2 ) )
      return argAbsMin( originalValue, candidate );

   // Full 3D update (Zhao's formula).
   candidate = ( h1 * h1 * h2 * h2 * m0 + h0 * h0 * h2 * h2 * m1 + h0 * h0 * h1 * h1 * m2 +
                 sgn * h0 * h1 * h2 * TNL::sqrt(
                    ( h0 * h0 * h2 * h2 + h1 * h1 * h2 * h2 + h0 * h0 * h1 * h1 )/( v * v ) -
                    h2 * h2 * ( m0 - m1 ) * ( m0 - m1 ) -
                    h1 * h1 * ( m0 - m2 ) * ( m0 - m2 ) -
                    h0 * h0 * ( m1 - m2 ) * ( m1 - m2 ) ) )/
               ( h0 * h0 * h1 * h1 + h1 * h1 * h2 * h2 + h2 * h2 * h0 * h0 );
   return argAbsMin( originalValue, candidate );
}
#ifdef HAVE_CUDA
template < typename Real, typename Device, typename Index >
__global__ void CudaInitCaller3d( const Functions::MeshFunctionView< Meshes::Grid< 3, Real, Device, Index > >& input,
                                  Functions::MeshFunctionView< Meshes::Grid< 3, Real, Device, Index > >& output,
                                  Functions::MeshFunctionView< Meshes::Grid< 3, Real, Device, Index >, 3, bool >& interfaceMap,
                                  Containers::StaticVector< 3, Index > vLower, Containers::StaticVector< 3, Index > vUpper )
{
   // One thread per grid cell: seed `output` with +/-max() by the sign of
   // `input` and, for cells inside the vLower/vUpper margins whose value
   // changes sign towards any of the six neighbours, store a first-order
   // distance estimate in `output` and mark the cell in `interfaceMap`.
   int i = threadIdx.x + blockDim.x*blockIdx.x;
   int j = blockDim.y*blockIdx.y + threadIdx.y;
   int k = blockDim.z*blockIdx.z + threadIdx.z;
   const Meshes::Grid< 3, Real, Device, Index >& mesh = input.template getMesh< Devices::Cuda >();
   if( i < mesh.getDimensions().x() && j < mesh.getDimensions().y() && k < mesh.getDimensions().z() )
   {
      typedef typename Meshes::Grid< 3, Real, Device, Index >::Cell Cell;
      Cell cell( mesh );
      cell.getCoordinates().x() = i; cell.getCoordinates().y() = j; cell.getCoordinates().z() = k;
      cell.refresh();
      const Index cind = cell.getIndex();
      // NOTE(review): the host path in initInterface() uses `input( cell ) > 0`
      // here; `>=` sends exact zeros to +max() -- confirm which is intended.
      output[ cind ] =
             input( cell ) >= 0 ? std::numeric_limits< Real >::max() :
                                - std::numeric_limits< Real >::max();
      interfaceMap[ cind ] = false;
      cell.refresh();
      // Only interior cells (outside the overlap margins) look for the interface.
      // BUGFIX: the k bound previously used getDimensions().y() instead of .z(),
      // which checked the wrong z-range on non-cubic grids (the host path in
      // initInterface() uses .z() - vUpper[2] for the same bound).
      if( i < mesh.getDimensions().x() - vUpper[0] && j < mesh.getDimensions().y() - vUpper[1] &&
          k < mesh.getDimensions().z() - vUpper[2] && i>vLower[0]-1 && j> vLower[1]-1 && k>vLower[2]-1 )
      {
         const Real& hx = mesh.getSpaceSteps().x();
         const Real& hy = mesh.getSpaceSteps().y();
         const Real& hz = mesh.getSpaceSteps().z();
         const Real& c = input( cell );
         if( ! cell.isBoundaryEntity() )
         {
            auto neighbors = cell.getNeighborEntities();
            Real pom = 0;
            const Index e = neighbors.template getEntityIndex<  1, 0, 0 >();
            const Index w = neighbors.template getEntityIndex< -1, 0, 0 >();
            const Index n = neighbors.template getEntityIndex< 0,  1, 0 >();
            const Index s = neighbors.template getEntityIndex< 0, -1, 0 >();
            const Index t = neighbors.template getEntityIndex< 0, 0,  1 >();
            const Index b = neighbors.template getEntityIndex< 0, 0, -1 >();
            // For each sign change, keep the smaller-magnitude linear
            // interpolation of the distance to the zero level set.
            if( c * input[ n ] <= 0 )
            {
               pom = TNL::sign( c )*( hy * c )/( c - input[ n ]);
               if( TNL::abs( output[ cind ] ) > TNL::abs( pom ) )
                  output[ cind ] = pom;
               interfaceMap[ cind ] = true;
            }
            if( c * input[ e ] <= 0 )
            {
               pom = TNL::sign( c )*( hx * c )/( c - input[ e ]);
               if( TNL::abs( output[ cind ] ) > TNL::abs( pom ) )
                  output[ cind ] = pom;
               interfaceMap[ cind ] = true;
            }
            if( c * input[ w ] <= 0 )
            {
               pom = TNL::sign( c )*( hx * c )/( c - input[ w ]);
               if( TNL::abs( output[ cind ] ) > TNL::abs( pom ) )
                  output[ cind ] = pom;
               interfaceMap[ cind ] = true;
            }
            if( c * input[ s ] <= 0 )
            {
               pom = TNL::sign( c )*( hy * c )/( c - input[ s ]);
               if( TNL::abs( output[ cind ] ) > TNL::abs( pom ) )
                  output[ cind ] = pom;
               interfaceMap[ cind ] = true;
            }
            if( c * input[ b ] <= 0 )
            {
               pom = TNL::sign( c )*( hz * c )/( c - input[ b ]);
               if( TNL::abs( output[ cind ] ) > TNL::abs( pom ) )
                  output[ cind ] = pom;
               interfaceMap[ cind ] = true;
            }
            if( c * input[ t ] <= 0 )
            {
               pom = TNL::sign( c )*( hz * c )/( c - input[ t ]);
               if( TNL::abs( output[ cind ] ) > TNL::abs( pom ) )
                  output[ cind ] = pom;
               interfaceMap[ cind ] = true;
            }
         }
      }
   }
}
template < typename Index >
__global__ void GetNeighbours( TNL::Containers::ArrayView< int, Devices::Cuda, Index > BlockIterDevice,
                               TNL::Containers::ArrayView< int, Devices::Cuda, Index > BlockIterPom,
                               int numBlockX, int numBlockY, int numBlockZ )
{
   // One thread per solver block: BlockIterPom[idx] becomes 1 when the
   // block is already active in BlockIterDevice, or when any of its six
   // face neighbours is active; otherwise 0.
   const int idx = blockIdx.x * 1024 + threadIdx.x;
   if( idx >= numBlockX * numBlockY * numBlockZ )
      return;

   // Decompose the flat index into block coordinates (x fastest).
   const int plane = numBlockX * numBlockY;
   const int bz = idx / plane;
   const int rest = idx - bz * plane;
   const int by = rest / numBlockX;
   const int bx = rest % numBlockX;

   const bool neighbourActive =
        ( bx > 0             && BlockIterDevice[ idx - 1 ] )          // left
     || ( bx < numBlockX - 1 && BlockIterDevice[ idx + 1 ] )          // right
     || ( by > 0             && BlockIterDevice[ idx - numBlockX ] )  // bottom
     || ( by < numBlockY - 1 && BlockIterDevice[ idx + numBlockX ] )  // top
     || ( bz > 0             && BlockIterDevice[ idx - plane ] )      // behind
     || ( bz < numBlockZ - 1 && BlockIterDevice[ idx + plane ] );     // in front

   // Only CudaUpdateCellCaller can clear a block, so an already active
   // block stays active here.
   if( BlockIterDevice[ idx ] )
      BlockIterPom[ idx ] = 1;
   else
      BlockIterPom[ idx ] = neighbourActive ? 1 : 0;
}
template < int sizeSArray, typename Real, typename Device, typename Index >
__global__ void CudaUpdateCellCaller( tnlDirectEikonalMethodsBase< Meshes::Grid< 3, Real, Device, Index > > ptr,
                                      const Functions::MeshFunctionView< Meshes::Grid< 3, Real, Device, Index >, 3, bool >& interfaceMap,
                                      const Functions::MeshFunctionView< Meshes::Grid< 3, Real, Device, Index > >& aux,
                                      Functions::MeshFunctionView< Meshes::Grid< 3, Real, Device, Index > >& helpFunc,
                                      TNL::Containers::ArrayView< int, Devices::Cuda, Index > BlockIterDevice,
                                      Containers::StaticVector< 3, Index > vecLowerOverlaps, Containers::StaticVector< 3, Index > vecUpperOverlaps )
{
   // Block-wise iterative sweep: each active CUDA block copies its tile of
   // `aux` plus a one-cell halo into the shared buffer sArray, relaxes the
   // cells via ptr.updateCell() until no thread reports a change, then
   // writes the tile into `helpFunc` (a scratch function, to avoid
   // read/write conflicts with `aux`).  BlockIterDevice[block] is an
   // in/out activity flag per block; inactive blocks only copy aux ->
   // helpFunc.  vecLower/UpperOverlaps are margins skipped at domain edges.
   int thri = threadIdx.x; int thrj = threadIdx.y; int thrk = threadIdx.z;
   int i = threadIdx.x + blockDim.x*blockIdx.x + vecLowerOverlaps[0]; // WITH OVERLAPS!!! i,j,k aren't coordinates of all values
   int j = blockDim.y*blockIdx.y + threadIdx.y + vecLowerOverlaps[1];
   int k = blockDim.z*blockIdx.z + threadIdx.z + vecLowerOverlaps[2];
   int currentIndex = thrk * blockDim.x * blockDim.y + thrj * blockDim.x + thri;
   const Meshes::Grid< 3, Real, Device, Index >& mesh = interfaceMap.template getMesh< Devices::Cuda >();
   // should this block calculate?
   if( BlockIterDevice[ blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x ] )
   {
      __syncthreads();
      // Array indicates whether some threads calculated (for parallel reduction)
      __shared__ volatile bool changed[ (sizeSArray - 2)*(sizeSArray - 2)*(sizeSArray - 2) ];
      changed[ currentIndex ] = false;
      if( thrj == 0 && thri == 0 && thrk == 0 )
         changed[ 0 ] = true; // first slot indicates whether we should calculate again (principle of parallel reduction)
      // getting steps and size of mesh
      const Real hx = mesh.getSpaceSteps().x(); const int dimX = mesh.getDimensions().x();
      const Real hy = mesh.getSpaceSteps().y(); const int dimY = mesh.getDimensions().y();
      const Real hz = mesh.getSpaceSteps().z(); const int dimZ = mesh.getDimensions().z();
      if( thrj == 1 && thri == 1 && thrk == 1 )
      {
         // we don't know yet whether this block will change anything; clear the
         // flag now and set it again below if any iteration reports a change
         BlockIterDevice[ blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x ] = 0;
      }
      // sArray contains values of one block (copied from aux) and edges (not MPI) of those blocks
      __shared__ volatile Real sArray[ sizeSArray * sizeSArray * sizeSArray ];
      sArray[(thrk+1)* sizeSArray * sizeSArray + (thrj+1) *sizeSArray + thri+1] = std::numeric_limits< Real >::max();
      int xkolik = blockDim.x + 1;// maximum of threads in x direction (different for edge blocks)
      int ykolik = blockDim.y + 1;
      int zkolik = blockDim.z + 1;
      __syncthreads();
      // trim the extents for the last (possibly partial) block in each direction
      if( gridDim.x - 1 == blockIdx.x )
         xkolik = (dimX-vecUpperOverlaps[0]-vecLowerOverlaps[0]) - blockIdx.x*blockDim.x+1;
      if( gridDim.y -1 == blockIdx.y )
         ykolik = (dimY-vecUpperOverlaps[1]-vecLowerOverlaps[1]) - (blockIdx.y)*blockDim.y+1;
      if( gridDim.z-1 == blockIdx.z )
         zkolik = (dimZ-vecUpperOverlaps[2]-vecLowerOverlaps[2]) - (blockIdx.z)*blockDim.z+1;
      __syncthreads();
      // filling sArray edges: threads thri == 0..5 each load one of the six
      // halo faces (or seed it with max() when the face lies outside the domain)
      if( thri == 0 ) //x bottom
      {
         if( (blockIdx.x != 0 || vecLowerOverlaps[0] !=0) && thrj+1 < ykolik && thrk+1 < zkolik )
            sArray[ (thrk+1 )* sizeSArray * sizeSArray + (thrj+1)*sizeSArray + 0 ] =
               aux[ (blockIdx.z*blockDim.z + vecLowerOverlaps[2]) * dimX * dimY + (blockIdx.y * blockDim.y+vecLowerOverlaps[1])*dimX
                    + blockIdx.x*blockDim.x + thrj * dimX -1 + thrk*dimX*dimY + vecLowerOverlaps[0] ];
         else
            sArray[(thrk+1)* sizeSArray * sizeSArray + (thrj+1)*sizeSArray + 0] = std::numeric_limits< Real >::max();
      }
      if( thri == 1 ) //xtop
      {
         if( dimX - vecLowerOverlaps[ 0 ] > (blockIdx.x+1) * blockDim.x && thrj+1 < ykolik && thrk+1 < zkolik )
            sArray[ (thrk+1) * sizeSArray * sizeSArray + (thrj+1) *sizeSArray + xkolik ] =
               aux[ (blockIdx.z*blockDim.z + vecLowerOverlaps[2]) * dimX * dimY + (blockIdx.y * blockDim.y+vecLowerOverlaps[1])*dimX
                    + blockIdx.x*blockDim.x + blockDim.x + thrj * dimX + thrk*dimX*dimY + vecLowerOverlaps[0] ];
         else
            sArray[ (thrk+1) * sizeSArray * sizeSArray + (thrj+1)*sizeSArray + xkolik] = std::numeric_limits< Real >::max();
      }
      if( thri == 2 ) //y bottom
      {
         if( (blockIdx.y != 0 || vecLowerOverlaps[1] !=0) && thrj+1 < xkolik && thrk+1 < zkolik )
            sArray[ (thrk+1) * sizeSArray * sizeSArray +0*sizeSArray + thrj+1] =
               aux[ (blockIdx.z*blockDim.z + vecLowerOverlaps[2]) * dimX * dimY + (blockIdx.y * blockDim.y+vecLowerOverlaps[1])*dimX
                    + blockIdx.x*blockDim.x - dimX + thrj + thrk*dimX*dimY + vecLowerOverlaps[0] ];
         else
            sArray[ (thrk+1) * sizeSArray * sizeSArray + 0*sizeSArray + thrj+1] = std::numeric_limits< Real >::max();
      }
      if( thri == 3 ) //y top
      {
         if( dimY - vecLowerOverlaps[ 1 ] > (blockIdx.y+1) * blockDim.y && thrj+1 < xkolik && thrk+1 < zkolik )
            sArray[ (thrk+1) * sizeSArray * sizeSArray + ykolik*sizeSArray + thrj+1] =
               aux[ (blockIdx.z*blockDim.z + vecLowerOverlaps[2]) * dimX * dimY + ((blockIdx.y+1) * blockDim.y+vecLowerOverlaps[1])*dimX
                    + blockIdx.x*blockDim.x + thrj + thrk*dimX*dimY + vecLowerOverlaps[0] ];
         else
            sArray[ (thrk+1) * sizeSArray * sizeSArray + ykolik*sizeSArray + thrj+1] = std::numeric_limits< Real >::max();
      }
      if( thri == 4 ) //z bottom
      {
         if( (blockIdx.z != 0 || vecLowerOverlaps[2] !=0) && thrj+1 < ykolik && thrk+1 < xkolik )
            sArray[ 0 * sizeSArray * sizeSArray +(thrj+1 )* sizeSArray + thrk+1] =
               aux[ (blockIdx.z*blockDim.z + vecLowerOverlaps[2]) * dimX * dimY + (blockIdx.y * blockDim.y+vecLowerOverlaps[1])*dimX
                    + blockIdx.x*blockDim.x - dimX * dimY + thrj * dimX + thrk + vecLowerOverlaps[0] ];
         else
            sArray[0 * sizeSArray * sizeSArray + (thrj+1) *sizeSArray + thrk+1] = std::numeric_limits< Real >::max();
      }
      if( thri == 5 ) //z top
      {
         if( dimZ - vecLowerOverlaps[ 2 ] > (blockIdx.z+1) * blockDim.z && thrj+1 < ykolik && thrk+1 < xkolik )
            sArray[ zkolik * sizeSArray * sizeSArray + (thrj+1) * sizeSArray + thrk+1] =
               aux[ ((blockIdx.z+1)*blockDim.z + vecLowerOverlaps[2]) * dimX * dimY + (blockIdx.y * blockDim.y+vecLowerOverlaps[1])*dimX
                    + blockIdx.x*blockDim.x + thrj * dimX + thrk + vecLowerOverlaps[0] ];
         else
            sArray[zkolik * sizeSArray * sizeSArray + (thrj+1) * sizeSArray + thrk+1] = std::numeric_limits< Real >::max();
      }
      // Copy all other values that aren't edges
      if( i - vecLowerOverlaps[0] < dimX && j - vecLowerOverlaps[1] < dimY && k - vecLowerOverlaps[2] < dimZ &&
          thri+1 < xkolik + vecUpperOverlaps[0] && thrj+1 < ykolik + vecUpperOverlaps[1] && thrk+1 < zkolik + vecUpperOverlaps[2] )
      {
         sArray[(thrk+1) * sizeSArray * sizeSArray + (thrj+1) *sizeSArray + thri+1] = aux[ k*dimX*dimY + j*dimX + i ];
      }
      __syncthreads();
      // main while cycle. each value can get information only from a neighbour, but that information has to spread there
      while( changed[ 0 ] )
      {
         __syncthreads();
         changed[ currentIndex ] = false;
         //calculation of update cell
         if( i < dimX - vecUpperOverlaps[0] && j < dimY - vecUpperOverlaps[1] && k < dimZ - vecUpperOverlaps[2] )
         {
            if( ! interfaceMap[ k*dimX*dimY + j * dimX + i ] )
            {
               // calculate new value depending on neighbours in sArray on (thri+1, thrj+1) coordinates
               changed[ currentIndex ] = ptr.updateCell< sizeSArray >( sArray, thri+1, thrj+1, thrk+1, hx,hy,hz);
            }
         }
         __syncthreads();
         // pyramid reduction (parallel OR-reduction of the per-thread flags
         // into changed[ 0 ])
         if( blockDim.x*blockDim.y*blockDim.z == 1024 )
         {
            if( currentIndex < 512 )
            {
               changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 512 ];
            }
         }
         __syncthreads();
         if( blockDim.x*blockDim.y*blockDim.z >= 512 )
         {
            if( currentIndex < 256 )
            {
               changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 256 ];
            }
         }
         __syncthreads();
         if( blockDim.x*blockDim.y*blockDim.z >= 256 )
         {
            if( currentIndex < 128 )
            {
               changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 128 ];
            }
         }
         __syncthreads();
         if( blockDim.x*blockDim.y*blockDim.z >= 128 )
         {
            if( currentIndex < 64 )
            {
               changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 64 ];
            }
         }
         __syncthreads();
         // last warp: no __syncthreads() between steps (warp-synchronous style)
         if( currentIndex < 32 )
         {
            if( true ) changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 32 ];
            if( currentIndex < 16 ) changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 16 ];
            if( currentIndex < 8 ) changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 8 ];
            if( currentIndex < 4 ) changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 4 ];
            if( currentIndex < 2 ) changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 2 ];
            if( currentIndex < 1 ) changed[ currentIndex ] = changed[ currentIndex ] || changed[ currentIndex + 1 ];
         }
         __syncthreads();
         // if we calculated, then BlockIterDevice should contain the info about this whole block! (only one number for one block)
         if( changed[ 0 ] && thri == 0 && thrj == 0 && thrk == 0 )
         {
            BlockIterDevice[ blockIdx.z * gridDim.x * gridDim.y + blockIdx.y * gridDim.x + blockIdx.x ] = 1;
         }
         __syncthreads();
      }
      // copy results into helpFunc (not into aux bcs of conflicts)
      if( i < dimX && j < dimY && k < dimZ && thri+1 < xkolik && thrj+1 < ykolik && thrk+1 < zkolik )
         helpFunc[ k*dimX*dimY + j * dimX + i ] = sArray[ (thrk+1) * sizeSArray * sizeSArray + (thrj+1) * sizeSArray + thri+1 ];
   }
   else // if not, then it should at least copy the values from aux to helpFunc.
   {
      if( i < mesh.getDimensions().x() - vecUpperOverlaps[0] && j < mesh.getDimensions().y() - vecUpperOverlaps[1]
          && k < mesh.getDimensions().z() - vecUpperOverlaps[2])
         helpFunc[ k * mesh.getDimensions().x() * mesh.getDimensions().y() + j * mesh.getDimensions().x() + i ] =
            aux[ k * mesh.getDimensions().x() * mesh.getDimensions().y() + j * mesh.getDimensions().x() + i ];
   }
}
#endif
/// ==========================OPEN=MP=================================
template< typename Real,
typename Device,
typename Index >
template< int sizeSArray >
void
tnlDirectEikonalMethodsBase< Meshes::Grid< 3, Real, Device, Index > >::
updateBlocks( const InterfaceMapType interfaceMap,
const MeshFunctionType aux,
MeshFunctionType& helpFunc,
ArrayContainer BlockIterHost, int numThreadsPerBlock/*, Real **sArray*/ )
{
#pragma omp parallel for schedule( dynamic )
for( IndexType i = 0; i < BlockIterHost.getSize(); i++ )
{
if( BlockIterHost[ i ] )
{
MeshType mesh = interfaceMap.template getMesh< Devices::Host >();
int dimX = mesh.getDimensions().x(); int dimY = mesh.getDimensions().y();
int dimZ = mesh.getDimensions().z();
//std::cout << "dimX = " << dimX << " ,dimY = " << dimY << std::endl;
int numOfBlocky = dimY/numThreadsPerBlock + ((dimY%numThreadsPerBlock != 0) ? 1:0);
int numOfBlockx = dimX/numThreadsPerBlock + ((dimX%numThreadsPerBlock != 0) ? 1:0);
int numOfBlockz = dimZ/numThreadsPerBlock + ((dimZ%numThreadsPerBlock != 0) ? 1:0);
//std::cout << "numOfBlockx = " << numOfBlockx << " ,numOfBlocky = " << numOfBlocky << std::endl;
int xkolik = numThreadsPerBlock + 1;
int ykolik = numThreadsPerBlock + 1;
int zkolik = numThreadsPerBlock + 1;
int blIdz = i/( numOfBlockx * numOfBlocky );
int blIdy = (i-blIdz*numOfBlockx * numOfBlocky )/(numOfBlockx );
int blIdx = (i-blIdz*numOfBlockx * numOfBlocky )%( numOfBlockx );
//std::cout << "blIdx = " << blIdx << " ,blIdy = " << blIdy << std::endl;
if( numOfBlockx - 1 == blIdx )
xkolik = dimX - (blIdx)*numThreadsPerBlock+1;
if( numOfBlocky -1 == blIdy )
ykolik = dimY - (blIdy)*numThreadsPerBlock+1;
if( numOfBlockz-1 == blIdz )
zkolik = dimZ - (blIdz)*numThreadsPerBlock+1;
//std::cout << "xkolik = " << xkolik << " ,ykolik = " << ykolik << std::endl;
/*bool changed[numThreadsPerBlock*numThreadsPerBlock];
changed[ 0 ] = 1;*/
Real hx = mesh.getSpaceSteps().x();
Real hy = mesh.getSpaceSteps().y();
Real hz = mesh.getSpaceSteps().z();
bool changed = false;
BlockIterHost[ i ] = 0;
Real *sArray;
sArray = new Real[ sizeSArray * sizeSArray * sizeSArray ];
if( sArray == nullptr )
std::cout << "Error while allocating memory for sArray." << std::endl;
for( IndexType k = 0; k < sizeSArray; k++ )
for( IndexType l = 0; l < sizeSArray; l++ )
for( IndexType m = 0; m < sizeSArray; m++ ){
sArray[ m * sizeSArray * sizeSArray + k * sizeSArray + l ] = std::numeric_limits< Real >::max();
}
for( IndexType thrk = 0; thrk < numThreadsPerBlock; thrk++ )
for( IndexType thrj = 0; thrj < numThreadsPerBlock; thrj++ )
{
if( blIdx != 0 && thrj+1 < ykolik && thrk+1 < zkolik )
sArray[(thrk+1 )* sizeSArray * sizeSArray + (thrj+1)*sizeSArray + 0] =
aux[ blIdz*numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock*dimX + blIdx*numThreadsPerBlock + thrj * dimX -1 + thrk*dimX*dimY ];
if( dimX > (blIdx+1) * numThreadsPerBlock && thrj+1 < ykolik && thrk+1 < zkolik )
sArray[ (thrk+1) * sizeSArray * sizeSArray + (thrj+1) *sizeSArray + xkolik ] =
aux[ blIdz*numThreadsPerBlock * dimX * dimY + blIdy *numThreadsPerBlock*dimX+ blIdx*numThreadsPerBlock + numThreadsPerBlock + thrj * dimX + thrk*dimX*dimY ];
if( blIdy != 0 && thrj+1 < xkolik && thrk+1 < zkolik )
sArray[ (thrk+1) * sizeSArray * sizeSArray +0*sizeSArray + thrj+1] =
aux[ blIdz*numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock*dimX + blIdx*numThreadsPerBlock - dimX + thrj + thrk*dimX*dimY ];
if( dimY > (blIdy+1) * numThreadsPerBlock && thrj+1 < xkolik && thrk+1 < zkolik )
sArray[ (thrk+1) * sizeSArray * sizeSArray + ykolik*sizeSArray + thrj+1] =
aux[ blIdz*numThreadsPerBlock * dimX * dimY + (blIdy+1) * numThreadsPerBlock*dimX + blIdx*numThreadsPerBlock + thrj + thrk*dimX*dimY ];
if( blIdz != 0 && thrj+1 < ykolik && thrk+1 < xkolik )
sArray[ 0 * sizeSArray * sizeSArray +(thrj+1 )* sizeSArray + thrk+1] =
aux[ blIdz*numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock*dimX + blIdx*numThreadsPerBlock - dimX * dimY + thrj * dimX + thrk ];
if( dimZ > (blIdz+1) * numThreadsPerBlock && thrj+1 < ykolik && thrk+1 < xkolik )
sArray[zkolik * sizeSArray * sizeSArray + (thrj+1) * sizeSArray + thrk+1] =
aux[ (blIdz+1)*numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock*dimX + blIdx*numThreadsPerBlock + thrj * dimX + thrk ];
}
for( IndexType m = 0; m < numThreadsPerBlock; m++ ){
for( IndexType k = 0; k < numThreadsPerBlock; k++ ){
for( IndexType l = 0; l < numThreadsPerBlock; l++ ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
sArray[(m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1] =
aux[ blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ];
}
}
}
/*string s;
int numWhile = 0;
for( int k = 0; k < numThreadsPerBlock; k++ ){
for( int l = 0; l < numThreadsPerBlock; l++ )
for( int m = 0; m < numThreadsPerBlock; m++ )
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
helpFunc[ m*dimX*dimY + k*dimX + l ] = sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
}
numWhile++;
s = "helpFunc-"+ std::to_string(numWhile) + ".tnl";
helpFunc.save( s );*/
for( IndexType m = 0; m < numThreadsPerBlock; m++ ){
for( IndexType k = 0; k < numThreadsPerBlock; k++ ){
for( IndexType l = 0; l < numThreadsPerBlock; l++ ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ ){
//std::cout << "proslo i = " << k * numThreadsPerBlock + l << std::endl;
if( ! interfaceMap[ blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ] )
{
//printf("In with point m = %d, k = %d, l = %d\n", m, k, l);
changed = this->template updateCell< sizeSArray >( sArray, l+1, k+1, m+1, hx,hy,hz) || changed;
}
}
}
}
}
/*for( int k = 0; k < numThreadsPerBlock; k++ ){
for( int l = 0; l < numThreadsPerBlock; l++ )
for( int m = 0; m < numThreadsPerBlock; m++ )
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
helpFunc[ m*dimX*dimY + k*dimX + l ] = sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
}
numWhile++;
s = "helpFunc-"+ std::to_string(numWhile) + ".tnl";
helpFunc.save( s );*/
for( IndexType m = numThreadsPerBlock-1; m >-1; m-- ){
for( IndexType k = 0; k < numThreadsPerBlock; k++ ){
for( IndexType l = 0; l <numThreadsPerBlock; l++ ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
{
if( ! interfaceMap[ blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ] )
{
this->template updateCell< sizeSArray >( sArray, l+1, k+1, m+1, hx,hy,hz);
}
}
}
}
}
/*for( int k = 0; k < numThreadsPerBlock; k++ ){
for( int l = 0; l < numThreadsPerBlock; l++ )
for( int m = 0; m < numThreadsPerBlock; m++ )
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
helpFunc[ m*dimX*dimY + k*dimX + l ] = sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
}
numWhile++;
s = "helpFunc-"+ std::to_string(numWhile) + ".tnl";
helpFunc.save( s );*/
for( IndexType m = 0; m < numThreadsPerBlock; m++ ){
for( IndexType k = 0; k < numThreadsPerBlock; k++ ){
for( IndexType l = numThreadsPerBlock-1; l >-1; l-- ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
{
if( ! interfaceMap[ blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ] )
{
this->template updateCell< sizeSArray >( sArray, l+1, k+1, m+1, hx,hy,hz);
}
}
}
}
}
/*for( int k = 0; k < numThreadsPerBlock; k++ ){
for( int l = 0; l < numThreadsPerBlock; l++ )
for( int m = 0; m < numThreadsPerBlock; m++ )
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
helpFunc[ m*dimX*dimY + k*dimX + l ] = sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
}
numWhile++;
s = "helpFunc-"+ std::to_string(numWhile) + ".tnl";
helpFunc.save( s );
*/
for( IndexType m = numThreadsPerBlock-1; m >-1; m-- ){
for( IndexType k = 0; k < numThreadsPerBlock; k++ ){
for( IndexType l = numThreadsPerBlock-1; l >-1; l-- ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
{
if( ! interfaceMap[ blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ] )
{
this->template updateCell< sizeSArray >( sArray, l+1, k+1, m+1, hx,hy,hz);
}
}
}
}
}
/*for( int k = 0; k < numThreadsPerBlock; k++ ){
for( int l = 0; l < numThreadsPerBlock; l++ )
for( int m = 0; m < numThreadsPerBlock; m++ )
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
helpFunc[ m*dimX*dimY + k*dimX + l ] = sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
}
numWhile++;
s = "helpFunc-"+ std::to_string(numWhile) + ".tnl";
helpFunc.save( s );*/
for( IndexType m = 0; m < numThreadsPerBlock; m++ ){
for( IndexType k = numThreadsPerBlock-1; k > -1; k-- ){
for( IndexType l = 0; l <numThreadsPerBlock; l++ ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
{
if( ! interfaceMap[blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ] )
{
this->template updateCell< sizeSArray >( sArray, l+1, k+1, m+1, hx,hy,hz);
}
}
}
}
}
/*for( int k = 0; k < numThreadsPerBlock; k++ ){
for( int l = 0; l < numThreadsPerBlock; l++ )
for( int m = 0; m < numThreadsPerBlock; m++ )
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
helpFunc[ m*dimX*dimY + k*dimX + l ] = sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
}
numWhile++;
s = "helpFunc-"+ std::to_string(numWhile) + ".tnl";
helpFunc.save( s );*/
for( IndexType m = numThreadsPerBlock-1; m >-1; m-- ){
for( IndexType k = numThreadsPerBlock-1; k > -1; k-- ){
for( IndexType l = 0; l <numThreadsPerBlock; l++ ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
{
if( ! interfaceMap[ blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ] )
{
this->template updateCell< sizeSArray >( sArray, l+1, k+1, m+1, hx,hy,hz);
}
}
}
}
}
/*for( int k = 0; k < numThreadsPerBlock; k++ ){
for( int l = 0; l < numThreadsPerBlock; l++ )
for( int m = 0; m < numThreadsPerBlock; m++ )
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
helpFunc[ m*dimX*dimY + k*dimX + l ] = sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
}
numWhile++;
s = "helpFunc-"+ std::to_string(numWhile) + ".tnl";
helpFunc.save( s );*/
for( IndexType m = 0; m < numThreadsPerBlock; m++ ){
for( IndexType k = numThreadsPerBlock-1; k > -1; k-- ){
for( IndexType l = numThreadsPerBlock-1; l >-1; l-- ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
{
if( ! interfaceMap[ blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ] )
{
this->template updateCell< sizeSArray >( sArray, l+1, k+1, m+1, hx,hy,hz);
}
}
}
}
}
/*for( int k = 0; k < numThreadsPerBlock; k++ ){
for( int l = 0; l < numThreadsPerBlock; l++ )
for( int m = 0; m < numThreadsPerBlock; m++ )
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
helpFunc[ m*dimX*dimY + k*dimX + l ] = sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
}
numWhile++;
s = "helpFunc-"+ std::to_string(numWhile) + ".tnl";
helpFunc.save( s );*/
for( IndexType m = numThreadsPerBlock-1; m >-1; m-- ){
for( IndexType k = numThreadsPerBlock-1; k > -1; k-- ){
for( IndexType l = numThreadsPerBlock-1; l >-1; l-- ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
{
if( ! interfaceMap[ blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ] )
{
this->template updateCell< sizeSArray >( sArray, l+1, k+1, m+1, hx,hy,hz);
}
}
}
}
}
/*for( int k = 0; k < numThreadsPerBlock; k++ ){
for( int l = 0; l < numThreadsPerBlock; l++ )
for( int m = 0; m < numThreadsPerBlock; m++ )
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ )
helpFunc[ m*dimX*dimY + k*dimX + l ] = sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
}
numWhile++;
s = "helpFunc-"+ std::to_string(numWhile) + ".tnl";
helpFunc.save( s );*/
if( changed ){
BlockIterHost[ i ] = 1;
}
for( IndexType k = 0; k < numThreadsPerBlock; k++ ){
for( IndexType l = 0; l < numThreadsPerBlock; l++ ) {
for( IndexType m = 0; m < numThreadsPerBlock; m++ ){
if( blIdy * numThreadsPerBlock + k < dimY && blIdx * numThreadsPerBlock + l < dimX && blIdz * numThreadsPerBlock + m < dimZ ){
helpFunc[ blIdz * numThreadsPerBlock * dimX * dimY + blIdy * numThreadsPerBlock * dimX + blIdx*numThreadsPerBlock + m*dimX*dimY + k*dimX + l ] =
sArray[ (m+1) * sizeSArray * sizeSArray + (k+1) *sizeSArray + l+1 ];
//std::cout << helpFunc[ m*dimX*dimY + k*dimX + l ] << " ";
}
}
//std::cout << std::endl;
}
//std::cout << std::endl;
}
//helpFunc.save( "helpF.tnl");
delete []sArray;
}
}
}
template< typename Real,
          typename Device,
          typename Index >
void
tnlDirectEikonalMethodsBase< Meshes::Grid< 3, Real, Device, Index > >::
getNeighbours( ArrayContainerView BlockIterHost, int numBlockX, int numBlockY, int numBlockZ )
{
   // Expand the set of active blocks by one layer: after this call,
   // BlockIterHost[ i ] is 1 iff block i has at least one face-neighbouring
   // block that was active (non-zero) on entry. Flags are computed into a
   // scratch array first so that writes do not influence the neighbour tests
   // of blocks processed later, then copied back in a second pass.
   //
   // NOTE(review): assumes BlockIterHost.getSize() equals
   // numBlockX * numBlockY * numBlockZ -- confirm at call sites.
   int* BlockIterPom = new int[ numBlockX * numBlockY * numBlockZ ];
   for( int i = 0; i < BlockIterHost.getSize(); i++ )
   {
      BlockIterPom[ i ] = 0;
      // Decompose the linear block index i (x-fastest ordering) into block
      // coordinates: m along x, k along y, l along z.
      int l = i / ( numBlockX * numBlockY );
      int k = ( i - l * numBlockX * numBlockY ) / numBlockX;
      int m = ( i - l * numBlockX * numBlockY ) % numBlockX;
      // Check the six face neighbours (x-1, x+1, y-1, y+1, z-1, z+1),
      // guarding against stepping outside the block grid.
      if( m > 0 && BlockIterHost[ i - 1 ] ){
         BlockIterPom[ i ] = 1;
      }else if( m < numBlockX - 1 && BlockIterHost[ i + 1 ] ){
         BlockIterPom[ i ] = 1;
      }else if( k > 0 && BlockIterHost[ i - numBlockX ] ){
         BlockIterPom[ i ] = 1;
      }else if( k < numBlockY - 1 && BlockIterHost[ i + numBlockX ] ){
         BlockIterPom[ i ] = 1;
      }else if( l > 0 && BlockIterHost[ i - numBlockX * numBlockY ] ){
         BlockIterPom[ i ] = 1;
      }else if( l < numBlockZ - 1 && BlockIterHost[ i + numBlockX * numBlockY ] ){
         BlockIterPom[ i ] = 1;
      }
   }
   // Publish the new activity flags.
   for( int i = 0; i < BlockIterHost.getSize(); i++ )
   {
      BlockIterHost[ i ] = BlockIterPom[ i ];
   }
   // Fix: the scratch array was previously leaked on every call.
   delete[] BlockIterPom;
}
|
_snobal.h | /*
** NAME
** _snobal.h
**
** DESCRIPTION
** Private include file for the snobal library.
*/
#ifndef _PRIV_SNOBAL_H_
#define _PRIV_SNOBAL_H_
#include "snobal.h"
/* ------------------------------------------------------------------------ */
/*
* Private routines in the snobal library.
*/
extern void _adj_layers (void);
extern void _adj_snow (double delta_z_s, double delta_m_s);
extern void _advec (void);
extern int _below_thold (double threshold);
extern void _calc_layers (void);
extern double _cold_content (double temp, double mass);
extern int _divide_tstep (TSTEP_REC * tstep);
extern int _do_tstep (TSTEP_REC * tstep);
extern int _e_bal (void);
extern int _evap_cond (void);
extern void _h2o_compact (void);
extern int _h_le (void);
extern void _layer_mass (void);
extern int _mass_bal (void);
extern void _net_rad (void);
extern void _new_density (void);
extern void _open_files (void);
extern void _precip (void);
extern void _runoff (void);
extern void _snowmelt (void);
extern void _time_compact (void);
/* ------------------------------------------------------------------------ */
/*
* Global variables that are private (internal) to the snobal library.
*/
extern INPUT_REC input_deltas[4]; /* deltas for climate-input parameters
over each timestep */
typedef struct {
double m_pp; /* total precipitation mass (kg/m^2) */
double m_rain; /* mass of rain in precip (kg/m^2) */
double m_snow; /* " " snow " " (kg/m^2) */
double z_snow; /* depth of snow in " (m) */
} PRECIP_REC;
extern PRECIP_REC precip_info[4]; /* array of precip info adjusted for
each timestep */
extern int computed[4]; /* array of flags for each timestep;
TRUE if computed values for input
deltas and precip arrays */
extern double h2o_sat_snow; /* snowfall's % of liquid H2O saturation */
extern int isothermal; /* melting? */
extern int snowcover; /* snow on gnd at start of current timestep? */
#pragma omp threadprivate(input_deltas, precip_info, computed, h2o_sat_snow, isothermal, snowcover)
#endif /* _PRIV_SNOBAL_H_ */
|
StmtOpenMP.h | //===- StmtOpenMP.h - Classes for OpenMP directives ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// \brief This file defines OpenMP AST classes for executable directives and
/// clauses.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMTOPENMP_H
#define LLVM_CLANG_AST_STMTOPENMP_H
#include "clang/AST/Expr.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Stmt.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
namespace clang {
//===----------------------------------------------------------------------===//
// AST classes for directives.
//===----------------------------------------------------------------------===//
/// \brief This is a basic class for representing single OpenMP executable
/// directive.
///
/// Clause pointers and child stmt/expr pointers live in storage that trails
/// the directive object itself (located via ClausesOffset), so concrete
/// directives must be allocated with that extra storage by their Create()
/// factories.
class OMPExecutableDirective : public Stmt {
  friend class ASTStmtReader;
  /// \brief Kind of the directive.
  OpenMPDirectiveKind Kind;
  /// \brief Starting location of the directive (directive keyword).
  SourceLocation StartLoc;
  /// \brief Ending location of the directive.
  SourceLocation EndLoc;
  /// \brief Numbers of clauses.
  const unsigned NumClauses;
  /// \brief Number of child expressions/stmts.
  const unsigned NumChildren;
  /// \brief Offset from this to the start of clauses.
  /// There are NumClauses pointers to clauses, they are followed by
  /// NumChildren pointers to child stmts/exprs (if the directive type
  /// requires an associated stmt, then it has to be the first of them).
  const unsigned ClausesOffset;
  /// \brief Get the clauses storage.
  MutableArrayRef<OMPClause *> getClauses() {
    // The clause array begins ClausesOffset bytes past the start of this
    // object, in the trailing storage allocated by the Create() factories.
    OMPClause **ClauseStorage = reinterpret_cast<OMPClause **>(
        reinterpret_cast<char *>(this) + ClausesOffset);
    return MutableArrayRef<OMPClause *>(ClauseStorage, NumClauses);
  }

protected:
  /// \brief Build instance of directive of class \a K.
  ///
  /// \param SC Statement class.
  /// \param K Kind of OpenMP directive.
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending location of the directive.
  ///
  template <typename T>
  OMPExecutableDirective(const T *, StmtClass SC, OpenMPDirectiveKind K,
                         SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned NumClauses, unsigned NumChildren)
      : Stmt(SC), Kind(K), StartLoc(std::move(StartLoc)),
        EndLoc(std::move(EndLoc)), NumClauses(NumClauses),
        NumChildren(NumChildren),
        // Clauses start at the first properly aligned address past the
        // concrete directive object of type T.
        ClausesOffset(llvm::alignTo(sizeof(T), llvm::alignOf<OMPClause *>())) {}

  /// \brief Sets the list of variables for this clause.
  ///
  /// \param Clauses The list of clauses for the directive.
  ///
  void setClauses(ArrayRef<OMPClause *> Clauses);

  /// \brief Set the associated statement for the directive.
  ///
  /// \param S Associated statement.
  ///
  void setAssociatedStmt(Stmt *S) {
    assert(hasAssociatedStmt() && "no associated statement.");
    // The associated statement is always the first child (see the comment on
    // ClausesOffset).
    *child_begin() = S;
  }

public:
  /// \brief Iterates over a filtered subrange of clauses applied to a
  /// directive.
  ///
  /// This iterator visits only clauses of type SpecificClause.
  template <typename SpecificClause>
  class specific_clause_iterator
      : public llvm::iterator_adaptor_base<
            specific_clause_iterator<SpecificClause>,
            ArrayRef<OMPClause *>::const_iterator, std::forward_iterator_tag,
            const SpecificClause *, ptrdiff_t, const SpecificClause *,
            const SpecificClause *> {
    ArrayRef<OMPClause *>::const_iterator End;

    /// Advance the underlying iterator to the next clause of the requested
    /// type (or to End).
    void SkipToNextClause() {
      while (this->I != End && !isa<SpecificClause>(*this->I))
        ++this->I;
    }

  public:
    explicit specific_clause_iterator(ArrayRef<OMPClause *> Clauses)
        : specific_clause_iterator::iterator_adaptor_base(Clauses.begin()),
          End(Clauses.end()) {
      SkipToNextClause();
    }

    const SpecificClause *operator*() const {
      return cast<SpecificClause>(*this->I);
    }
    const SpecificClause *operator->() const { return **this; }

    specific_clause_iterator &operator++() {
      ++this->I;
      SkipToNextClause();
      return *this;
    }
  };

  /// Build an iterator range over the clauses of type \a SpecificClause in
  /// \a Clauses.
  template <typename SpecificClause>
  static llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind(ArrayRef<OMPClause *> Clauses) {
    return {specific_clause_iterator<SpecificClause>(Clauses),
            specific_clause_iterator<SpecificClause>(
                llvm::makeArrayRef(Clauses.end(), 0))};
  }

  /// Build an iterator range over this directive's clauses of type
  /// \a SpecificClause.
  template <typename SpecificClause>
  llvm::iterator_range<specific_clause_iterator<SpecificClause>>
  getClausesOfKind() const {
    return getClausesOfKind<SpecificClause>(clauses());
  }

  /// Gets a single clause of the specified kind associated with the
  /// current directive iff there is only one clause of this kind (and assertion
  /// is fired if there is more than one clause is associated with the
  /// directive). Returns nullptr if no clause of this kind is associated with
  /// the directive.
  template <typename SpecificClause>
  const SpecificClause *getSingleClause() const {
    auto Clauses = getClausesOfKind<SpecificClause>();

    if (Clauses.begin() != Clauses.end()) {
      assert(std::next(Clauses.begin()) == Clauses.end() &&
             "There are at least 2 clauses of the specified kind");
      return *Clauses.begin();
    }
    return nullptr;
  }

  /// Returns true if the current directive has one or more clauses of a
  /// specific kind.
  template <typename SpecificClause>
  bool hasClausesOfKind() const {
    auto Clauses = getClausesOfKind<SpecificClause>();
    return Clauses.begin() != Clauses.end();
  }

  /// \brief Returns starting location of directive kind.
  SourceLocation getLocStart() const { return StartLoc; }
  /// \brief Returns ending location of directive.
  SourceLocation getLocEnd() const { return EndLoc; }

  /// \brief Set starting location of directive kind.
  ///
  /// \param Loc New starting location of directive.
  ///
  void setLocStart(SourceLocation Loc) { StartLoc = Loc; }
  /// \brief Set ending location of directive.
  ///
  /// \param Loc New ending location of directive.
  ///
  void setLocEnd(SourceLocation Loc) { EndLoc = Loc; }

  /// \brief Get number of clauses.
  unsigned getNumClauses() const { return NumClauses; }

  /// \brief Returns specified clause.
  ///
  /// \param i Number of clause.
  ///
  OMPClause *getClause(unsigned i) const { return clauses()[i]; }

  /// \brief Returns true if directive has associated statement.
  bool hasAssociatedStmt() const { return NumChildren > 0; }

  /// \brief Returns statement associated with the directive.
  Stmt *getAssociatedStmt() const {
    assert(hasAssociatedStmt() && "no associated statement.");
    return const_cast<Stmt *>(*child_begin());
  }

  OpenMPDirectiveKind getDirectiveKind() const { return Kind; }

  static bool classof(const Stmt *S) {
    return S->getStmtClass() >= firstOMPExecutableDirectiveConstant &&
           S->getStmtClass() <= lastOMPExecutableDirectiveConstant;
  }

  child_range children() {
    if (!hasAssociatedStmt())
      return child_range(child_iterator(), child_iterator());
    // Child stmts/exprs are stored immediately after the clause pointers in
    // the trailing storage.
    Stmt **ChildStorage = reinterpret_cast<Stmt **>(getClauses().end());
    return child_range(ChildStorage, ChildStorage + NumChildren);
  }

  ArrayRef<OMPClause *> clauses() { return getClauses(); }

  ArrayRef<OMPClause *> clauses() const {
    return const_cast<OMPExecutableDirective *>(this)->getClauses();
  }
};
/// \brief This represents '#pragma omp parallel' directive.
///
/// \code
/// #pragma omp parallel private(a,b) reduction(+: c,d)
/// \endcode
/// In this example directive '#pragma omp parallel' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief True if this parallel region contains an inner cancel directive.
  bool HasCancel = false;

  /// \brief Build a '#pragma omp parallel' directive.
  ///
  /// \param StartLoc Starting location of the directive (directive keyword).
  /// \param EndLoc Ending Location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               StartLoc, EndLoc, NumClauses,
                               /*NumChildren=*/1) {}

  /// \brief Build an empty directive (used during deserialization).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelDirectiveClass, OMPD_parallel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               /*NumChildren=*/1) {}

  /// \brief Record whether the region contains an inner cancel directive.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement associated with the directive.
  /// \param HasCancel true if this directive has inner cancel directive.
  ///
  static OMPParallelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive with the place for \a N clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelDirectiveClass;
  }
};
/// \brief This is a common base class for loop directives ('omp simd', 'omp
/// for', 'omp for simd' etc.). It is responsible for the loop code generation.
///
class OMPLoopDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Number of collapsed loops as specified by 'collapse' clause.
unsigned CollapsedNum;
/// \brief Offsets to the stored exprs.
/// This enumeration contains offsets to all the pointers to children
/// expressions stored in OMPLoopDirective.
/// The first 9 children are nesessary for all the loop directives, and
/// the next 7 are specific to the worksharing ones.
/// After the fixed children, three arrays of length CollapsedNum are
/// allocated: loop counters, their updates and final values.
///
enum {
AssociatedStmtOffset = 0,
IterationVariableOffset = 1,
LastIterationOffset = 2,
CalcLastIterationOffset = 3,
PreConditionOffset = 4,
CondOffset = 5,
InitOffset = 6,
IncOffset = 7,
PreInitsOffset = 8,
// The '...End' enumerators do not correspond to child expressions - they
// specify the offset to the end (and start of the following counters/
// updates/finals arrays).
DefaultEnd = 9,
// The following 7 exprs are used by worksharing loops only.
IsLastIterVariableOffset = 9,
LowerBoundVariableOffset = 10,
UpperBoundVariableOffset = 11,
StrideVariableOffset = 12,
EnsureUpperBoundOffset = 13,
NextLowerBoundOffset = 14,
NextUpperBoundOffset = 15,
// Offset to the end (and start of the following counters/updates/finals
// arrays) for worksharing loop directives.
WorksharingEnd = 16,
};
/// \brief Get the counters storage.
MutableArrayRef<Expr *> getCounters() {
Expr **Storage = reinterpret_cast<Expr **>(
&(*(std::next(child_begin(), getArraysOffset(getDirectiveKind())))));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the private counters storage.
MutableArrayRef<Expr *> getPrivateCounters() {
Expr **Storage = reinterpret_cast<Expr **>(&*std::next(
child_begin(), getArraysOffset(getDirectiveKind()) + CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
MutableArrayRef<Expr *> getInits() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 2 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the updates storage.
MutableArrayRef<Expr *> getUpdates() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 3 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
/// \brief Get the final counter updates storage.
MutableArrayRef<Expr *> getFinals() {
Expr **Storage = reinterpret_cast<Expr **>(
&*std::next(child_begin(),
getArraysOffset(getDirectiveKind()) + 4 * CollapsedNum));
return MutableArrayRef<Expr *>(Storage, CollapsedNum);
}
protected:
/// \brief Build instance of loop directive of class \a Kind.
///
/// \param SC Statement class.
/// \param Kind Kind of OpenMP directive.
/// \param StartLoc Starting location of the directive (directive keyword).
/// \param EndLoc Ending location of the directive.
/// \param CollapsedNum Number of collapsed loops from 'collapse' clause.
/// \param NumClauses Number of clauses.
/// \param NumSpecialChildren Number of additional directive-specific stmts.
///
template <typename T>
OMPLoopDirective(const T *That, StmtClass SC, OpenMPDirectiveKind Kind,
SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, unsigned NumClauses,
unsigned NumSpecialChildren = 0)
: OMPExecutableDirective(That, SC, Kind, StartLoc, EndLoc, NumClauses,
numLoopChildren(CollapsedNum, Kind) +
NumSpecialChildren),
CollapsedNum(CollapsedNum) {}
/// \brief Offset to the start of children expression arrays.
static unsigned getArraysOffset(OpenMPDirectiveKind Kind) {
return (isOpenMPWorksharingDirective(Kind) ||
isOpenMPTaskLoopDirective(Kind) ||
isOpenMPDistributeDirective(Kind))
? WorksharingEnd
: DefaultEnd;
}
/// \brief Children number.
static unsigned numLoopChildren(unsigned CollapsedNum,
OpenMPDirectiveKind Kind) {
return getArraysOffset(Kind) + 5 * CollapsedNum; // Counters,
// PrivateCounters, Inits,
// Updates and Finals
}
void setIterationVariable(Expr *IV) {
*std::next(child_begin(), IterationVariableOffset) = IV;
}
void setLastIteration(Expr *LI) {
*std::next(child_begin(), LastIterationOffset) = LI;
}
void setCalcLastIteration(Expr *CLI) {
*std::next(child_begin(), CalcLastIterationOffset) = CLI;
}
void setPreCond(Expr *PC) {
*std::next(child_begin(), PreConditionOffset) = PC;
}
void setCond(Expr *Cond) {
*std::next(child_begin(), CondOffset) = Cond;
}
void setInit(Expr *Init) { *std::next(child_begin(), InitOffset) = Init; }
void setInc(Expr *Inc) { *std::next(child_begin(), IncOffset) = Inc; }
void setPreInits(Stmt *PreInits) {
*std::next(child_begin(), PreInitsOffset) = PreInits;
}
void setIsLastIterVariable(Expr *IL) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), IsLastIterVariableOffset) = IL;
}
void setLowerBoundVariable(Expr *LB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), LowerBoundVariableOffset) = LB;
}
void setUpperBoundVariable(Expr *UB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), UpperBoundVariableOffset) = UB;
}
void setStrideVariable(Expr *ST) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), StrideVariableOffset) = ST;
}
void setEnsureUpperBound(Expr *EUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), EnsureUpperBoundOffset) = EUB;
}
void setNextLowerBound(Expr *NLB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextLowerBoundOffset) = NLB;
}
void setNextUpperBound(Expr *NUB) {
assert((isOpenMPWorksharingDirective(getDirectiveKind()) ||
isOpenMPTaskLoopDirective(getDirectiveKind()) ||
isOpenMPDistributeDirective(getDirectiveKind())) &&
"expected worksharing loop directive");
*std::next(child_begin(), NextUpperBoundOffset) = NUB;
}
void setCounters(ArrayRef<Expr *> A);
void setPrivateCounters(ArrayRef<Expr *> A);
void setInits(ArrayRef<Expr *> A);
void setUpdates(ArrayRef<Expr *> A);
void setFinals(ArrayRef<Expr *> A);
public:
/// \brief The expressions built for the OpenMP loop CodeGen for the
/// whole collapsed loop nest.
struct HelperExprs {
/// \brief Loop iteration variable.
Expr *IterationVarRef;
/// \brief Loop last iteration number.
Expr *LastIteration;
/// \brief Loop number of iterations.
Expr *NumIterations;
/// \brief Calculation of last iteration.
Expr *CalcLastIteration;
/// \brief Loop pre-condition.
Expr *PreCond;
/// \brief Loop condition.
Expr *Cond;
/// \brief Loop iteration variable init.
Expr *Init;
/// \brief Loop increment.
Expr *Inc;
/// \brief IsLastIteration - local flag variable passed to runtime.
Expr *IL;
/// \brief LowerBound - local variable passed to runtime.
Expr *LB;
/// \brief UpperBound - local variable passed to runtime.
Expr *UB;
/// \brief Stride - local variable passed to runtime.
Expr *ST;
/// \brief EnsureUpperBound -- expression LB = min(LB, NumIterations).
Expr *EUB;
/// \brief Update of LowerBound for statically sheduled 'omp for' loops.
Expr *NLB;
/// \brief Update of UpperBound for statically sheduled 'omp for' loops.
Expr *NUB;
/// \brief Counters Loop counters.
SmallVector<Expr *, 4> Counters;
/// \brief PrivateCounters Loop counters.
SmallVector<Expr *, 4> PrivateCounters;
/// \brief Expressions for loop counters inits for CodeGen.
SmallVector<Expr *, 4> Inits;
/// \brief Expressions for loop counters update for CodeGen.
SmallVector<Expr *, 4> Updates;
/// \brief Final loop counter values for GodeGen.
SmallVector<Expr *, 4> Finals;
/// Init statement for all captured expressions.
Stmt *PreInits;
/// \brief Check if all the expressions are built (does not check the
/// worksharing ones).
bool builtAll() {
return IterationVarRef != nullptr && LastIteration != nullptr &&
NumIterations != nullptr && PreCond != nullptr &&
Cond != nullptr && Init != nullptr && Inc != nullptr;
}
/// \brief Initialize all the fields to null.
/// \param Size Number of elements in the counters/finals/updates arrays.
void clear(unsigned Size) {
IterationVarRef = nullptr;
LastIteration = nullptr;
CalcLastIteration = nullptr;
PreCond = nullptr;
Cond = nullptr;
Init = nullptr;
Inc = nullptr;
IL = nullptr;
LB = nullptr;
UB = nullptr;
ST = nullptr;
EUB = nullptr;
NLB = nullptr;
NUB = nullptr;
Counters.resize(Size);
PrivateCounters.resize(Size);
Inits.resize(Size);
Updates.resize(Size);
Finals.resize(Size);
for (unsigned i = 0; i < Size; ++i) {
Counters[i] = nullptr;
PrivateCounters[i] = nullptr;
Inits[i] = nullptr;
Updates[i] = nullptr;
Finals[i] = nullptr;
}
PreInits = nullptr;
}
};
/// \brief Get number of collapsed loops.
unsigned getCollapsedNumber() const {
  return CollapsedNum;
}
/// \brief Get the iteration variable helper expression.
Expr *getIterationVariable() const {
  const Stmt *S = *std::next(child_begin(), IterationVariableOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the expression for the number of the last iteration.
Expr *getLastIteration() const {
  const Stmt *S = *std::next(child_begin(), LastIterationOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the helper expression that computes the last iteration.
Expr *getCalcLastIteration() const {
  const Stmt *S = *std::next(child_begin(), CalcLastIterationOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the loop precondition expression.
Expr *getPreCond() const {
  const Stmt *S = *std::next(child_begin(), PreConditionOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the loop condition expression.
Expr *getCond() const {
  const Stmt *S = *std::next(child_begin(), CondOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the loop init expression.
Expr *getInit() const {
  const Stmt *S = *std::next(child_begin(), InitOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the loop increment expression.
Expr *getInc() const {
  const Stmt *S = *std::next(child_begin(), IncOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the init statement for all captured expressions.
const Stmt *getPreInits() const {
  return *std::next(child_begin(), PreInitsOffset);
}
Stmt *getPreInits() {
  return *std::next(child_begin(), PreInitsOffset);
}
/// \brief Get the "is last iteration" flag variable (worksharing only).
Expr *getIsLastIterVariable() const {
  OpenMPDirectiveKind DKind = getDirectiveKind();
  assert((isOpenMPWorksharingDirective(DKind) ||
          isOpenMPTaskLoopDirective(DKind) ||
          isOpenMPDistributeDirective(DKind)) &&
         "expected worksharing loop directive");
  const Stmt *S = *std::next(child_begin(), IsLastIterVariableOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the lower bound variable (worksharing only).
Expr *getLowerBoundVariable() const {
  OpenMPDirectiveKind DKind = getDirectiveKind();
  assert((isOpenMPWorksharingDirective(DKind) ||
          isOpenMPTaskLoopDirective(DKind) ||
          isOpenMPDistributeDirective(DKind)) &&
         "expected worksharing loop directive");
  const Stmt *S = *std::next(child_begin(), LowerBoundVariableOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the upper bound variable (worksharing only).
Expr *getUpperBoundVariable() const {
  OpenMPDirectiveKind DKind = getDirectiveKind();
  assert((isOpenMPWorksharingDirective(DKind) ||
          isOpenMPTaskLoopDirective(DKind) ||
          isOpenMPDistributeDirective(DKind)) &&
         "expected worksharing loop directive");
  const Stmt *S = *std::next(child_begin(), UpperBoundVariableOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the stride variable (worksharing only).
Expr *getStrideVariable() const {
  OpenMPDirectiveKind DKind = getDirectiveKind();
  assert((isOpenMPWorksharingDirective(DKind) ||
          isOpenMPTaskLoopDirective(DKind) ||
          isOpenMPDistributeDirective(DKind)) &&
         "expected worksharing loop directive");
  const Stmt *S = *std::next(child_begin(), StrideVariableOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the "ensure upper bound" expression (worksharing only).
Expr *getEnsureUpperBound() const {
  OpenMPDirectiveKind DKind = getDirectiveKind();
  assert((isOpenMPWorksharingDirective(DKind) ||
          isOpenMPTaskLoopDirective(DKind) ||
          isOpenMPDistributeDirective(DKind)) &&
         "expected worksharing loop directive");
  const Stmt *S = *std::next(child_begin(), EnsureUpperBoundOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the next lower bound update expression (worksharing only).
Expr *getNextLowerBound() const {
  OpenMPDirectiveKind DKind = getDirectiveKind();
  assert((isOpenMPWorksharingDirective(DKind) ||
          isOpenMPTaskLoopDirective(DKind) ||
          isOpenMPDistributeDirective(DKind)) &&
         "expected worksharing loop directive");
  const Stmt *S = *std::next(child_begin(), NextLowerBoundOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the next upper bound update expression (worksharing only).
Expr *getNextUpperBound() const {
  OpenMPDirectiveKind DKind = getDirectiveKind();
  assert((isOpenMPWorksharingDirective(DKind) ||
          isOpenMPTaskLoopDirective(DKind) ||
          isOpenMPDistributeDirective(DKind)) &&
         "expected worksharing loop directive");
  const Stmt *S = *std::next(child_begin(), NextUpperBoundOffset);
  return const_cast<Expr *>(reinterpret_cast<const Expr *>(S));
}
/// \brief Get the body of the innermost collapsed loop.
const Stmt *getBody() const {
  // This relies on the loop form having already been checked by Sema:
  // each level must be a ForStmt, possibly wrapped in capturing containers.
  Stmt *Body = getAssociatedStmt()->IgnoreContainers(true);
  Body = cast<ForStmt>(Body)->getBody();
  // Descend one ForStmt per additional collapsed loop, peeling containers
  // off before each cast.
  for (unsigned Cnt = 1; Cnt < CollapsedNum; ++Cnt) {
    Body = Body->IgnoreContainers();
    Body = cast<ForStmt>(Body)->getBody();
  }
  return Body;
}
/// \brief Loop counter variables.
ArrayRef<Expr *> counters() { return getCounters(); }
ArrayRef<Expr *> counters() const {
  auto *Self = const_cast<OMPLoopDirective *>(this);
  return Self->getCounters();
}
/// \brief Private copies of the loop counter variables.
ArrayRef<Expr *> private_counters() { return getPrivateCounters(); }
ArrayRef<Expr *> private_counters() const {
  auto *Self = const_cast<OMPLoopDirective *>(this);
  return Self->getPrivateCounters();
}
/// \brief Loop counter init expressions.
ArrayRef<Expr *> inits() { return getInits(); }
ArrayRef<Expr *> inits() const {
  auto *Self = const_cast<OMPLoopDirective *>(this);
  return Self->getInits();
}
/// \brief Loop counter update expressions.
ArrayRef<Expr *> updates() { return getUpdates(); }
ArrayRef<Expr *> updates() const {
  auto *Self = const_cast<OMPLoopDirective *>(this);
  return Self->getUpdates();
}
/// \brief Final loop counter value expressions.
ArrayRef<Expr *> finals() { return getFinals(); }
ArrayRef<Expr *> finals() const {
  auto *Self = const_cast<OMPLoopDirective *>(this);
  return Self->getFinals();
}
/// \brief RTTI support: true for any loop-based OpenMP directive.
static bool classof(const Stmt *T) {
  switch (T->getStmtClass()) {
  case OMPSimdDirectiveClass:
  case OMPForDirectiveClass:
  case OMPForSimdDirectiveClass:
  case OMPParallelForDirectiveClass:
  case OMPParallelForSimdDirectiveClass:
  case OMPTaskLoopDirectiveClass:
  case OMPTaskLoopSimdDirectiveClass:
  case OMPDistributeDirectiveClass:
  case OMPTargetParallelForDirectiveClass:
    return true;
  default:
    return false;
  }
}
};
/// \brief This represents '#pragma omp simd' directive.
///
/// \code
/// #pragma omp simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd, StartLoc,
                         EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPSimdDirectiveClass, OMPD_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPSimdDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc, unsigned CollapsedNum,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt,
                                  const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSimdDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for' directive.
///
/// \code
/// #pragma omp for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for' has clauses 'private' with the
/// variables 'a' and 'b' and 'reduction' with operator '+' and variables 'c'
/// and 'd'.
///
class OMPForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// \brief true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  OMPForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                  unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  explicit OMPForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForDirectiveClass, OMPD_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  static OMPForDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation EndLoc, unsigned CollapsedNum,
                                 ArrayRef<OMPClause *> Clauses,
                                 Stmt *AssociatedStmt, const HelperExprs &Exprs,
                                 bool HasCancel);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  static OMPForDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                      unsigned CollapsedNum, EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp for simd' directive.
///
/// \code
/// #pragma omp for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp for simd' has clauses 'private'
/// with the variables 'a' and 'b', 'linear' with variables 'i', 'j' and
/// linear step 's', 'reduction' with operator '+' and variables 'c' and 'd'.
///
class OMPForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                      unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPForSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPForSimdDirectiveClass, OMPD_for_simd,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPForSimdDirective *CreateEmpty(const ASTContext &C,
                                          unsigned NumClauses,
                                          unsigned CollapsedNum, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp sections' directive.
///
/// \code
/// #pragma omp sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp sections' has clauses 'private' with
/// the variables 'a' and 'b' and 'reduction' with operator '+' and variables
/// 'c' and 'd'.
///
class OMPSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  OMPSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               StartLoc, EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSectionsDirectiveClass, OMPD_sections,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  static OMPSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  static OMPSectionsDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp section' directive.
///
/// \code
/// #pragma omp section
/// \endcode
///
class OMPSectionDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  OMPSectionDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               StartLoc, EndLoc, 0, 1) {}

  /// \brief Build an empty directive.
  explicit OMPSectionDirective()
      : OMPExecutableDirective(this, OMPSectionDirectiveClass, OMPD_section,
                               SourceLocation(), SourceLocation(), 0, 1) {}

public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  static OMPSectionDirective *Create(const ASTContext &C,
                                     SourceLocation StartLoc,
                                     SourceLocation EndLoc,
                                     Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  static OMPSectionDirective *CreateEmpty(const ASTContext &C, EmptyShell);

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSectionDirectiveClass;
  }
};
/// \brief This represents '#pragma omp single' directive.
///
/// \code
/// #pragma omp single private(a,b) copyprivate(c,d)
/// \endcode
/// In this example directive '#pragma omp single' has clauses 'private' with
/// the variables 'a' and 'b' and 'copyprivate' with variables 'c' and 'd'.
///
class OMPSingleDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPSingleDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               StartLoc, EndLoc, NumClauses, 1) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPSingleDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPSingleDirectiveClass, OMPD_single,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPSingleDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPSingleDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPSingleDirectiveClass;
  }
};
/// \brief This represents '#pragma omp master' directive.
///
/// \code
/// #pragma omp master
/// \endcode
///
class OMPMasterDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPMasterDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               StartLoc, EndLoc, 0, 1) {}
  /// \brief Build an empty directive.
  ///
  explicit OMPMasterDirective()
      : OMPExecutableDirective(this, OMPMasterDirectiveClass, OMPD_master,
                               SourceLocation(), SourceLocation(), 0, 1) {}
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPMasterDirective *Create(const ASTContext &C,
                                    SourceLocation StartLoc,
                                    SourceLocation EndLoc,
                                    Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPMasterDirective *CreateEmpty(const ASTContext &C, EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPMasterDirectiveClass;
  }
};
/// \brief This represents '#pragma omp critical' directive.
///
/// \code
/// #pragma omp critical
/// \endcode
///
class OMPCriticalDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;
  /// \brief Name of the directive.
  DeclarationNameInfo DirName;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCriticalDirective(const DeclarationNameInfo &Name, SourceLocation StartLoc,
                       SourceLocation EndLoc, unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               StartLoc, EndLoc, NumClauses, 1),
        DirName(Name) {}
  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPCriticalDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCriticalDirectiveClass, OMPD_critical,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1),
        DirName() {}
  /// \brief Set name of the directive.
  ///
  /// \param Name Name of the directive.
  ///
  void setDirectiveName(const DeclarationNameInfo &Name) { DirName = Name; }
public:
  /// \brief Creates directive.
  ///
  /// \param C AST context.
  /// \param Name Name of the directive.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  ///
  static OMPCriticalDirective *
  Create(const ASTContext &C, const DeclarationNameInfo &Name,
         SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
  /// \brief Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCriticalDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses, EmptyShell);
  /// \brief Return name of the directive.
  ///
  DeclarationNameInfo getDirectiveName() const { return DirName; }
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCriticalDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for' directive.
///
/// \code
/// #pragma omp parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for' has clauses 'private'
/// with the variables 'a' and 'b' and 'reduction' with operator '+' and
/// variables 'c' and 'd'.
///
class OMPParallelForDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// \brief true if current region has inner cancel directive.
  bool HasCancel = false;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  OMPParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                          unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  explicit OMPParallelForDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForDirectiveClass, OMPD_parallel_for,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  static OMPParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  static OMPParallelForDirective *CreateEmpty(const ASTContext &C,
                                              unsigned NumClauses,
                                              unsigned CollapsedNum,
                                              EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel for simd' directive.
///
/// \code
/// #pragma omp parallel for simd private(a,b) linear(i,j:s) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel for simd' has clauses
/// 'private' with the variables 'a' and 'b', 'linear' with variables 'i', 'j'
/// and linear step 's', 'reduction' with operator '+' and variables 'c' and
/// 'd'.
///
class OMPParallelForSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;
  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPParallelForSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                              unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}
  /// \brief Build an empty directive.
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPParallelForSimdDirective(unsigned CollapsedNum,
                                       unsigned NumClauses)
      : OMPLoopDirective(this, OMPParallelForSimdDirectiveClass,
                         OMPD_parallel_for_simd, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses) {}
public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPParallelForSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);
  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPParallelForSimdDirective *CreateEmpty(const ASTContext &C,
                                                  unsigned NumClauses,
                                                  unsigned CollapsedNum,
                                                  EmptyShell);
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelForSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp parallel sections' directive.
///
/// \code
/// #pragma omp parallel sections private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp parallel sections' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPParallelSectionsDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief true if current directive has inner cancel directive.
  bool HasCancel = false;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  OMPParallelSectionsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                               unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, StartLoc, EndLoc,
                               NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPParallelSectionsDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPParallelSectionsDirectiveClass,
                               OMPD_parallel_sections, SourceLocation(),
                               SourceLocation(), NumClauses, 1) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true if current directive has inner cancel directive.
  static OMPParallelSectionsDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  static OMPParallelSectionsDirective *
  CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPParallelSectionsDirectiveClass;
  }
};
/// \brief This represents '#pragma omp task' directive.
///
/// \code
/// #pragma omp task private(a,b) final(d)
/// \endcode
/// In this example directive '#pragma omp task' has clauses 'private' with the
/// variables 'a' and 'b' and 'final' with condition 'd'.
///
class OMPTaskDirective : public OMPExecutableDirective {
  friend class ASTStmtReader;

  /// \brief true if this directive has inner cancel directive.
  bool HasCancel = false;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  OMPTaskDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                   unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task, StartLoc,
                               EndLoc, NumClauses, 1) {}

  /// \brief Build an empty directive.
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPTaskDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPTaskDirectiveClass, OMPD_task,
                               SourceLocation(), SourceLocation(), NumClauses,
                               1) {}

  /// \brief Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param HasCancel true, if current directive has inner cancel directive.
  static OMPTaskDirective *Create(const ASTContext &C, SourceLocation StartLoc,
                                  SourceLocation EndLoc,
                                  ArrayRef<OMPClause *> Clauses,
                                  Stmt *AssociatedStmt, bool HasCancel);

  /// \brief Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  static OMPTaskDirective *CreateEmpty(const ASTContext &C, unsigned NumClauses,
                                       EmptyShell);

  /// \brief Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskyield' directive.
///
/// \code
/// #pragma omp taskyield
/// \endcode
///
class OMPTaskyieldDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPTaskyieldDirective()
: OMPExecutableDirective(this, OMPTaskyieldDirectiveClass, OMPD_taskyield,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPTaskyieldDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskyieldDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskyieldDirectiveClass;
}
};
/// \brief This represents '#pragma omp barrier' directive.
///
/// \code
/// #pragma omp barrier
/// \endcode
///
class OMPBarrierDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPBarrierDirective()
: OMPExecutableDirective(this, OMPBarrierDirectiveClass, OMPD_barrier,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPBarrierDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPBarrierDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPBarrierDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskwait' directive.
///
/// \code
/// #pragma omp taskwait
/// \endcode
///
class OMPTaskwaitDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
StartLoc, EndLoc, 0, 0) {}
/// \brief Build an empty directive.
///
explicit OMPTaskwaitDirective()
: OMPExecutableDirective(this, OMPTaskwaitDirectiveClass, OMPD_taskwait,
SourceLocation(), SourceLocation(), 0, 0) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
///
static OMPTaskwaitDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskwaitDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskwaitDirectiveClass;
}
};
/// \brief This represents '#pragma omp taskgroup' directive.
///
/// \code
/// #pragma omp taskgroup
/// \endcode
///
class OMPTaskgroupDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
///
OMPTaskgroupDirective(SourceLocation StartLoc, SourceLocation EndLoc)
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
StartLoc, EndLoc, 0, 1) {}
/// \brief Build an empty directive.
///
explicit OMPTaskgroupDirective()
: OMPExecutableDirective(this, OMPTaskgroupDirectiveClass, OMPD_taskgroup,
SourceLocation(), SourceLocation(), 0, 1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTaskgroupDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
///
static OMPTaskgroupDirective *CreateEmpty(const ASTContext &C, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTaskgroupDirectiveClass;
}
};
/// \brief This represents '#pragma omp flush' directive.
///
/// \code
/// #pragma omp flush(a,b)
/// \endcode
/// In this example directive '#pragma omp flush' has 2 arguments- variables 'a'
/// and 'b'.
/// 'omp flush' directive does not have clauses but have an optional list of
/// variables to flush. This list of variables is stored within some fake clause
/// FlushClause.
class OMPFlushDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPFlushDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
StartLoc, EndLoc, NumClauses, 0) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPFlushDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPFlushDirectiveClass, OMPD_flush,
SourceLocation(), SourceLocation(), NumClauses,
0) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses (only single OMPFlushClause clause is
/// allowed).
///
static OMPFlushDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPFlushDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPFlushDirectiveClass;
}
};
/// \brief This represents '#pragma omp ordered' directive.
///
/// \code
/// #pragma omp ordered
/// \endcode
///
class OMPOrderedDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPOrderedDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPOrderedDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPOrderedDirectiveClass, OMPD_ordered,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPOrderedDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPOrderedDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPOrderedDirectiveClass;
}
};
/// This represents '#pragma omp atomic' directive.
///
/// \code
/// #pragma omp atomic capture
/// \endcode
/// In this example directive '#pragma omp atomic' has clause 'capture'.
///
class OMPAtomicDirective : public OMPExecutableDirective {
  // The AST reader uses the private constructors and setters when
  // deserializing this node.
  friend class ASTStmtReader;
  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// x = x binop expr;
  /// x = expr binop x;
  /// \endcode
  /// This field is true for the first form of the expression and false for the
  /// second. Required for correct codegen of non-associative operations (like
  /// << or >>).
  bool IsXLHSInRHSPart;
  /// Used for 'atomic update' or 'atomic capture' constructs. They may
  /// have atomic expressions of forms
  /// \code
  /// v = x; <update x>;
  /// <update x>; v = x;
  /// \endcode
  /// This field is true for the first(postfix) form of the expression and false
  /// otherwise.
  bool IsPostfixUpdate;

  // Child layout (NumChildren == 5): slots 1-4 hold 'x', the update helper
  // expression, 'v' and 'expr' respectively (see the set*/get* accessors
  // below); slot 0 is presumably the associated statement managed by the
  // base class — confirm against OMPExecutableDirective.

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPAtomicDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               StartLoc, EndLoc, NumClauses, 5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Build an empty directive (invalid locations; used for deserialization).
  ///
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPAtomicDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPAtomicDirectiveClass, OMPD_atomic,
                               SourceLocation(), SourceLocation(), NumClauses,
                               5),
        IsXLHSInRHSPart(false), IsPostfixUpdate(false) {}

  /// Set 'x' part of the associated expression/statement (child slot 1).
  void setX(Expr *X) { *std::next(child_begin()) = X; }
  /// Set helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)' (child slot 2).
  void setUpdateExpr(Expr *UE) { *std::next(child_begin(), 2) = UE; }
  /// Set 'v' part of the associated expression/statement (child slot 3).
  void setV(Expr *V) { *std::next(child_begin(), 3) = V; }
  /// Set 'expr' part of the associated expression/statement (child slot 4).
  void setExpr(Expr *E) { *std::next(child_begin(), 4) = E; }

public:
  /// Creates directive with a list of \a Clauses and 'x', 'v' and 'expr'
  /// parts of the atomic construct (see Section 2.12.6, atomic Construct, for
  /// detailed description of 'x', 'v' and 'expr').
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param X 'x' part of the associated expression/statement.
  /// \param V 'v' part of the associated expression/statement.
  /// \param E 'expr' part of the associated expression/statement.
  /// \param UE Helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  /// \param IsXLHSInRHSPart true if \a UE has the first form and false if the
  /// second.
  /// \param IsPostfixUpdate true if original value of 'x' must be stored in
  /// 'v', not an updated one.
  static OMPAtomicDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
         Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate);

  /// Creates an empty directive with the place for \a NumClauses
  /// clauses.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPAtomicDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get 'x' part of the associated expression/statement; may be null if
  /// the slot was never set.
  Expr *getX() { return cast_or_null<Expr>(*std::next(child_begin())); }
  const Expr *getX() const {
    return cast_or_null<Expr>(*std::next(child_begin()));
  }
  /// Get helper expression of the form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' or
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'; may be null.
  Expr *getUpdateExpr() {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  const Expr *getUpdateExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 2));
  }
  /// Return true if helper update expression has form
  /// 'OpaqueValueExpr(x) binop OpaqueValueExpr(expr)' and false if it has form
  /// 'OpaqueValueExpr(expr) binop OpaqueValueExpr(x)'.
  bool isXLHSInRHSPart() const { return IsXLHSInRHSPart; }
  /// Return true if 'v' expression must be updated to original value of
  /// 'x', false if 'v' must be updated to the new value of 'x'.
  bool isPostfixUpdate() const { return IsPostfixUpdate; }
  /// Get 'v' part of the associated expression/statement; may be null.
  Expr *getV() { return cast_or_null<Expr>(*std::next(child_begin(), 3)); }
  const Expr *getV() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 3));
  }
  /// Get 'expr' part of the associated expression/statement; may be null.
  Expr *getExpr() { return cast_or_null<Expr>(*std::next(child_begin(), 4)); }
  const Expr *getExpr() const {
    return cast_or_null<Expr>(*std::next(child_begin(), 4));
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPAtomicDirectiveClass;
  }
};
/// \brief This represents '#pragma omp target' directive.
///
/// \code
/// #pragma omp target if(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'if' with
/// condition 'a'.
///
class OMPTargetDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTargetDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDirectiveClass, OMPD_target,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetDirectiveClass;
}
};
/// \brief This represents '#pragma omp target data' directive.
///
/// \code
/// #pragma omp target data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target data' has clauses 'device'
/// with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetDataDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param NumClauses The number of clauses.
///
OMPTargetDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
OMPD_target_data, StartLoc, EndLoc, NumClauses,
1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetDataDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetDataDirectiveClass,
OMPD_target_data, SourceLocation(),
SourceLocation(), NumClauses, 1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetDataDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param N The number of clauses.
///
static OMPTargetDataDirective *CreateEmpty(const ASTContext &C, unsigned N,
EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetDataDirectiveClass;
}
};
/// \brief This represents '#pragma omp target enter data' directive.
///
/// \code
/// #pragma omp target enter data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target enter data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetEnterDataDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param NumClauses The number of clauses.
///
OMPTargetEnterDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
OMPD_target_enter_data, StartLoc, EndLoc,
NumClauses, /*NumChildren=*/0) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetEnterDataDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetEnterDataDirectiveClass,
OMPD_target_enter_data, SourceLocation(),
SourceLocation(), NumClauses,
/*NumChildren=*/0) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
///
static OMPTargetEnterDataDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// \brief Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param N The number of clauses.
///
static OMPTargetEnterDataDirective *CreateEmpty(const ASTContext &C,
unsigned N, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetEnterDataDirectiveClass;
}
};
/// \brief This represents '#pragma omp target exit data' directive.
///
/// \code
/// #pragma omp target exit data device(0) if(a) map(b[:])
/// \endcode
/// In this example directive '#pragma omp target exit data' has clauses
/// 'device' with the value '0', 'if' with condition 'a' and 'map' with array
/// section 'b[:]'.
///
class OMPTargetExitDataDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param NumClauses The number of clauses.
///
OMPTargetExitDataDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
OMPD_target_exit_data, StartLoc, EndLoc,
NumClauses, /*NumChildren=*/0) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetExitDataDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetExitDataDirectiveClass,
OMPD_target_exit_data, SourceLocation(),
SourceLocation(), NumClauses,
/*NumChildren=*/0) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
///
static OMPTargetExitDataDirective *Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses);
/// \brief Creates an empty directive with the place for \a N clauses.
///
/// \param C AST context.
/// \param N The number of clauses.
///
static OMPTargetExitDataDirective *CreateEmpty(const ASTContext &C,
unsigned N, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetExitDataDirectiveClass;
}
};
/// \brief This represents '#pragma omp target parallel' directive.
///
/// \code
/// #pragma omp target parallel if(a)
/// \endcode
/// In this example directive '#pragma omp target parallel' has clause 'if' with
/// condition 'a'.
///
class OMPTargetParallelDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTargetParallelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
OMPD_target_parallel, StartLoc, EndLoc,
NumClauses, /*NumChildren=*/1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTargetParallelDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTargetParallelDirectiveClass,
OMPD_target_parallel, SourceLocation(),
SourceLocation(), NumClauses,
/*NumChildren=*/1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTargetParallelDirective *
Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTargetParallelDirective *
CreateEmpty(const ASTContext &C, unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTargetParallelDirectiveClass;
}
};
/// This represents '#pragma omp target parallel for' directive.
///
/// \code
/// #pragma omp target parallel for private(a,b) reduction(+:c,d)
/// \endcode
/// In this example directive '#pragma omp target parallel for' has clauses
/// 'private' with the variables 'a' and 'b' and 'reduction' with operator '+'
/// and variables 'c' and 'd'.
///
class OMPTargetParallelForDirective : public OMPLoopDirective {
  // The AST reader uses the private constructors and setHasCancel() when
  // deserializing this node.
  friend class ASTStmtReader;
  /// true if current region has inner cancel directive.
  bool HasCancel;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTargetParallelForDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                                unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, StartLoc, EndLoc,
                         CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Build an empty directive (invalid locations; used for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTargetParallelForDirective(unsigned CollapsedNum,
                                         unsigned NumClauses)
      : OMPLoopDirective(this, OMPTargetParallelForDirectiveClass,
                         OMPD_target_parallel_for, SourceLocation(),
                         SourceLocation(), CollapsedNum, NumClauses),
        HasCancel(false) {}

  /// Set cancel state.
  void setHasCancel(bool Has) { HasCancel = Has; }

public:
  /// Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  /// \param HasCancel true if current directive has inner cancel directive.
  ///
  static OMPTargetParallelForDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs, bool HasCancel);

  /// Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// NOTE: the parameter order here (NumClauses before CollapsedNum) differs
  /// from the constructors above — keep call sites in sync.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTargetParallelForDirective *CreateEmpty(const ASTContext &C,
                                                    unsigned NumClauses,
                                                    unsigned CollapsedNum,
                                                    EmptyShell);

  /// Return true if current directive has inner cancel directive.
  bool hasCancel() const { return HasCancel; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTargetParallelForDirectiveClass;
  }
};
/// \brief This represents '#pragma omp teams' directive.
///
/// \code
/// #pragma omp teams if(a)
/// \endcode
/// In this example directive '#pragma omp teams' has clause 'if' with
/// condition 'a'.
///
class OMPTeamsDirective : public OMPExecutableDirective {
friend class ASTStmtReader;
/// \brief Build directive with the given start and end location.
///
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending location of the directive.
/// \param NumClauses Number of clauses.
///
OMPTeamsDirective(SourceLocation StartLoc, SourceLocation EndLoc,
unsigned NumClauses)
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
StartLoc, EndLoc, NumClauses, 1) {}
/// \brief Build an empty directive.
///
/// \param NumClauses Number of clauses.
///
explicit OMPTeamsDirective(unsigned NumClauses)
: OMPExecutableDirective(this, OMPTeamsDirectiveClass, OMPD_teams,
SourceLocation(), SourceLocation(), NumClauses,
1) {}
public:
/// \brief Creates directive with a list of \a Clauses.
///
/// \param C AST context.
/// \param StartLoc Starting location of the directive kind.
/// \param EndLoc Ending Location of the directive.
/// \param Clauses List of clauses.
/// \param AssociatedStmt Statement, associated with the directive.
///
static OMPTeamsDirective *Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses,
Stmt *AssociatedStmt);
/// \brief Creates an empty directive with the place for \a NumClauses
/// clauses.
///
/// \param C AST context.
/// \param NumClauses Number of clauses.
///
static OMPTeamsDirective *CreateEmpty(const ASTContext &C,
unsigned NumClauses, EmptyShell);
static bool classof(const Stmt *T) {
return T->getStmtClass() == OMPTeamsDirectiveClass;
}
};
/// This represents '#pragma omp cancellation point' directive.
///
/// \code
/// #pragma omp cancellation point for
/// \endcode
///
/// In this example a cancellation point is created for innermost 'for' region.
class OMPCancellationPointDirective : public OMPExecutableDirective {
  // The AST reader uses the private constructors and setCancelRegion() when
  // deserializing this node.
  friend class ASTStmtReader;
  /// Kind of the region this cancellation point applies to
  /// (OMPD_unknown until setCancelRegion() is called).
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  ///
  OMPCancellationPointDirective(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, StartLoc, EndLoc, 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive (invalid locations; used for deserialization).
  ///
  explicit OMPCancellationPointDirective()
      : OMPExecutableDirective(this, OMPCancellationPointDirectiveClass,
                               OMPD_cancellation_point, SourceLocation(),
                               SourceLocation(), 0, 0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  ///
  static OMPCancellationPointDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  ///
  static OMPCancellationPointDirective *CreateEmpty(const ASTContext &C,
                                                    EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancellationPointDirectiveClass;
  }
};
/// This represents '#pragma omp cancel' directive.
///
/// \code
/// #pragma omp cancel for
/// \endcode
///
/// In this example a cancel is created for innermost 'for' region.
class OMPCancelDirective : public OMPExecutableDirective {
  // The AST reader uses the private constructors and setCancelRegion() when
  // deserializing this node.
  friend class ASTStmtReader;
  /// Kind of the region being cancelled
  /// (OMPD_unknown until setCancelRegion() is called).
  OpenMPDirectiveKind CancelRegion;

  /// Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param NumClauses Number of clauses.
  ///
  OMPCancelDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                     unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               StartLoc, EndLoc, NumClauses, 0),
        CancelRegion(OMPD_unknown) {}

  /// Build an empty directive (invalid locations; used for deserialization).
  ///
  /// \param NumClauses Number of clauses.
  explicit OMPCancelDirective(unsigned NumClauses)
      : OMPExecutableDirective(this, OMPCancelDirectiveClass, OMPD_cancel,
                               SourceLocation(), SourceLocation(), NumClauses,
                               0),
        CancelRegion(OMPD_unknown) {}

  /// Set cancel region for current cancellation point.
  /// \param CR Cancellation region.
  void setCancelRegion(OpenMPDirectiveKind CR) { CancelRegion = CR; }

public:
  /// Creates directive.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param Clauses List of clauses.
  ///
  static OMPCancelDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         ArrayRef<OMPClause *> Clauses, OpenMPDirectiveKind CancelRegion);

  /// Creates an empty directive.
  ///
  /// \param C AST context.
  /// \param NumClauses Number of clauses.
  ///
  static OMPCancelDirective *CreateEmpty(const ASTContext &C,
                                         unsigned NumClauses, EmptyShell);

  /// Get cancellation region for the current cancellation point.
  OpenMPDirectiveKind getCancelRegion() const { return CancelRegion; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPCancelDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskloop' directive.
///
/// \code
/// #pragma omp taskloop private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                       unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         StartLoc, EndLoc, CollapsedNum, NumClauses) {}

  /// \brief Build an empty directive (for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopDirectiveClass, OMPD_taskloop,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopDirective *CreateEmpty(const ASTContext &C,
                                           unsigned NumClauses,
                                           unsigned CollapsedNum, EmptyShell);

  /// \brief Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopDirectiveClass;
  }
};
/// \brief This represents '#pragma omp taskloop simd' directive.
///
/// \code
/// #pragma omp taskloop simd private(a,b) grainsize(val) num_tasks(num)
/// \endcode
/// In this example directive '#pragma omp taskloop simd' has clauses 'private'
/// with the variables 'a' and 'b', 'grainsize' with expression 'val' and
/// 'num_tasks' with expression 'num'.
///
class OMPTaskLoopSimdDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPTaskLoopSimdDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                           unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, StartLoc, EndLoc, CollapsedNum,
                         NumClauses) {}

  /// \brief Build an empty directive (for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPTaskLoopSimdDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPTaskLoopSimdDirectiveClass,
                         OMPD_taskloop_simd, SourceLocation(), SourceLocation(),
                         CollapsedNum, NumClauses) {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPTaskLoopSimdDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPTaskLoopSimdDirective *CreateEmpty(const ASTContext &C,
                                               unsigned NumClauses,
                                               unsigned CollapsedNum,
                                               EmptyShell);

  /// \brief Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPTaskLoopSimdDirectiveClass;
  }
};
/// \brief This represents '#pragma omp distribute' directive.
///
/// \code
/// #pragma omp distribute private(a,b)
/// \endcode
/// In this example directive '#pragma omp distribute' has clauses 'private'
/// with the variables 'a' and 'b'
///
class OMPDistributeDirective : public OMPLoopDirective {
  friend class ASTStmtReader;

  /// \brief Build directive with the given start and end location.
  ///
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending location of the directive.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  OMPDistributeDirective(SourceLocation StartLoc, SourceLocation EndLoc,
                         unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         StartLoc, EndLoc, CollapsedNum, NumClauses)
        {}

  /// \brief Build an empty directive (for deserialization).
  ///
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  explicit OMPDistributeDirective(unsigned CollapsedNum, unsigned NumClauses)
      : OMPLoopDirective(this, OMPDistributeDirectiveClass, OMPD_distribute,
                         SourceLocation(), SourceLocation(), CollapsedNum,
                         NumClauses)
        {}

public:
  /// \brief Creates directive with a list of \a Clauses.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the directive kind.
  /// \param EndLoc Ending Location of the directive.
  /// \param CollapsedNum Number of collapsed loops.
  /// \param Clauses List of clauses.
  /// \param AssociatedStmt Statement, associated with the directive.
  /// \param Exprs Helper expressions for CodeGen.
  ///
  static OMPDistributeDirective *
  Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
         unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses,
         Stmt *AssociatedStmt, const HelperExprs &Exprs);

  /// \brief Creates an empty directive with the place
  /// for \a NumClauses clauses.
  ///
  /// \param C AST context.
  /// \param CollapsedNum Number of collapsed nested loops.
  /// \param NumClauses Number of clauses.
  ///
  static OMPDistributeDirective *CreateEmpty(const ASTContext &C,
                                             unsigned NumClauses,
                                             unsigned CollapsedNum, EmptyShell);

  /// \brief Support for LLVM-style RTTI (isa/dyn_cast).
  static bool classof(const Stmt *T) {
    return T->getStmtClass() == OMPDistributeDirectiveClass;
  }
};
} // end namespace clang
#endif
|
DRB020-privatemissing-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
tmp should be put as private to avoid race condition
Data race pair: tmp@65 vs. tmp@66
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
/* DataRaceBench DRB020 ("privatemissing-var-yes"): a race-POSITIVE test
   case.  `tmp` should have been privatized but deliberately is not, so
   the parallel loop below contains a genuine data race.  Do NOT "fix"
   this by adding private(tmp) — that would invalidate the benchmark. */
int main(int argc, char* argv[])
{
  omprace_init();
  int i;
  int tmp;     /* shared across all threads: the intended race variable */
  int len=100; /* default array length; overridable from the command line */
  if (argc>1)
    len = atoi(argv[1]);
  int a[len];  /* C99 variable-length array */
  /* sequential initialization: a[i] = i */
  for (i=0;i<len;i++)
    a[i]=i;
  #pragma omp parallel for
  for (i=0;i<len;i++)
  {
    tmp =a[i]+i;  /* racy write to the shared tmp */
    a[i] = tmp;   /* may read a tmp written by another thread */
  }
  omprace_fini();
  return 0;
}
|
aac.c | #include<stdio.h>
#include<stdlib.h>
#include<time.h>
#include<math.h>
#include<unistd.h>
#include<omp.h>
#define NODE_SAYISI 5
#define HEDEF_DUGUM 4
#define BASLANGIC_DURUMU 0
#define FEROMON_ESIGI 1
#define FEROMON_SALGI_MIKTARI 0.2
#define KARINCA_SAYISI 100
#define MAX_YOL_UZUNLUGU 30
#define TRUE 1
#define FALSE 0
#define NO_WAY 999
#define NO_CHOOSEN_WAY 99
_Bool komsuluk_matrisi[NODE_SAYISI][NODE_SAYISI];
int agirlik_matrisi[NODE_SAYISI][NODE_SAYISI];
float feromon_matrisi[NODE_SAYISI][NODE_SAYISI];
int yol[KARINCA_SAYISI][MAX_YOL_UZUNLUGU];
/* Builds the fixed 5-node test graph: resets every pheromone level to
   zero, then registers each directed edge in the adjacency matrix along
   with its weight.  Edges absent from the table rely on the zero
   initialization of the global matrices. */
void create_graph(){
  for(int satir = 0; satir < NODE_SAYISI; satir++)
    for(int sutun = 0; sutun < NODE_SAYISI; sutun++)
      feromon_matrisi[satir][sutun] = 0;
  /* directed edge table: {from, to, weight} */
  const int kenarlar[][3] = {
    {0, 1, 5}, {0, 2, 6}, {1, 3, 5}, {2, 1, 2},
    {3, 4, 10}, {2, 3, 1}, {1, 2, 2}
  };
  for(unsigned k = 0; k < sizeof(kenarlar)/sizeof(kenarlar[0]); k++){
    komsuluk_matrisi[kenarlar[k][0]][kenarlar[k][1]] = TRUE;
    agirlik_matrisi[kenarlar[k][0]][kenarlar[k][1]] = kenarlar[k][2];
  }
}
/* Prints the pheromone level of every directed node pair to stdout. */
void print_feromon_matrisi(){
  for(int kaynak = 0; kaynak < NODE_SAYISI; kaynak++){
    for(int hedef = 0; hedef < NODE_SAYISI; hedef++){
      printf("%d'den %d'ye: %f\n", kaynak, hedef, feromon_matrisi[kaynak][hedef]);
    }
  }
}
/* Prints the path taken by ant `karinca_no` as "a -> b -> ... -> z" plus
   the total edge-weight length of that path.
   Bug fix: the original read yol[karinca_no][i+1] for every i up to
   MAX_YOL_UZUNLUGU-1, so a full-length path indexed one element past the
   end of the row.  The bounds check below stops the lookahead at the
   last valid slot. */
void print_ant_way(int karinca_no){
  printf("karinca no: %d, yol: ", karinca_no);
  int temp_uzunluk = 0;
  for(int i = 0; i<MAX_YOL_UZUNLUGU; i++){
    /* final node when the row ends or the next slot is unused */
    if(i == MAX_YOL_UZUNLUGU-1 || yol[karinca_no][i+1] == NO_WAY){
      printf("%d, toplam uzunluk: %d\n", yol[karinca_no][i], temp_uzunluk);
      break;
    }
    printf(" %d ->", yol[karinca_no][i]);
    temp_uzunluk += agirlik_matrisi[yol[karinca_no][i]][yol[karinca_no][i+1]];
  }
}
/* Fills gidilebilecek_olasi_yollar with every node directly reachable
   from `durum` according to the adjacency matrix, and returns how many
   such neighbours were found. */
int find_possible_ways_and_get_counts(int durum, int *gidilebilecek_olasi_yollar){
  int adet = 0;
  for(int komsu = 0; komsu < NODE_SAYISI; komsu++)
    if(komsuluk_matrisi[durum][komsu])
      gidilebilecek_olasi_yollar[adet++] = komsu;
  return adet;
}
/* Scans the candidate ways and returns the LAST one whose pheromone
   level exceeds FEROMON_ESIGI, or NO_CHOOSEN_WAY when none qualifies.
   Deliberately no early break: a later qualifying candidate overrides
   an earlier one, matching the original selection behaviour. */
int smell_ways_and_get_feromon(int durum, int *gidilebilecek_olasi_yollar, int temp_sayac){
  int secim = NO_CHOOSEN_WAY;
  for(int k = 0; k < temp_sayac; k++){
    int aday = gidilebilecek_olasi_yollar[k];
    if(feromon_matrisi[durum][aday] > FEROMON_ESIGI)
      secim = aday;
  }
  return secim;
}
/* Returns a uniformly chosen entry from the first temp_sayac candidate
   ways.
   Fixes: the original called sleep(0.2) "for more randomizing", but
   sleep() takes an unsigned int, so 0.2 truncated to 0 — a no-op that
   only documented a wrong assumption (rand() is seeded once in main()).
   The abs() wrapper was also redundant: rand() is non-negative. */
int get_random_way(int *gidilebilecek_olasi_yollar, int temp_sayac){
  return gidilebilecek_olasi_yollar[rand() % temp_sayac];
}
/* Simulates KARINCA_SAYISI ants walking the graph in parallel (4 OpenMP
   threads), each depositing pheromone along its chosen edges, then
   prints every ant's path. */
int main(int argc, char const *argv[]) {
  create_graph();
  int durum;
  /* Seed once, up front.  NOTE(review): rand() is later invoked from
     several OpenMP threads concurrently (via get_random_way); rand() is
     not required to be thread-safe, so per-thread rand_r() state may be
     needed — confirm. */
  srand(time(NULL));
  int temp_sayac;
  int gidilebilecek_olasi_yollar[NODE_SAYISI];
  int iterasyon_no;
  int secilen_yol;
  /* One loop iteration = one ant.  All per-ant scratch variables are
     privatized; each iteration writes only its own yol[karinca_no] row,
     so the path writes below do not conflict between threads. */
  #pragma omp parallel for num_threads(4) private(durum, iterasyon_no, secilen_yol, temp_sayac, gidilebilecek_olasi_yollar)
  for(int karinca_no = 0; karinca_no < KARINCA_SAYISI; karinca_no++){
    durum = BASLANGIC_DURUMU;
    yol[karinca_no][0] = durum;
    iterasyon_no = 1; // reaching the start state also counts as one iteration
    secilen_yol = NO_CHOOSEN_WAY;
    /* mark the rest of this ant's path row as unused */
    for(int i = 1; i<MAX_YOL_UZUNLUGU; i++)
      yol[karinca_no][i] = NO_WAY;
    /* walk until the target node is reached or the path budget runs out */
    while(durum != HEDEF_DUGUM && iterasyon_no < MAX_YOL_UZUNLUGU){
      temp_sayac = find_possible_ways_and_get_counts(durum, gidilebilecek_olasi_yollar);
      secilen_yol = smell_ways_and_get_feromon(durum, gidilebilecek_olasi_yollar, temp_sayac);
      if(secilen_yol == NO_CHOOSEN_WAY)
        secilen_yol = get_random_way(gidilebilecek_olasi_yollar, temp_sayac);
      /* The pheromone WRITE is serialized here, but the READS of
         feromon_matrisi inside smell_ways_and_get_feromon() are not —
         NOTE(review): a read/write race on the matrix remains; confirm
         whether that imprecision is acceptable for this simulation. */
      #pragma omp critical
      feromon_matrisi[durum][secilen_yol] += FEROMON_SALGI_MIKTARI/agirlik_matrisi[durum][secilen_yol];
      durum = secilen_yol;
      yol[karinca_no][iterasyon_no] = secilen_yol;
      iterasyon_no++;
    }
  }
  /* report sequentially, after the parallel region has completed */
  for(int i=0; i<KARINCA_SAYISI;i++)
    print_ant_way(i);
  return 0;
}
|
resample.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% RRRR EEEEE SSSSS AAA M M PPPP L EEEEE %
% R R E SS A A MM MM P P L E %
% RRRR EEE SSS AAAAA M M M PPPP L EEE %
% R R E SS A A M M P L E %
% R R EEEEE SSSSS A A M M P LLLLL EEEEE %
% %
% %
% MagickCore Pixel Resampling Methods %
% %
% Software Design %
% Cristy %
% Anthony Thyssen %
% August 2007 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/color-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/draw.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/resample.h"
#include "MagickCore/resize.h"
#include "MagickCore/resize-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/option.h"
/*
EWA Resampling Options
*/
/* select ONE resampling method */
#define EWA 1 /* Normal EWA handling - raw or clamped */
/* if 0 then use "High Quality EWA" */
#define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */
#define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */
/* output debugging information */
#define DEBUG_ELLIPSE 0 /* output ellipse info for debug */
#define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */
#define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */
#if ! FILTER_DIRECT
#define WLUT_WIDTH 1024 /* size of the filter cache */
#endif
/*
Typedef declarations.
*/
/* Per-resampling state shared between AcquireResampleFilter(),
   ScaleResampleFilter() and ResamplePixelColor() calls. */
struct _ResampleFilter
{
  /* view onto the (virtual) pixels of the source image */
  CacheView
    *view;

  Image
    *image;

  ExceptionInfo
    *exception;

  MagickBooleanType
    debug;

  /* Information about image being resampled */
  ssize_t
    image_area;

  PixelInterpolateMethod
    interpolate;

  VirtualPixelMethod
    virtual_pixel;

  FilterType
    filter;

  /* processing settings needed */
  MagickBooleanType
    limit_reached,     /* scaling limits hit: fall back to averaged color */
    do_interpolate,    /* skip EWA and use plain point interpolation */
    average_defined;   /* average_pixel has been (lazily) computed */

  PixelInfo
    average_pixel;     /* cached whole-image average color */

  /* current elliptical area being resampled around center point */
  double
    A, B, C,           /* ellipse coefficients: Q = (A*U + B*V)*U + C*V*V */
    Vlimit, Ulimit,    /* vertical/horizontal half-extent of the area */
    Uwidth, slope;     /* scan-line half-width and per-row start shift */

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;                 /* ellipse pre-scale target (support squared) */
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;         /* MagickCoreSignature while the struct is valid */
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireResampleFilter() initializes the information resample needs to do a
% scaled lookup of a color from an image, using area sampling.
%
% The algorithm is based on a Elliptical Weighted Average, where the pixels
% found in a large elliptical area is averaged together according to a
% weighting (filter) function. For more details see "Fundamentals of Texture
% Mapping and Image Warping" a master's thesis by Paul.S.Heckbert, June 17,
% 1989. Available for free from, http://www.cs.cmu.edu/~ph/
%
% As EWA resampling (or any sort of resampling) can require a lot of
% calculations to produce a distorted scaling of the source image for each
% output pixel, the ResampleFilter structure generated holds that information
% between individual image resampling.
%
% This function will make the appropriate AcquireCacheView() calls
% to view the image, calling functions do not need to open a cache view.
%
% Usage Example...
% resample_filter=AcquireResampleFilter(image,exception);
% SetResampleFilter(resample_filter, GaussianFilter);
% for (y=0; y < (ssize_t) image->rows; y++) {
% for (x=0; x < (ssize_t) image->columns; x++) {
% u= ....; v= ....;
% ScaleResampleFilter(resample_filter, ... scaling vectors ...);
% (void) ResamplePixelColor(resample_filter,u,v,&pixel);
% ... assign resampled pixel value ...
% }
% }
% DestroyResampleFilter(resample_filter);
%
% The format of the AcquireResampleFilter method is:
%
% ResampleFilter *AcquireResampleFilter(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ResampleFilter *AcquireResampleFilter(const Image *image,
  ExceptionInfo *exception)
{
  register ResampleFilter
    *resample_filter;

  /* validate arguments before touching them */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* allocate and zero the filter state */
  resample_filter=(ResampleFilter *) AcquireMagickMemory(sizeof(
    *resample_filter));
  if (resample_filter == (ResampleFilter *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(resample_filter,0,sizeof(*resample_filter));
  /* take our own image reference and cache view; both are released in
     DestroyResampleFilter() */
  resample_filter->exception=exception;
  resample_filter->image=ReferenceImage((Image *) image);
  resample_filter->view=AcquireVirtualCacheView(resample_filter->image,
    exception);
  resample_filter->debug=IsEventLogging();
  resample_filter->image_area=(ssize_t) (image->columns*image->rows);
  resample_filter->average_defined=MagickFalse;
  resample_filter->signature=MagickCoreSignature;
  /* seed filter, interpolation and virtual-pixel settings from the
     image's own settings */
  SetResampleFilter(resample_filter,image->filter);
  (void) SetResampleFilterInterpolateMethod(resample_filter,image->interpolate);
  (void) SetResampleFilterVirtualPixelMethod(resample_filter,
    GetImageVirtualPixelMethod(image));
  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y R e s a m p l e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyResampleFilter() finalizes and cleans up the resampling
% resample_filter as returned by AcquireResampleFilter(), freeing any memory
% or other information as needed.
%
% The format of the DestroyResampleFilter method is:
%
% ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter)
%
% A description of each parameter follows:
%
% o resample_filter: resampling information structure
%
*/
MagickExport ResampleFilter *DestroyResampleFilter(
  ResampleFilter *resample_filter)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  /* release the view, the image reference and (direct-filter builds
     only) the resize filter acquired in AcquireResampleFilter() */
  resample_filter->view=DestroyCacheView(resample_filter->view);
  resample_filter->image=DestroyImage(resample_filter->image);
#if ! FILTER_LUT
  resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def);
#endif
  /* poison the signature so use-after-free is caught by the asserts */
  resample_filter->signature=(~MagickCoreSignature);
  resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter);
  return(resample_filter);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s a m p l e P i x e l C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResamplePixelColor() samples the pixel values surrounding the location
% given using an elliptical weighted average, at the scale previously
% calculated, and in the most efficient manner possible for the
% VirtualPixelMethod setting.
%
% The format of the ResamplePixelColor method is:
%
% MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter,
% const double u0,const double v0,PixelInfo *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o u0,v0: A double representing the center of the area to resample,
% The distortion transformed transformed x,y coordinate.
%
% o pixel: the resampled pixel is returned here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResamplePixelColor(
ResampleFilter *resample_filter,const double u0,const double v0,
PixelInfo *pixel,ExceptionInfo *exception)
{
MagickBooleanType
status;
ssize_t u,v, v1, v2, uw, hit;
double u1;
double U,V,Q,DQ,DDQ;
double divisor_c,divisor_m;
register double weight;
register const Quantum *pixels;
assert(resample_filter != (ResampleFilter *) NULL);
assert(resample_filter->signature == MagickCoreSignature);
status=MagickTrue;
/* GetPixelInfo(resample_filter->image,pixel); */
if ( resample_filter->do_interpolate ) {
status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
resample_filter->interpolate,u0,v0,pixel,resample_filter->exception);
return(status);
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0);
#endif
/*
Does resample area Miss the image Proper?
If and that area a simple solid color - then simply return that color!
This saves a lot of calculation when resampling outside the bounds of
the source image.
However it probably should be expanded to image bounds plus the filters
scaled support size.
*/
hit = 0;
switch ( resample_filter->virtual_pixel ) {
case BackgroundVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case MaskVirtualPixelMethod:
if ( resample_filter->limit_reached
|| u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
|| v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++;
break;
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 + resample_filter->Ulimit < 0.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 + resample_filter->Vlimit < 0.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0 )
)
hit++;
break;
case HorizontalTileVirtualPixelMethod:
if ( v0 + resample_filter->Vlimit < 0.0
|| v0 - resample_filter->Vlimit > (double) resample_filter->image->rows-1.0
)
hit++; /* outside the horizontally tiled images. */
break;
case VerticalTileVirtualPixelMethod:
if ( u0 + resample_filter->Ulimit < 0.0
|| u0 - resample_filter->Ulimit > (double) resample_filter->image->columns-1.0
)
hit++; /* outside the vertically tiled images. */
break;
case DitherVirtualPixelMethod:
if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 + resample_filter->Ulimit < -32.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 + resample_filter->Vlimit < -32.0 )
|| ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+31.0
&& v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+31.0 )
)
hit++;
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
/* resampling of area is always needed - no VP limits */
break;
}
if ( hit ) {
/* The area being resampled is simply a solid color
* just return a single lookup color.
*
* Should this return the users requested interpolated color?
*/
status=InterpolatePixelInfo(resample_filter->image,resample_filter->view,
IntegerInterpolatePixel,u0,v0,pixel,resample_filter->exception);
return(status);
}
/*
When Scaling limits reached, return an 'averaged' result.
*/
if ( resample_filter->limit_reached ) {
switch ( resample_filter->virtual_pixel ) {
/* This is always handled by the above, so no need.
case BackgroundVirtualPixelMethod:
case ConstantVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case GrayVirtualPixelMethod,
case WhiteVirtualPixelMethod
case MaskVirtualPixelMethod:
*/
case UndefinedVirtualPixelMethod:
case EdgeVirtualPixelMethod:
case DitherVirtualPixelMethod:
case HorizontalTileEdgeVirtualPixelMethod:
case VerticalTileEdgeVirtualPixelMethod:
/* We need an average edge pixel, from the correct edge!
How should I calculate an average edge color?
Just returning an averaged neighbourhood,
works well in general, but falls down for TileEdge methods.
This needs to be done properly!!!!!!
*/
status=InterpolatePixelInfo(resample_filter->image,
resample_filter->view,AverageInterpolatePixel,u0,v0,pixel,
resample_filter->exception);
break;
case HorizontalTileVirtualPixelMethod:
case VerticalTileVirtualPixelMethod:
/* just return the background pixel - Is there more direct way? */
status=InterpolatePixelInfo(resample_filter->image,
resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
resample_filter->exception);
break;
case TileVirtualPixelMethod:
case MirrorVirtualPixelMethod:
case RandomVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
default:
/* generate a average color of the WHOLE image */
if ( resample_filter->average_defined == MagickFalse ) {
Image
*average_image;
CacheView
*average_view;
GetPixelInfo(resample_filter->image,(PixelInfo *)
&resample_filter->average_pixel);
resample_filter->average_defined=MagickTrue;
/* Try to get an averaged pixel color of whole image */
average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,
resample_filter->exception);
if (average_image == (Image *) NULL)
{
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
average_view=AcquireVirtualCacheView(average_image,exception);
pixels=GetCacheViewVirtualPixels(average_view,0,0,1,1,
resample_filter->exception);
if (pixels == (const Quantum *) NULL) {
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
*pixel=resample_filter->average_pixel; /* FAILED */
break;
}
GetPixelInfoPixel(resample_filter->image,pixels,
&(resample_filter->average_pixel));
average_view=DestroyCacheView(average_view);
average_image=DestroyImage(average_image);
if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
{
/* CheckerTile is a alpha blend of the image's average pixel
color and the current background color */
/* image's average pixel color */
weight = QuantumScale*((double)
resample_filter->average_pixel.alpha);
resample_filter->average_pixel.red *= weight;
resample_filter->average_pixel.green *= weight;
resample_filter->average_pixel.blue *= weight;
divisor_c = weight;
/* background color */
weight = QuantumScale*((double)
resample_filter->image->background_color.alpha);
resample_filter->average_pixel.red +=
weight*resample_filter->image->background_color.red;
resample_filter->average_pixel.green +=
weight*resample_filter->image->background_color.green;
resample_filter->average_pixel.blue +=
weight*resample_filter->image->background_color.blue;
resample_filter->average_pixel.alpha +=
resample_filter->image->background_color.alpha;
divisor_c += weight;
/* alpha blend */
resample_filter->average_pixel.red /= divisor_c;
resample_filter->average_pixel.green /= divisor_c;
resample_filter->average_pixel.blue /= divisor_c;
resample_filter->average_pixel.alpha /= 2; /* 50% blend */
}
}
*pixel=resample_filter->average_pixel;
break;
}
return(status);
}
/*
Initialize weighted average data collection
*/
hit = 0;
divisor_c = 0.0;
divisor_m = 0.0;
pixel->red = pixel->green = pixel->blue = 0.0;
if (pixel->colorspace == CMYKColorspace)
pixel->black = 0.0;
if (pixel->alpha_trait != UndefinedPixelTrait)
pixel->alpha = 0.0;
/*
Determine the parallelogram bounding box fitted to the ellipse
centered at u0,v0. This area is bounded by the lines...
*/
v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit); /* range of scan lines */
v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);
/* scan line start and width across the parallelogram */
u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
(void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif
/*
Do weighted resampling of all pixels, within the scaled ellipse,
bound by a parallelogram fitted to the ellipse.
*/
DDQ = 2*resample_filter->A;
for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
long uu = ceil(u1); /* actual pixel location (for debug only) */
(void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
u = (ssize_t)ceil(u1); /* first pixel in scanline */
u1 += resample_filter->slope; /* start of next scan line */
/* location of this first pixel, relative to u0,v0 */
U = (double)u-u0;
V = (double)v-v0;
/* Q = ellipse quotient (if Q<F then pixel is inside ellipse) */
Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;
/* get the scanline of pixels for this v */
pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
1,resample_filter->exception);
if (pixels == (const Quantum *) NULL)
return(MagickFalse);
/* count up the weighted pixel colors */
for( u=0; u<uw; u++ ) {
#if FILTER_LUT
/* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
if ( Q < (double)WLUT_WIDTH ) {
weight = resample_filter->filter_lut[(int)Q];
#else
/* Note that the ellipse has been pre-scaled so F = support^2 */
if ( Q < (double)resample_filter->F ) {
weight = GetResizeFilterWeight(resample_filter->filter_def,
sqrt(Q)); /* a SquareRoot! Arrggghhhhh... */
#endif
pixel->alpha += weight*GetPixelAlpha(resample_filter->image,pixels);
divisor_m += weight;
if (pixel->alpha_trait != UndefinedPixelTrait)
weight *= QuantumScale*((double) GetPixelAlpha(resample_filter->image,pixels));
pixel->red += weight*GetPixelRed(resample_filter->image,pixels);
pixel->green += weight*GetPixelGreen(resample_filter->image,pixels);
pixel->blue += weight*GetPixelBlue(resample_filter->image,pixels);
if (pixel->colorspace == CMYKColorspace)
pixel->black += weight*GetPixelBlack(resample_filter->image,pixels);
divisor_c += weight;
hit++;
#if DEBUG_HIT_MISS
/* mark the pixel according to hit/miss of the ellipse */
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
} else {
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1);
(void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n",
(long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1);
}
uu++;
#else
}
#endif
pixels+=GetPixelChannels(resample_filter->image);
Q += DQ;
DQ += DDQ;
}
}
#if DEBUG_ELLIPSE
(void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) );
#endif
/*
Result sanity check -- this should NOT happen
*/
if ( hit == 0 || divisor_m <= MagickEpsilon || divisor_c <= MagickEpsilon ) {
/* not enough pixels, or bad weighting in resampling,
resort to direct interpolation */
#if DEBUG_NO_PIXEL_HIT
pixel->alpha = pixel->red = pixel->green = pixel->blue = 0;
pixel->red = QuantumRange; /* show pixels for which EWA fails */
#else
status=InterpolatePixelInfo(resample_filter->image,
resample_filter->view,resample_filter->interpolate,u0,v0,pixel,
resample_filter->exception);
#endif
return status;
}
/*
Finialize results of resampling
*/
divisor_m = 1.0/divisor_m;
if (pixel->alpha_trait != UndefinedPixelTrait)
pixel->alpha = (double) ClampToQuantum(divisor_m*pixel->alpha);
divisor_c = 1.0/divisor_c;
pixel->red = (double) ClampToQuantum(divisor_c*pixel->red);
pixel->green = (double) ClampToQuantum(divisor_c*pixel->green);
pixel->blue = (double) ClampToQuantum(divisor_c*pixel->blue);
if (pixel->colorspace == CMYKColorspace)
pixel->black = (double) ClampToQuantum(divisor_c*pixel->black);
return(MagickTrue);
}
#if EWA && EWA_CLAMP
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
- C l a m p U p A x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampUpAxes() function converts the input vectors into a major and
% minor axis unit vectors, and their magnitude. This allows us to
% ensure that the ellipse generated is never smaller than the unit
% circle and thus never too small for use in EWA resampling.
%
% This purely mathematical 'magic' was provided by Professor Nicolas
% Robidoux and his Masters student Chantal Racette.
%
% Reference: "We Recommend Singular Value Decomposition", David Austin
% http://www.ams.org/samplings/feature-column/fcarc-svd
%
% By generating major and minor axis vectors, we can actually use the
% ellipse in its "canonical form", by remapping the dx,dy of the
% sampled point into distances along the major and minor axis unit
% vectors.
%
% Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form
*/
static inline void ClampUpAxes(const double dux,
const double dvx,
const double duy,
const double dvy,
double *major_mag,
double *minor_mag,
double *major_unit_x,
double *major_unit_y,
double *minor_unit_x,
double *minor_unit_y)
{
/*
* ClampUpAxes takes an input 2x2 matrix
*
* [ a b ] = [ dux duy ]
* [ c d ] = [ dvx dvy ]
*
* and computes from it the major and minor axis vectors [major_x,
* major_y] and [minor_x,minor_y] of the smallest ellipse containing
* both the unit disk and the ellipse which is the image of the unit
* disk by the linear transformation
*
* [ dux duy ] [S] = [s]
* [ dvx dvy ] [T] = [t]
*
* (The vector [S,T] is the difference between a position in output
* space and [X,Y]; the vector [s,t] is the difference between a
* position in input space and [x,y].)
*/
/*
* Output:
*
* major_mag is the half-length of the major axis of the "new"
* ellipse.
*
* minor_mag is the half-length of the minor axis of the "new"
* ellipse.
*
* major_unit_x is the x-coordinate of the major axis direction vector
* of both the "old" and "new" ellipses.
*
* major_unit_y is the y-coordinate of the major axis direction vector.
*
* minor_unit_x is the x-coordinate of the minor axis direction vector.
*
* minor_unit_y is the y-coordinate of the minor axis direction vector.
*
* Unit vectors are useful for computing projections, in particular,
* to compute the distance between a point in output space and the
* center of a unit disk in output space, using the position of the
* corresponding point [s,t] in input space. Following the clamping,
* the square of this distance is
*
* ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2
* +
* ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2
*
* If such distances will be computed for many [s,t]'s, it makes
* sense to actually compute the reciprocal of major_mag and
* minor_mag and multiply them by the above unit lengths.
*
* Now, if you want to modify the input pair of tangent vectors so
* that it defines the modified ellipse, all you have to do is set
*
* newdux = major_mag * major_unit_x
* newdvx = major_mag * major_unit_y
* newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y
* newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x
*
* and use these tangent vectors as if they were the original ones.
* Usually, this is a drastic change in the tangent vectors even if
* the singular values are not clamped; for example, the minor axis
* vector always points in a direction which is 90 degrees
* counterclockwise from the direction of the major axis vector.
*/
/*
* Discussion:
*
* GOAL: Fix things so that the pullback, in input space, of a disk
* of radius r in output space is an ellipse which contains, at
* least, a disc of radius r. (Make this hold for any r>0.)
*
* ESSENCE OF THE METHOD: Compute the product of the first two
* factors of an SVD of the linear transformation defining the
* ellipse and make sure that both its columns have norm at least 1.
* Because rotations and reflexions map disks to themselves, it is
* not necessary to compute the third (rightmost) factor of the SVD.
*
* DETAILS: Find the singular values and (unit) left singular
* vectors of Jinv, clampling up the singular values to 1, and
* multiply the unit left singular vectors by the new singular
* values in order to get the minor and major ellipse axis vectors.
*
* Image resampling context:
*
* The Jacobian matrix of the transformation at the output point
* under consideration is defined as follows:
*
* Consider the transformation (x,y) -> (X,Y) from input locations
* to output locations. (Anthony Thyssen, elsewhere in resample.c,
* uses the notation (u,v) -> (x,y).)
*
* The Jacobian matrix of the transformation at (x,y) is equal to
*
* J = [ A, B ] = [ dX/dx, dX/dy ]
* [ C, D ] [ dY/dx, dY/dy ]
*
* that is, the vector [A,C] is the tangent vector corresponding to
* input changes in the horizontal direction, and the vector [B,D]
* is the tangent vector corresponding to input changes in the
* vertical direction.
*
* In the context of resampling, it is natural to use the inverse
* Jacobian matrix Jinv because resampling is generally performed by
* pulling pixel locations in the output image back to locations in
* the input image. Jinv is
*
* Jinv = [ a, b ] = [ dx/dX, dx/dY ]
* [ c, d ] [ dy/dX, dy/dY ]
*
* Note: Jinv can be computed from J with the following matrix
* formula:
*
* Jinv = 1/(A*D-B*C) [ D, -B ]
* [ -C, A ]
*
* What we do is modify Jinv so that it generates an ellipse which
* is as close as possible to the original but which contains the
* unit disk. This can be accomplished as follows:
*
* Let
*
* Jinv = U Sigma V^T
*
* be an SVD decomposition of Jinv. (The SVD is not unique, but the
* final ellipse does not depend on the particular SVD.)
*
* We could clamp up the entries of the diagonal matrix Sigma so
* that they are at least 1, and then set
*
* Jinv = U newSigma V^T.
*
* However, we do not need to compute V for the following reason:
* V^T is an orthogonal matrix (that is, it represents a combination
* of rotations and reflexions) so that it maps the unit circle to
* itself. For this reason, the exact value of V does not affect the
* final ellipse, and we can choose V to be the identity
* matrix. This gives
*
* Jinv = U newSigma.
*
* In the end, we return the two diagonal entries of newSigma
* together with the two columns of U.
*/
/*
* ClampUpAxes was written by Nicolas Robidoux and Chantal Racette
* of Laurentian University with insightful suggestions from Anthony
* Thyssen and funding from the National Science and Engineering
* Research Council of Canada. It is distinguished from its
* predecessors by its efficient handling of degenerate cases.
*
* The idea of clamping up the EWA ellipse's major and minor axes so
* that the result contains the reconstruction kernel filter support
* is taken from Andreas Gustaffson's Masters thesis "Interactive
* Image Warping", Helsinki University of Technology, Faculty of
* Information Technology, 59 pages, 1993 (see Section 3.6).
*
* The use of the SVD to clamp up the singular values of the
* Jacobian matrix of the pullback transformation for EWA resampling
* is taken from the astrophysicist Craig DeForest. It is
* implemented in his PDL::Transform code (PDL = Perl Data
* Language).
*/
const double a = dux;
const double b = duy;
const double c = dvx;
const double d = dvy;
/*
* n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the
* squares of the singular values of Jinv.
*/
const double aa = a*a;
const double bb = b*b;
const double cc = c*c;
const double dd = d*d;
/*
* Eigenvectors of n are left singular vectors of Jinv.
*/
const double n11 = aa+bb;
const double n12 = a*c+b*d;
const double n21 = n12;
const double n22 = cc+dd;
const double det = a*d-b*c;
const double twice_det = det+det;
const double frobenius_squared = n11+n22;
const double discriminant =
(frobenius_squared+twice_det)*(frobenius_squared-twice_det);
/*
* In exact arithmetic, discriminant can't be negative. In floating
* point, it can, because of the bad conditioning of SVD
* decompositions done through the associated normal matrix.
*/
const double sqrt_discriminant =
sqrt(discriminant > 0.0 ? discriminant : 0.0);
/*
* s1 is the largest singular value of the inverse Jacobian
* matrix. In other words, its reciprocal is the smallest singular
* value of the Jacobian matrix itself.
* If s1 = 0, both singular values are 0, and any orthogonal pair of
* left and right factors produces a singular decomposition of Jinv.
*/
/*
* Initially, we only compute the squares of the singular values.
*/
const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
/*
* s2 the smallest singular value of the inverse Jacobian
* matrix. Its reciprocal is the largest singular value of the
* Jacobian matrix itself.
*/
const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
const double s1s1minusn11 = s1s1-n11;
const double s1s1minusn22 = s1s1-n22;
/*
* u1, the first column of the U factor of a singular decomposition
* of Jinv, is a (non-normalized) left singular vector corresponding
* to s1. It has entries u11 and u21. We compute u1 from the fact
* that it is an eigenvector of n corresponding to the eigenvalue
* s1^2.
*/
const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
/*
* The following selects the largest row of n-s1^2 I as the one
* which is used to find the eigenvector. If both s1^2-n11 and
* s1^2-n22 are zero, n-s1^2 I is the zero matrix. In that case,
* any vector is an eigenvector; in addition, norm below is equal to
* zero, and, in exact arithmetic, this is the only case in which
* norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
* if norm = 0 safely takes care of all cases.
*/
const double temp_u11 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
const double temp_u21 =
( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
/*
* Finalize the entries of first left singular vector (associated
* with the largest singular value).
*/
const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
/*
* Clamp the singular values up to 1.
*/
*major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
*minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
/*
* Return the unit major and minor axis direction vectors.
*/
*major_unit_x = u11;
*major_unit_y = u21;
*minor_unit_x = -u21;
*minor_unit_y = u11;
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleResampleFilter() does all the calculations needed to resample an image
% at a specific scale, defined by two scaling vectors. This not using
% a orthogonal scaling, but two distorted scaling vectors, to allow the
% generation of a angled ellipse.
%
% As only two derivative scaling vectors are used the center of the ellipse
% must be the center of the lookup. That is any curvature that the
% distortion may produce is discounted.
%
% The input vectors are produced by either finding the derivatives of the
% distortion function, or the partial derivatives from a distortion mapping.
% They do not need to be the orthogonal dx,dy scaling vectors, but can be
% calculated from other derivatives. For example you could use dr,da/r
% polar coordinate vector scaling vectors
%
% If u,v = DistortEquation(x,y) OR u = Fu(x,y); v = Fv(x,y)
% Then the scaling vectors are determined from the derivatives...
% du/dx, dv/dx and du/dy, dv/dy
% If the resulting scaling vectors are orthogonally aligned then...
% dv/dx = 0 and du/dy = 0
% Producing an orthogonally aligned ellipse in source space for the area to
% be resampled.
%
% Note that scaling vectors are different to argument order. Argument order
% is the general order the derivatives are extracted from the distortion
% equations, and not the scaling vectors. As such the middle two values
% may be swapped from what you expect. Caution is advised.
%
% WARNING: It is assumed that any SetResampleFilter() method call will
% always be performed before the ScaleResampleFilter() method, so that the
% size of the ellipse will match the support for the resampling filter being
% used.
%
% The format of the ScaleResampleFilter method is:
%
% void ScaleResampleFilter(const ResampleFilter *resample_filter,
% const double dux,const double duy,const double dvx,const double dvy)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling information defining the
% image being resampled
%
% o dux,duy,dvx,dvy:
% The derivatives or scaling vectors defining the EWA ellipse.
% NOTE: watch the order, which is based on the order derivatives
% are usually determined from distortion equations (see above).
% The middle two values may need to be swapped if you are thinking
% in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;  /* implicit ellipse: A*u^2 + B*u*v + C*v^2 = F */

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  resample_filter->limit_reached = MagickFalse;
  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf; duy=%lf; dvy=%lf;\n",
    dux, dvx, duy, dvy);
#endif
  /* Find Ellipse Coefficents such that
       A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
       du/dx,dv/dx and du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients.
     However when magnifying images, the scaling vectors will be small
     resulting in a ellipse that is too small to sample properly.
     As such we need to clamp the major/minor axis to a minimum of 1.0
     to prevent it getting too small.
  */
#if EWA_CLAMP
  { double major_mag,
      minor_mag,
      major_x,
      major_y,
      minor_x,
      minor_y;

  /* Clamp the axes (via SVD) so the sampling ellipse always covers at
     least a unit circle, then scale the unit axis vectors back up to
     their (possibly clamped) magnitudes. */
  ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag,
    &major_x, &major_y, &minor_x, &minor_y);
  major_x *= major_mag;  major_y *= major_mag;
  minor_x *= minor_mag;  minor_y *= minor_mag;
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n",
    major_x, major_y, minor_x, minor_y);
#endif
  /* Rebuild the conic coefficients from the clamped axis vectors. */
  A = major_y*major_y+minor_y*minor_y;
  B = -2.0*(major_x*major_y+minor_x*minor_y);
  C = major_x*major_x+minor_x*minor_x;
  F = major_mag*minor_mag;
  F *= F; /* square it */
  }
#else /* raw unclamped EWA */
  A = dvx*dvx+dvy*dvy;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy;
  F = dux*dvy-duy*dvx;
  F *= F; /* square it */
#endif /* EWA_CLAMP */
#else /* HQ_EWA */
  /*
    This is Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his
    thesis, which adds a unit circle to the elliptical area so as to do both
    Reconstruction and Prefiltering of the pixels in the resampling. It also
    means it is always likely to have at least 4 pixels within the area of the
    ellipse, for weighted averaging. No scaling will result with F == 4.0 and
    a circle of radius 2.0, and F smaller than this means magnification is
    being used.
    NOTE: This method produces a very blurry result at near unity scale while
    producing perfect results for strong minification and magnifications.
    However filter support is fixed to 2.0 (no good for Windowed Sinc filters)
  */
  A = dvx*dvx+dvy*dvy+1;
  B = -2.0*(dux*dvx+duy*dvy);
  C = dux*dux+duy*duy+1;
  F = A*C - B*B/4;
#endif
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F);

  /* Figure out the various information directly about the ellipse.
     This information currently not needed at this time, but may be
     needed later for better limit determination.
     It is also good to have as a record for future debugging
  */
  { double alpha, beta, gamma, Major, Minor;
    double Eccentricity, Ellipse_Area, Ellipse_Angle;
    alpha = A+C;
    beta = A-C;
    gamma = sqrt(beta*beta + B*B );

    if ( alpha - gamma <= MagickEpsilon )
      Major=MagickMaximumValue;
    else
      Major=sqrt(2*F/(alpha - gamma));
    Minor = sqrt(2*F/(alpha + gamma));

    (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor );

    /* other information about ellipse include... */
    Eccentricity = Major/Minor;
    Ellipse_Area = MagickPI*Major*Minor;
    Ellipse_Angle = atan2(B, A-C);

    (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n",
      (double) RadiansToDegrees(Ellipse_Angle), Ellipse_Area);
  }
#endif
  /* If one or both of the scaling vectors is impossibly large
     (producing a very large raw F value), we may as well not bother
     doing any form of resampling since resampled area is very large.
     In this case some alternative means of pixel sampling, such as
     the average of the whole image is needed to get a reasonable
     result. Calculate only as needed.
  */
  if ( (4*A*C - B*B) > MagickMaximumValue ) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }
  /* Scale ellipse to match the filters support
     (that is, multiply F by the square of the support)
     Simpler to just multiply it by the support twice!
  */
  F *= resample_filter->support;
  F *= resample_filter->support;
  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));
  /* Horizontally aligned parallelogram fitted to Ellipse */
  resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */
#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
    resample_filter->Ulimit, resample_filter->Vlimit,
    resample_filter->Uwidth, resample_filter->slope );
#endif
  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
   */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
        > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }
  /* Scale ellipse formula to directly index the Filter Lookup Table */
  { register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilter() set the resampling filter lookup table based on a
% specific filter. Note that the filter is used as a radial filter not as a
% two pass orthogonally aligned resampling filter.
%
% The format of the SetResampleFilter method is:
%
% void SetResampleFilter(ResampleFilter *resample_filter,
% const FilterType filter)
%
% A description of each parameter follows:
%
% o resample_filter: the resampling information structure
%
% o filter: the resize filter for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterType filter)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);

  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  /* Default cylindrical filter is a Cubic Keys filter */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  if ( resample_filter->filter == PointFilter ) {
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }

  /* Acquire the cylindrical (radial) resize filter; on failure report
     the problem and fall back to direct 'point' interpolation rather
     than aborting. */
  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL) {
    (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
      ModuleError, "UnableToSetFilteringValue",
      "Fall back to Interpolated 'Point' filter");
    resample_filter->filter = PointFilter;
    resample_filter->do_interpolate = MagickTrue;
    return; /* EWA turned off - nothing more to do */
  }

  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
   */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0; /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function.
     The LUT is indexed by squared (scaled) radius, so entries are not
     uniformly spaced along the support range. */
  { register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
        GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif

  /*
    Adjust the scaling of the default unit circle.
    This assumes that any real scaling changes will always
    take place AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if 0
  /*
    This is old code kept as a reference only. Basically it generates
    a Gaussian bell curve, with sigma = 0.5 if the support is 2.0
    Create Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA gaussian lookup would use exp(Q*ALPHA)
    where Q = distance squared from 0.0 (center) to 1.0 (edge)
    and ALPHA = -4.0*ln(2.0) ==> -2.77258872223978123767
    The table is of length 1024, and equates to support radius of 2.0
    thus needs to be scaled by ALPHA*4/1024 and any blur factor squared
    It comes from reference code provided by Fred Weinhaus.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
#endif
#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp single
#endif
  {
    if (IsStringTrue(GetImageArtifact(resample_filter->image,
        "resample:verbose")) != MagickFalse)
      {
        register int
          Q;
        double
          r_scale;

        /* Debug output of the filter weighting LUT.
           Gnuplot the LUT data, the x scale index has been adjusted
             plot [0:2][-.2:1] "lut.dat" with lines
           The filter values should be normalized for comparison
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values) for '%s' filter\n",
          WLUT_WIDTH, CommandOptionToMnemonic(MagickFilterOptions,
          resample_filter->filter) );
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# As such its distribution is not uniform.\n");
        printf("#\n");
        printf("# The X value is the support distance for the Y weight\n");
        printf("# so you can use gnuplot to plot this cylindrical filter\n");
        printf("# plot [0:2][-.2:1] \"lut.dat\" with lines\n");
        printf("#\n");

        /* Scale radius so the filter LUT covers the full support range */
        r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
            GetMagickPrecision(),sqrt((double)Q)*r_scale,
            GetMagickPrecision(),resample_filter->filter_lut[Q] );
        printf("\n\n"); /* generate a 'break' in gnuplot if multiple outputs */
      }
    /* Output the above once only for each image, and each setting
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
    */
  }
#endif /* FILTER_LUT */
  return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterInterpolateMethod() sets the resample filter interpolation
% method.
%
% The format of the SetResampleFilterInterpolateMethod method is:
%
% MagickBooleanType SetResampleFilterInterpolateMethod(
% ResampleFilter *resample_filter,const PixelInterpolateMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the interpolation method.
%
*/
MagickExport MagickBooleanType SetResampleFilterInterpolateMethod(
  ResampleFilter *resample_filter,const PixelInterpolateMethod method)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  /* Record the interpolation method used whenever the filter falls
     back to (or is configured for) direct pixel interpolation. */
  resample_filter->interpolate=method;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetResampleFilterVirtualPixelMethod() changes the virtual pixel method
% associated with the specified resample filter.
%
% The format of the SetResampleFilterVirtualPixelMethod method is:
%
% MagickBooleanType SetResampleFilterVirtualPixelMethod(
% ResampleFilter *resample_filter,const VirtualPixelMethod method)
%
% A description of each parameter follows:
%
% o resample_filter: the resample filter.
%
% o method: the virtual pixel method.
%
*/
MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod(
  ResampleFilter *resample_filter,const VirtualPixelMethod method)
{
  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickCoreSignature);
  assert(resample_filter->image != (Image *) NULL);
  if (resample_filter->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      resample_filter->image->filename);
  resample_filter->virtual_pixel=method;
  /* Push the setting down to the cache view, but never propagate the
     'undefined' method (the view keeps its current method instead). */
  if (method != UndefinedVirtualPixelMethod)
    (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method);
  return(MagickTrue);
}
|
PAddOP.h | #ifndef PAddOP
#define PAddOP
/*
* PAddOP.h:
* (pointwise) add
*
* Created on: June 13, 2017
* Author: mszhang
*/
#include "Eigen/Dense"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
class PAddNode : public Node {
public:
vector<PNode> ins;
// Destructor: inputs are borrowed from the graph (not owned), so only
// the references are dropped here.
~PAddNode() {
    ins.clear();
}
public:
// Construct an empty point-wise addition node with its type tag set.
PAddNode() : Node() {
    node_type = "point-add";
    ins.clear();
}
// Reset the node for reuse: drop the input references first, then let
// the base class clear the value/loss state.
inline void clearValue() {
    ins.clear();
    Node::clearValue();
}
public:
// Register a variable number of inputs for point-wise addition and add
// this node to the computation graph.  Inputs whose dimension does not
// match this node's dimension are skipped with a console warning rather
// than aborting; an empty input list is rejected outright.
void forward(Graph *cg, const vector<PNode>& x) {
    if (x.empty()) {
        std::cout << "empty inputs for add" << std::endl;
        return;
    }
    ins.clear();
    // NOTE(review): this overload validates against x[i]->val.dim while
    // the fixed-arity overloads validate against x[i]->dim — confirm the
    // two are always kept equal once a node is initialized.
    for (int i = 0; i < (int)x.size(); i++) {   // cast avoids signed/unsigned mismatch
        if (x[i]->val.dim == dim) {
            ins.push_back(x[i]);
        }
        else {
            std::cout << "dim does not match" << std::endl;
        }
    }
    degree = 0;
    int nSize = ins.size();
    for (int i = 0; i < nSize; ++i) {
        ins[i]->addParent(this);
    }
    cg->addNode(this);
}
// Single-input variant: validates the one input and registers the node.
void forward(Graph *cg, PNode x1) {
    ins.clear();
    PNode inputs[1] = { x1 };
    for (int k = 0; k < 1; ++k) {
        // Keep only inputs whose dimension matches this node's.
        if (inputs[k]->dim == dim) {
            ins.push_back(inputs[k]);
        } else {
            std::cout << "dim does not match" << std::endl;
        }
    }
    degree = 0;
    for (int i = 0; i < (int)ins.size(); ++i) {
        ins[i]->addParent(this);
    }
    cg->addNode(this);
}
// Two-input variant: same validation as the vector form, fixed arity.
void forward(Graph *cg, PNode x1, PNode x2) {
    ins.clear();
    PNode inputs[2] = { x1, x2 };
    for (int k = 0; k < 2; ++k) {
        // Keep only inputs whose dimension matches this node's.
        if (inputs[k]->dim == dim) {
            ins.push_back(inputs[k]);
        } else {
            std::cout << "dim does not match" << std::endl;
        }
    }
    degree = 0;
    for (int i = 0; i < (int)ins.size(); ++i) {
        ins[i]->addParent(this);
    }
    cg->addNode(this);
}
// Three-input variant: same validation as the vector form, fixed arity.
void forward(Graph *cg, PNode x1, PNode x2, PNode x3) {
    ins.clear();
    PNode inputs[3] = { x1, x2, x3 };
    for (int k = 0; k < 3; ++k) {
        // Keep only inputs whose dimension matches this node's.
        if (inputs[k]->dim == dim) {
            ins.push_back(inputs[k]);
        } else {
            std::cout << "dim does not match" << std::endl;
        }
    }
    degree = 0;
    for (int i = 0; i < (int)ins.size(); ++i) {
        ins[i]->addParent(this);
    }
    cg->addNode(this);
}
// Four-input variant: same validation as the vector form, fixed arity.
void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4) {
    ins.clear();
    PNode inputs[4] = { x1, x2, x3, x4 };
    for (int k = 0; k < 4; ++k) {
        // Keep only inputs whose dimension matches this node's.
        if (inputs[k]->dim == dim) {
            ins.push_back(inputs[k]);
        } else {
            std::cout << "dim does not match" << std::endl;
        }
    }
    degree = 0;
    for (int i = 0; i < (int)ins.size(); ++i) {
        ins[i]->addParent(this);
    }
    cg->addNode(this);
}
// Wire five input nodes into this adder. Each input is accepted only
// when its dimension matches ours (otherwise a warning is printed and it
// is skipped), then this node is registered with the computation graph.
void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5) {
    ins.clear();
    for (PNode in : {x1, x2, x3, x4, x5}) {
        if (in->dim == dim) {
            ins.push_back(in);
        } else {
            std::cout << "dim does not match" << std::endl;
        }
    }
    degree = 0;
    for (int i = 0; i < (int)ins.size(); ++i) {
        ins[i]->addParent(this);
    }
    cg->addNode(this);
}
// Wire six input nodes into this adder. Each input is accepted only
// when its dimension matches ours (otherwise a warning is printed and it
// is skipped), then this node is registered with the computation graph.
void forward(Graph *cg, PNode x1, PNode x2, PNode x3, PNode x4, PNode x5, PNode x6) {
    ins.clear();
    for (PNode in : {x1, x2, x3, x4, x5, x6}) {
        if (in->dim == dim) {
            ins.push_back(in);
        } else {
            std::cout << "dim does not match" << std::endl;
        }
    }
    degree = 0;
    for (int i = 0; i < (int)ins.size(); ++i) {
        ins[i]->addParent(this);
    }
    cg->addNode(this);
}
public:
// Elementwise sum of all accepted inputs: val[j] = sum over i of
// ins[i]->val[j], for each of the dim components.
inline void compute() {
    val.zero();
    for (int i = 0; i < (int)ins.size(); ++i) {
        for (int j = 0; j < dim; ++j) {
            val[j] += ins[i]->val[j];
        }
    }
}
// Addition routes the output gradient unchanged: every input accumulates
// this node's loss, component by component.
void backward() {
    for (int i = 0; i < (int)ins.size(); ++i) {
        for (int j = 0; j < dim; ++j) {
            ins[i]->loss[j] += loss[j];
        }
    }
}
public:
inline PExecute generate(bool bTrain, dtype cur_drop_factor);
// better to rewrite for deep understanding
// Two PAdd nodes are batch-compatible when the base-node properties match
// and they take the same number of inputs.
bool typeEqual(PNode other) {
    if (!Node::typeEqual(other)) {
        return false;
    }
    return ins.size() == static_cast<PAddNode*>(other)->ins.size();
}
// Mix the input count into the base hash so adders of different arity
// fall into different batches.
size_t typeHashCode() const override {
    size_t base = Node::typeHashCode();
    return base ^ (std::hash<int>{}(ins.size()) << 1);
}
};
// Batched executor for PAddNode: runs forward/backward over a batch of
// pointwise-add nodes, on the GPU when USE_GPU is defined, else on CPU.
class PAddExecute : public Execute {
public:
// Number of inputs per node; assumes every node in the batch has the
// same arity (typeEqual/typeHashCode group nodes by input count).
int in_count;
// Common output dimension of the batched nodes.
int dim;
// Per-(component, node) dropout mask, filled on the device (GPU path).
Tensor2D drop_mask;
public:
Tensor1D x, y;
int sumDim;
bool bTrain;
#if USE_GPU
// GPU forward: gather device pointers for all inputs and outputs, then
// launch one fused add+dropout kernel over the whole batch.
void forward() {
int count = batch.size();
drop_mask.init(dim, count);
CalculateDropMask(count, dim, drop_mask);
// in_vals[i][b] = device value pointer of input i of batch node b.
std::vector<std::vector<dtype*>> in_vals;
in_vals.reserve(in_count);
for (int i = 0; i < in_count; ++i) {
std::vector<dtype*> ins;
ins.reserve(count);
for (PNode n : batch) {
PAddNode *padd = static_cast<PAddNode*>(n);
ins.push_back(padd->ins.at(i)->val.value);
}
in_vals.push_back(ins);
}
std::vector<dtype *> outs;
outs.reserve(count);
for (PNode n : batch) {
PAddNode *padd = static_cast<PAddNode*>(n);
outs.push_back(padd->val.value);
}
n3ldg_cuda::PAddForward(in_vals, count, dim, in_count, drop_mask.value,
drop_factor, outs);
#if TEST_CUDA
// Cross-check: replay the same mask and computation on the CPU and
// verify the device results match.
drop_mask.copyFromDeviceToHost();
for (int i = 0; i < count; ++i) {
for (int j = 0; j < dim; ++j) {
dtype v = drop_mask[j][i];
batch[i]->drop_mask[j] = v <= drop_factor ? 0 : 1;
}
}
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
for (Node *n : batch) {
n3ldg_cuda::Assert(n->val.verify("PAdd forward"));
}
#endif
}
#else
// CPU forward: each node computes its own sum then applies dropout.
void forward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
batch[idx]->forward_drop(bTrain, drop_factor);
}
}
#endif
#if USE_GPU
// GPU backward: gather device loss pointers and launch one fused
// gradient-routing kernel (the dropout mask from forward is reused).
void backward() {
int count = batch.size();
// in_losses[i][b] = device loss pointer of input i of batch node b.
std::vector<std::vector<dtype*>> in_losses;
in_losses.reserve(in_count);
for (int i = 0; i < in_count; ++i) {
std::vector<dtype*> ins;
ins.reserve(count);
for (PNode n : batch) {
PAddNode *padd = static_cast<PAddNode*>(n);
ins.push_back(padd->ins.at(i)->loss.value);
}
in_losses.push_back(ins);
}
std::vector<dtype *> out_losses;
out_losses.reserve(count);
for (PNode n : batch) {
PAddNode *padd = static_cast<PAddNode*>(n);
out_losses.push_back(padd->loss.value);
}
n3ldg_cuda::PAddBackward(out_losses, count, dim, in_count,
drop_mask.value, drop_factor, in_losses);
#if TEST_CUDA
// Cross-check the accumulated input gradients against a CPU replay.
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
for (Node *n : batch) {
PAddNode *add = static_cast<PAddNode*>(n);
for (Node *in : add->ins) {
n3ldg_cuda::Assert(in->loss.verify("PAddExecute backward"));
}
}
#endif
}
#else
// CPU backward: undo dropout on the loss, then route gradients.
void backward() {
int count = batch.size();
//#pragma omp parallel for
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward_drop();
batch[idx]->backward();
}
}
#endif
};
// Create the executor for this node, seeding the batch with the node
// itself and copying over the parameters batching needs (dropout factor,
// input arity, output dimension). Ownership of the executor passes to
// the caller / graph machinery.
inline PExecute PAddNode::generate(bool bTrain, dtype cur_drop_factor) {
    PAddExecute *executor = new PAddExecute();
    executor->bTrain = bTrain;
    executor->drop_factor = cur_drop_factor * drop_value;
    executor->in_count = ins.size();
    executor->dim = dim;
    executor->batch.push_back(this);
    return executor;
}
#endif
|
mandel-omp.c | /*
* Sequential Mandelbrot program
*
* This program computes and displays all or part of the Mandelbrot
* set. By default, it examines all points in the complex plane
* that have both real and imaginary parts between -2 and 2.
* Command-line parameters allow zooming in on a specific part of
* this range.
*
* Usage:
* mandel [-i maxiter -c x0 y0 -s size -w windowsize]
* where
* maxiter denotes the maximum number of iterations at each point -- by default 1000
* x0, y0, and size specify the range to examine (a square
* centered at (x0 + iy0) of size 2*size by 2*size -- by default,
* a square of size 4 by 4 centered at the origin)
 *    windowsize denotes the size of the image (display window) to compute
*
* Input: none, except the optional command-line arguments
* Output: a graphical display as described in Wilkinson & Allen,
* displayed using the X Window system, plus text output to
* standard output showing the above parameters, plus execution
* time in seconds.
*
* Code based on the original code from Web site for Wilkinson and Allen's
* text on parallel programming:
* http://www.cs.uncc.edu/~abw/parallel/par_prog/
*
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <unistd.h>
#include <malloc.h>
#if _DISPLAY_
#include <X11/Xlib.h>
#include <X11/Xutil.h>
#include <X11/Xos.h>
#endif
#include <sys/time.h>
/* Wall-clock time in microseconds since the Unix epoch, as a double. */
double getusec_() {
    struct timeval now;
    gettimeofday(&now, NULL);
    return 1e6 * (double) now.tv_sec + (double) now.tv_usec;
}
#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
stamp = stamp/1e6;\
printf ("%s: %0.6fs\n",(_m), stamp);
/* Default values for things. */
#define N 2 /* size of problem space (x, y from -N to N) */
#define NPIXELS 800 /* size of display window in pixels */
int row, col; // variables used to traverse the problem space
/* Structure definition for complex numbers */
typedef struct {
double real, imag;
} complex;
#if _DISPLAY_
/* Functions for GUI */
#include "mandelbrot-gui.h" /* has setup(), interact() */
#endif
/*
 * Compute the Mandelbrot escape count for every pixel of a height x width
 * grid covering the rectangle whose lower-left corner is
 * (real_min, imag_min), with per-pixel steps scale_real / scale_imag.
 * Each point c is iterated z <- z^2 + c until |z| > 2 (guaranteed
 * divergence) or maxiter iterations. With _DISPLAY_ the point is drawn to
 * the X display; otherwise the count k is stored in output[row][col]
 * (1 <= k <= maxiter).
 */
void mandelbrot(int height,
        int width,
        double real_min,
        double imag_min,
        double scale_real,
        double scale_imag,
        int maxiter,
#if _DISPLAY_
        int setup_return,
        Display *display,
        Window win,
        GC gc,
        double scale_color,
        double min_color)
#else
        int ** output)
#endif
{
    /* |z| > 2 guarantees divergence, so compare |z|^2 against 2*2. This
     * is intrinsic to the iteration, independent of the viewing window. */
    const double escape_radius_sq = 4.0;
    /* Calculate points and save/display */
    //#pragma omp for schedule(runtime)
    /* BUGFIX: row and col used to be the file-scope globals, shared by
     * every OpenMP task -- a data race that also let tasks read stale
     * indices. They are now loop-local, and row is explicitly captured
     * per task with firstprivate. */
    for (int row = 0; row < height; ++row) {
        #pragma omp task firstprivate(row)
        for (int col = 0; col < width; ++col) {
            double z_real = 0, z_imag = 0;
            /* Scale display coordinates to actual region */
            double c_real = real_min + ((double) col * scale_real);
            double c_imag = imag_min + ((double) (height-1-row) * scale_imag);
            /* height-1-row so y axis displays
             * with larger values at top
             */
            /* Calculate z0, z1, .... until divergence or maximum iterations */
            int k = 0;
            double lengthsq, temp;
            do {
                temp = z_real*z_real - z_imag*z_imag + c_real;
                z_imag = 2*z_real*z_imag + c_imag;
                z_real = temp;
                lengthsq = z_real*z_real + z_imag*z_imag;
                ++k;
            } while (lengthsq < escape_radius_sq && k < maxiter);
#if _DISPLAY_
            /* Scale color and display point */
            long color = (long) ((k-1) * scale_color) + min_color;
            if (setup_return == EXIT_SUCCESS) {
                XSetForeground (display, gc, color);
                XDrawPoint (display, win, gc, col, row);
            }
#else
            output[row][col]=k;
#endif
        }
    }
}
/*
 * Driver: parse the command-line options, compute the Mandelbrot set over
 * the requested square, and either display it (X11 build, _DISPLAY_) or
 * optionally dump the raw iteration counts to "mandel.out" (-o).
 */
int main(int argc, char *argv[]) {
int maxiter = 1000;
double real_min;
double real_max;
double imag_min;
double imag_max;
int width = NPIXELS; /* dimensions of display window */
int height = NPIXELS;
double size=N, x0 = 0, y0 = 0;
#if _DISPLAY_
Display *display;
Window win;
GC gc;
int setup_return;
long min_color = 0, max_color = 0;
double scale_color;
#else
int ** output;
FILE *fp = NULL;
#endif
double scale_real, scale_imag;
/* Process command-line arguments */
/* NOTE(review): each option blindly consumes argv[++i]; a flag given as
 * the last argument reads one past the option list -- confirm callers
 * always supply the value. */
for (int i=1; i<argc; i++) {
if (strcmp(argv[i], "-i")==0) {
maxiter = atoi(argv[++i]);
}
else if (strcmp(argv[i], "-w")==0) {
width = atoi(argv[++i]);
height = width;
}
else if (strcmp(argv[i], "-s")==0) {
size = atof(argv[++i]);
}
#if !_DISPLAY_
else if (strcmp(argv[i], "-o")==0) {
if((fp=fopen("mandel.out", "wb"))==NULL) {
fprintf(stderr, "Unable to open file\n");
return EXIT_FAILURE;
}
}
#endif
else if (strcmp(argv[i], "-c")==0) {
x0 = atof(argv[++i]);
y0 = atof(argv[++i]);
}
else {
/* Unknown option: print usage and exit. */
#if _DISPLAY_
fprintf(stderr, "Usage: %s [-i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
#else
fprintf(stderr, "Usage: %s [-o -i maxiter -w windowsize -c x0 y0 -s size]\n", argv[0]);
fprintf(stderr, " -o to write computed image to disk (default no file generated)\n");
#endif
fprintf(stderr, " -i to specify maximum number of iterations at each point (default 1000)\n");
#if _DISPLAY_
fprintf(stderr, " -w to specify the size of the display window (default 800x800 pixels)\n");
#else
fprintf(stderr, " -w to specify the size of the image to compute (default 800x800 elements)\n");
#endif
fprintf(stderr, " -c to specify the center x0+iy0 of the square to compute (default origin)\n");
fprintf(stderr, " -s to specify the size of the square to compute (default 2, i.e. size 4 by 4)\n");
return EXIT_FAILURE;
}
}
/* Bounds of the square of half-width `size` centered at (x0, y0). */
real_min = x0 - size;
real_max = x0 + size;
imag_min = y0 - size;
imag_max = y0 + size;
/* Produce text output */
fprintf(stdout, "\n");
fprintf(stdout, "Mandelbrot program\n");
fprintf(stdout, "center = (%g, %g), size = %g\n",
(real_max + real_min)/2, (imag_max + imag_min)/2,
(real_max - real_min)/2);
fprintf(stdout, "maximum iterations = %d\n", maxiter);
fprintf(stdout, "\n");
#if _DISPLAY_
/* Initialize for graphical display */
setup_return =
setup(width, height, &display, &win, &gc, &min_color, &max_color);
if (setup_return != EXIT_SUCCESS) {
fprintf(stderr, "Unable to initialize display, continuing\n");
return EXIT_FAILURE;
}
#else
/* NOTE(review): malloc results are not checked; a failed allocation
 * crashes inside mandelbrot(). The matrix is never freed (freed by
 * process exit). */
output = malloc(height*sizeof(int *));
for (int row = 0; row < height; ++row)
output[row] = malloc(width*sizeof(int));
#endif
/* Compute factors to scale computational region to window */
scale_real = (double) (real_max - real_min) / (double) width;
scale_imag = (double) (imag_max - imag_min) / (double) height;
#if _DISPLAY_
/* Compute factor for color scaling */
scale_color = (double) (max_color - min_color) / (double) (maxiter - 1);
#endif
/* Start timing */
double stamp;
START_COUNT_TIME;
#if _DISPLAY_
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
setup_return, display, win, gc, scale_color, min_color);
#else
mandelbrot(height,width,real_min, imag_min, scale_real, scale_imag, maxiter,
output);
#endif
/* End timing */
STOP_COUNT_TIME("Total execution time");
/* Be sure all output is written */
#if _DISPLAY_
if (setup_return == EXIT_SUCCESS) {
XFlush (display);
}
#else
/* NOTE(review): fwrite returns size_t, compared against the signed int
 * `width` (implicit conversion); fp is never fclose'd (flushed at exit). */
if (fp != NULL)
{
for (int row = 0; row < height; ++row)
if(fwrite(output[row], sizeof(int), width, fp) != width) {
fprintf(stderr, "Output file not written correctly\n");
}
}
#endif
#if _DISPLAY_
/* Wait for user response, then exit program */
if (setup_return == EXIT_SUCCESS) {
interact(display, &win, width, height,
real_min, real_max, imag_min, imag_max);
}
return EXIT_SUCCESS;
#endif
/* In the non-display build control falls off the end: C99 main
 * implicitly returns 0. */
}
|
divsufsort.c | /*
* divsufsort.c for libdivsufsort
* Copyright (c) 2003-2008 Yuta Mori All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "../include/divsufsort_private.h"
#ifdef _OPENMP
# include <omp.h>
#endif
/*- Private Functions -*/
/* Sorts suffixes of type B*. */
/* Sort the type B* suffixes of T[0..n-1]. Returns m, the number of type
   B* suffixes; on return SA[0..m-1] holds their sorted order and the
   bucket arrays hold the start/end points consumed by construct_SA(). */
static
saidx_t
sort_typeBstar(const sauchar_t *T, saidx_t *SA,
saidx_t *bucket_A, saidx_t *bucket_B,
saidx_t n) {
saidx_t *PAb, *ISAb, *buf;
#ifdef _OPENMP
saidx_t *curbuf;
saidx_t l;
#endif
saidx_t i, j, k, t, m, bufsize;
saint_t c0, c1;
#ifdef _OPENMP
saint_t d0, d1;
int tmp;
#endif
/* Initialize bucket arrays. */
for(i = 0; i < BUCKET_A_SIZE; ++i) { bucket_A[i] = 0; }
for(i = 0; i < BUCKET_B_SIZE; ++i) { bucket_B[i] = 0; }
/* Count the number of occurrences of the first one or two characters of each
type A, B and B* suffix. Moreover, store the beginning position of all
type B* suffixes into the array SA. */
for(i = n - 1, m = n, c0 = T[n - 1]; 0 <= i;) {
/* type A suffix. */
do { ++BUCKET_A(c1 = c0); } while((0 <= --i) && ((c0 = T[i]) >= c1));
if(0 <= i) {
/* type B* suffix. */
++BUCKET_BSTAR(c0, c1);
SA[--m] = i;
/* type B suffix. */
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) {
++BUCKET_B(c0, c1);
}
}
}
m = n - m;
/*
note:
A type B* suffix is lexicographically smaller than a type B suffix that
begins with the same first two characters.
*/
/* Calculate the index of start/end point of each bucket. */
for(c0 = 0, i = 0, j = 0; c0 < ALPHABET_SIZE; ++c0) {
t = i + BUCKET_A(c0);
BUCKET_A(c0) = i + j; /* start point */
i = t + BUCKET_B(c0, c0);
for(c1 = c0 + 1; c1 < ALPHABET_SIZE; ++c1) {
j += BUCKET_BSTAR(c0, c1);
BUCKET_BSTAR(c0, c1) = j; /* end point */
i += BUCKET_B(c0, c1);
}
}
if(0 < m) {
/* Sort the type B* suffixes by their first two characters. */
PAb = SA + n - m; ISAb = SA + m;
for(i = m - 2; 0 <= i; --i) {
t = PAb[i], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = i;
}
t = PAb[m - 1], c0 = T[t], c1 = T[t + 1];
SA[--BUCKET_BSTAR(c0, c1)] = m - 1;
/* Sort the type B* substrings using sssort. */
#ifdef _OPENMP
/* Parallel path: threads claim the next non-trivial bucket under a
critical section, then sssort it with a per-thread scratch buffer. */
tmp = omp_get_max_threads();
buf = SA + m, bufsize = (n - (2 * m)) / tmp;
c0 = ALPHABET_SIZE - 2, c1 = ALPHABET_SIZE - 1, j = m;
#pragma omp parallel default(shared) private(curbuf, k, l, d0, d1, tmp)
{
tmp = omp_get_thread_num();
curbuf = buf + tmp * bufsize;
k = 0;
for(;;) {
#pragma omp critical(sssort_lock)
{
if(0 < (l = j)) {
d0 = c0, d1 = c1;
do {
k = BUCKET_BSTAR(d0, d1);
if(--d1 <= d0) {
d1 = ALPHABET_SIZE - 1;
if(--d0 < 0) { break; }
}
} while(((l - k) <= 1) && (0 < (l = k)));
c0 = d0, c1 = d1, j = k;
}
}
if(l == 0) { break; }
sssort(T, PAb, SA + k, SA + l,
curbuf, bufsize, 2, n, *(SA + k) == (m - 1));
}
}
#else
/* Sequential path: walk the (c0, c1) buckets from last to first. */
buf = SA + m, bufsize = n - (2 * m);
for(c0 = ALPHABET_SIZE - 2, j = m; 0 < j; --c0) {
for(c1 = ALPHABET_SIZE - 1; c0 < c1; j = i, --c1) {
i = BUCKET_BSTAR(c0, c1);
if(1 < (j - i)) {
sssort(T, PAb, SA + i, SA + j,
buf, bufsize, 2, n, *(SA + i) == (m - 1));
}
}
}
#endif
/* Compute ranks of type B* substrings. */
/* NOTE: negative SA entries here are bitwise-complemented values left
by sssort to flag runs of equal substrings; they are restored below. */
for(i = m - 1; 0 <= i; --i) {
if(0 <= SA[i]) {
j = i;
do { ISAb[SA[i]] = i; } while((0 <= --i) && (0 <= SA[i]));
SA[i + 1] = i - j;
if(i <= 0) { break; }
}
j = i;
do { ISAb[SA[i] = ~SA[i]] = j; } while(SA[--i] < 0);
ISAb[SA[i]] = j;
}
/* Construct the inverse suffix array of type B* suffixes using trsort. */
trsort(ISAb, SA, m, 1);
/* Set the sorted order of type B* suffixes. */
for(i = n - 1, j = m, c0 = T[n - 1]; 0 <= i;) {
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) >= c1); --i, c1 = c0) { }
if(0 <= i) {
t = i;
for(--i, c1 = c0; (0 <= i) && ((c0 = T[i]) <= c1); --i, c1 = c0) { }
SA[ISAb[--j]] = ((t == 0) || (1 < (t - i))) ? t : ~t;
}
}
/* Calculate the index of start/end point of each bucket. */
BUCKET_B(ALPHABET_SIZE - 1, ALPHABET_SIZE - 1) = n; /* end point */
for(c0 = ALPHABET_SIZE - 2, k = m - 1; 0 <= c0; --c0) {
i = BUCKET_A(c0 + 1) - 1;
for(c1 = ALPHABET_SIZE - 1; c0 < c1; --c1) {
t = i - BUCKET_B(c0, c1);
BUCKET_B(c0, c1) = i; /* end point */
/* Move all type B* suffixes to the correct position. */
for(i = t, j = BUCKET_BSTAR(c0, c1);
j <= k;
--i, --k) { SA[i] = SA[k]; }
}
BUCKET_BSTAR(c0, c0 + 1) = i - BUCKET_B(c0, c0) + 1; /* start point */
BUCKET_B(c0, c0) = i; /* end point */
}
}
return m;
}
/* Constructs the suffix array by using the sorted order of type B* suffixes. */
/* Construct the full suffix array in SA from the sorted type B* suffixes
   produced by sort_typeBstar(). Entries are temporarily stored
   bitwise-complemented (~s) to flag already-processed positions; see the
   `0 < s` tests below. */
static
void
construct_SA(const sauchar_t *T, saidx_t *SA,
saidx_t *bucket_A, saidx_t *bucket_B,
saidx_t n, saidx_t m) {
saidx_t *i, *j, *k;
saidx_t s;
saint_t c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
*j = ~s;
c0 = T[--s];
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
/* Bucket changed: remember the previous write cursor and
switch to the bucket of the new first character. */
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else {
assert(((s == 0) && (T[s] == c1)) || (s < 0));
*j = ~s;
}
}
}
}
/* Construct the suffix array by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~(n - 1) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
if((s == 0) || (T[s - 1] < c0)) { s = ~s; }
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else {
assert(s < 0);
/* Restore the flagged (complemented) entry to its real value. */
*i = ~s;
}
}
}
#if 0
/* Constructs the burrows-wheeler transformed string directly
by using the sorted order of type B* suffixes.
NOTE: compiled out (#if 0) in this build; kept for reference and for
the divbwt() entry point below, which is also disabled. */
static
saidx_t
construct_BWT(const sauchar_t *T, saidx_t *SA,
saidx_t *bucket_A, saidx_t *bucket_B,
saidx_t n, saidx_t m) {
saidx_t *i, *j, *k, *orig;
saidx_t s;
saint_t c0, c1, c2;
if(0 < m) {
/* Construct the sorted order of type B suffixes by using
the sorted order of type B* suffixes. */
for(c1 = ALPHABET_SIZE - 2; 0 <= c1; --c1) {
/* Scan the suffix array from right to left. */
for(i = SA + BUCKET_BSTAR(c1, c1 + 1),
j = SA + BUCKET_A(c1 + 1) - 1, k = NULL, c2 = -1;
i <= j;
--j) {
if(0 < (s = *j)) {
assert(T[s] == c1);
assert(((s + 1) < n) && (T[s] <= T[s + 1]));
assert(T[s - 1] <= T[s]);
/* Unlike construct_SA, the slot is overwritten with the BWT
character (complemented) instead of the suffix index. */
c0 = T[--s];
*j = ~((saidx_t)c0);
if((0 < s) && (T[s - 1] > c0)) { s = ~s; }
if(c0 != c2) {
if(0 <= c2) { BUCKET_B(c2, c1) = k - SA; }
k = SA + BUCKET_B(c2 = c0, c1);
}
assert(k < j);
*k-- = s;
} else if(s != 0) {
*j = ~s;
#ifndef NDEBUG
} else {
assert(T[s] == c1);
#endif
}
}
}
}
/* Construct the BWTed string by using
the sorted order of type B suffixes. */
k = SA + BUCKET_A(c2 = T[n - 1]);
*k++ = (T[n - 2] < c2) ? ~((saidx_t)T[n - 2]) : (n - 1);
/* Scan the suffix array from left to right. */
for(i = SA, j = SA + n, orig = SA; i < j; ++i) {
if(0 < (s = *i)) {
assert(T[s - 1] >= T[s]);
c0 = T[--s];
*i = c0;
if((0 < s) && (T[s - 1] < c0)) { s = ~((saidx_t)T[s - 1]); }
if(c0 != c2) {
BUCKET_A(c2) = k - SA;
k = SA + BUCKET_A(c2 = c0);
}
assert(i < k);
*k++ = s;
} else if(s != 0) {
*i = ~s;
} else {
/* s == 0 marks the position of the original string's rotation. */
orig = i;
}
}
/* Return the primary index (position of the original string). */
return orig - SA;
}
#endif
/*---------------------------------------------------------------------------*/
/**
* Initialize suffix array context
*
* @param ctx suffix array context
* @param zalloc memory allocator
* @param zfree memory deallocator
* @param opaque object passed to memory functions
*
* @return 0 for success, or non-zero in case of an error
*/
int divsufsort_init(divsufsort_ctx_t *ctx, void* (*zalloc)(void *opaque, unsigned int items, unsigned int size), void(*zfree)(void *opaque, void *address), void *opaque) {
  /* Allocate both bucket arrays up front; on any failure release whatever
     was obtained and report an error. */
  ctx->bucket_B = NULL;
  ctx->bucket_A = (saidx_t *)zalloc(opaque, BUCKET_A_SIZE, sizeof(saidx_t));
  if (ctx->bucket_A != NULL) {
    ctx->bucket_B = (saidx_t *)zalloc(opaque, BUCKET_B_SIZE, sizeof(saidx_t));
    if (ctx->bucket_B != NULL)
      return 0;
  }
  divsufsort_destroy(ctx, zfree, opaque);
  return -1;
}
/**
* Destroy suffix array context
*
* @param ctx suffix array context to destroy
* @param zfree memory deallocator
* @param opaque object passed to memory functions
*/
void divsufsort_destroy(divsufsort_ctx_t *ctx, void(*zfree)(void *opaque, void *address), void *opaque) {
  /* Free B first, then A, nulling each pointer so the call is idempotent
     and safe on a partially initialized context. */
  if (ctx->bucket_B != NULL) {
    zfree(opaque, ctx->bucket_B);
    ctx->bucket_B = NULL;
  }
  if (ctx->bucket_A != NULL) {
    zfree(opaque, ctx->bucket_A);
    ctx->bucket_A = NULL;
  }
}
/*- Function -*/
/* Build the suffix array SA[0..n-1] of T[0..n-1] using the context's
   preallocated bucket arrays. Returns 0 on success, -1 for invalid
   arguments, -2 if the context buckets were never allocated. */
saint_t
divsufsort_build_array(divsufsort_ctx_t *ctx, const sauchar_t *T, saidx_t *SA, saidx_t n) {
  /* Check arguments. */
  if((T == NULL) || (SA == NULL) || (n < 0)) { return -1; }
  /* Trivial inputs are handled directly. */
  if(n == 0) { return 0; }
  if(n == 1) { SA[0] = 0; return 0; }
  if(n == 2) {
    saidx_t first = (T[0] < T[1]);
    SA[first ^ 1] = 0;
    SA[first] = 1;
    return 0;
  }
  if((ctx->bucket_A == NULL) || (ctx->bucket_B == NULL)) { return -2; }
  /* Suffixsort. */
  {
    saidx_t m = sort_typeBstar(T, SA, ctx->bucket_A, ctx->bucket_B, n);
    construct_SA(T, SA, ctx->bucket_A, ctx->bucket_B, n, m);
  }
  return 0;
}
#if 0
/* Stand-alone Burrows-Wheeler transform entry point. Compiled out in
   this build: it depends on construct_BWT(), which is also disabled.
   Returns the primary index + 1, n for trivial inputs, -1 on bad
   arguments, -2 on allocation failure. A may supply scratch space of
   n + 1 elements; if NULL a buffer is allocated internally. */
saidx_t
divbwt(const sauchar_t *T, sauchar_t *U, saidx_t *A, saidx_t n) {
saidx_t *B;
saidx_t *bucket_A, *bucket_B;
saidx_t m, pidx, i;
/* Check arguments. */
if((T == NULL) || (U == NULL) || (n < 0)) { return -1; }
else if(n <= 1) { if(n == 1) { U[0] = T[0]; } return n; }
if((B = A) == NULL) { B = (saidx_t *)malloc((size_t)(n + 1) * sizeof(saidx_t)); }
bucket_A = (saidx_t *)malloc(BUCKET_A_SIZE * sizeof(saidx_t));
bucket_B = (saidx_t *)malloc(BUCKET_B_SIZE * sizeof(saidx_t));
/* Burrows-Wheeler Transform. */
if((B != NULL) && (bucket_A != NULL) && (bucket_B != NULL)) {
m = sort_typeBstar(T, B, bucket_A, bucket_B, n);
pidx = construct_BWT(T, B, bucket_A, bucket_B, n, m);
/* Copy to output string. */
U[0] = T[n - 1];
for(i = 0; i < pidx; ++i) { U[i + 1] = (sauchar_t)B[i]; }
for(i += 1; i < n; ++i) { U[i] = (sauchar_t)B[i]; }
pidx += 1;
} else {
pidx = -2;
}
free(bucket_B);
free(bucket_A);
if(A == NULL) { free(B); }
return pidx;
}
/* Library version string (from the build configuration). */
const char *
divsufsort_version(void) {
return PROJECT_VERSION_FULL;
}
#endif
|
threshold.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD %
% T H H R R E SS H H O O L D D %
% T HHHHH RRRR EEE SSS HHHHH O O L D D %
% T H H R R E SS H H O O L D D %
% T H H R R EEEEE SSSSS H H OOO LLLLL DDDD %
% %
% %
% MagickCore Image Threshold Methods %
% %
% Software Design %
% Cristy %
% October 1996 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/property.h"
#include "MagickCore/blob.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/configure.h"
#include "MagickCore/constitute.h"
#include "MagickCore/decorate.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/effect.h"
#include "MagickCore/fx.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/montage.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/shear.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/xml-tree.h"
#include "MagickCore/xml-tree-private.h"
/*
Define declarations.
*/
#define ThresholdsFilename "thresholds.xml"
/*
Typedef declarations.
*/
/* A dither threshold map parsed from thresholds.xml: a width x height
   matrix of integer levels interpreted relative to `divisor` (see the
   <levels width= height= divisor=> attributes in MinimalThresholdMap). */
struct _ThresholdMap
{
char
*map_id,      /* map name, e.g. "checks" (the XML map/alias attribute) */
*description; /* human-readable <description> text */
size_t
width,        /* columns of the level matrix */
height;       /* rows of the level matrix */
ssize_t
divisor,      /* levels are thresholds out of this divisor */
*levels;      /* width*height level values -- presumably row-major; confirm in GetThresholdMapFile */
};
/*
Static declarations.
*/
static const char
*MinimalThresholdMap =
"<?xml version=\"1.0\"?>"
"<thresholds>"
" <threshold map=\"threshold\" alias=\"1x1\">"
" <description>Threshold 1x1 (non-dither)</description>"
" <levels width=\"1\" height=\"1\" divisor=\"2\">"
" 1"
" </levels>"
" </threshold>"
" <threshold map=\"checks\" alias=\"2x1\">"
" <description>Checkerboard 2x1 (dither)</description>"
" <levels width=\"2\" height=\"2\" divisor=\"3\">"
" 1 2"
" 2 1"
" </levels>"
" </threshold>"
"</thresholds>";
/*
Forward declarations.
*/
static ThresholdMap
*GetThresholdMapFile(const char *,const char *,const char *,ExceptionInfo *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A d a p t i v e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AdaptiveThresholdImage() selects an individual threshold for each pixel
% based on the range of intensity values in its local neighborhood. This
% allows for thresholding of an image whose global intensity histogram
% doesn't contain distinctive peaks.
%
% The format of the AdaptiveThresholdImage method is:
%
% Image *AdaptiveThresholdImage(const Image *image,const size_t width,
% const size_t height,const double bias,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the width of the local neighborhood.
%
% o height: the height of the local neighborhood.
%
% o bias: the mean bias.
%
% o exception: return any errors or warnings in this structure.
%
*/
/* Threshold each pixel against the mean of its width x height local
   neighborhood (plus `bias`), writing 0 or QuantumRange into a cloned
   image. Rows are processed in parallel under OpenMP. */
MagickExport Image *AdaptiveThresholdImage(const Image *image,
const size_t width,const size_t height,const double bias,
ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"
CacheView
*image_view,
*threshold_view;
Image
*threshold_image;
MagickBooleanType
status;
MagickOffsetType
progress;
MagickSizeType
number_pixels;
ssize_t
y;
/*
Initialize threshold image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
threshold_image=CloneImage(image,0,0,MagickTrue,exception);
if (threshold_image == (Image *) NULL)
return((Image *) NULL);
/* A degenerate neighborhood means nothing to threshold: return the clone. */
if ((width == 0) || (height == 0))
return(threshold_image);
status=SetImageStorageClass(threshold_image,DirectClass,exception);
if (status == MagickFalse)
{
threshold_image=DestroyImage(threshold_image);
return((Image *) NULL);
}
/*
Threshold image.
*/
status=MagickTrue;
progress=0;
number_pixels=(MagickSizeType) width*height;
image_view=AcquireVirtualCacheView(image,exception);
threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,threshold_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
channel_bias[MaxPixelChannels],
channel_sum[MaxPixelChannels];
register const Quantum
*magick_restrict p,
*magick_restrict pixels;
register Quantum
*magick_restrict q;
register ssize_t
i,
x;
ssize_t
center,
u,
v;
if (status == MagickFalse)
continue;
/* Read a band of `height` rows, padded by width/2 on each side, so the
neighborhood of every pixel in row y is available. */
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
(height/2L),image->columns+width,height,exception);
q=QueueCacheViewAuthenticPixels(threshold_view,0,y,threshold_image->columns,
1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
/* Offset (in Quantum units) of the row's first pixel inside the band. */
center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*(height/2L)+
GetPixelChannels(image)*(width/2);
/* Seed the sliding-window sums for x == 0: channel_sum holds the full
window total, channel_bias the last-column contribution that will be
retired when the window slides right. */
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
pixels=p;
channel_bias[channel]=0.0;
channel_sum[channel]=0.0;
for (v=0; v < (ssize_t) height; v++)
{
for (u=0; u < (ssize_t) width; u++)
{
if (u == (ssize_t) (width-1))
channel_bias[channel]+=pixels[i];
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
mean;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(threshold_traits == UndefinedPixelTrait))
continue;
if ((threshold_traits & CopyPixelTrait) != 0)
{
SetPixelChannel(threshold_image,channel,p[center+i],q);
continue;
}
/* Slide the window one column right: drop the retired column,
add the incoming one. */
channel_sum[channel]-=channel_bias[channel];
channel_bias[channel]=0.0;
pixels=p;
for (v=0; v < (ssize_t) height; v++)
{
channel_bias[channel]+=pixels[i];
pixels+=(width-1)*GetPixelChannels(image);
channel_sum[channel]+=pixels[i];
pixels+=GetPixelChannels(image)*(image->columns+1);
}
mean=(double) (channel_sum[channel]/number_pixels+bias);
SetPixelChannel(threshold_image,channel,(Quantum) ((double)
p[center+i] <= mean ? 0 : QuantumRange),q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(threshold_image);
}
if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
/* Progress counter is shared across threads; increment atomically. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
threshold_image->type=image->type;
threshold_view=DestroyCacheView(threshold_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
threshold_image=DestroyImage(threshold_image);
return(threshold_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A u t o T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AutoThresholdImage() automatically performs image thresholding
% dependent on which method you specify.
%
% The format of the AutoThresholdImage method is:
%
% MagickBooleanType AutoThresholdImage(Image *image,
% const AutoThresholdMethod method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: The image to auto-threshold.
%
% o method: choose from Kapur, OTSU, or Triangle.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  KapurThreshold() selects a threshold by maximizing the sum of the entropies
  of the black (<= threshold) and white (> threshold) partitions of the
  normalized histogram.  The result is expressed as a percentage
  (0.0 .. 100.0) of the intensity range; -1.0 is returned on
  memory-allocation failure (with an exception raised).
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity 255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) || (white_entropy == (double *) NULL))
    {
      /*
        Release whichever scratch buffers were acquired before failing.
      */
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  epsilon=MagickMinimumValue;  /* guards the divisions and log() below */
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy: bins 0..j, weighted by the partition's total mass.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy: bins j+1..MaxIntensity, weighted by the remaining mass.
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
  */
  maximum_entropy=black_entropy[0]+white_entropy[0];
  threshold=0;
  for (j=1; j <= MaxIntensity; j++)
    if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
      {
        maximum_entropy=black_entropy[j]+white_entropy[j];
        threshold=(size_t) j;
      }
  /*
    Free resources.
  */
  white_entropy=(double *) RelinquishMagickMemory(white_entropy);
  black_entropy=(double *) RelinquishMagickMemory(black_entropy);
  cumulative_histogram=(double *) RelinquishMagickMemory(cumulative_histogram);
  return(100.0*threshold/MaxIntensity);
}
/*
  OTSUThreshold() selects a threshold by maximizing the inter-class variance
  between the below- and above-threshold partitions of the normalized
  histogram (Otsu's method).  The result is expressed as a percentage
  (0.0 .. 100.0) of the intensity range; -1.0 is returned on
  memory-allocation failure (with an exception raised).

  The histogram argument is already a probability density, so it is used
  directly; the former probability[] scratch copy and the per-bin sigma[]
  array (whose elements were never re-read) have been removed, eliminating
  two allocations and their failure paths without changing the result.
*/
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    max_sigma,
    *myu,
    *omega,
    sigma,
    threshold;

  register ssize_t
    i;

  /*
    Compute optimal threshold from maximization of inter-class variance.
  */
  myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
  omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
  if ((myu == (double *) NULL) || (omega == (double *) NULL))
    {
      if (omega != (double *) NULL)
        omega=(double *) RelinquishMagickMemory(omega);
      if (myu != (double *) NULL)
        myu=(double *) RelinquishMagickMemory(myu);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Generate probability of graylevels (omega, cumulative) and mean value
    for separation (myu, cumulative first moment).
  */
  omega[0]=histogram[0];
  myu[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    omega[i]=omega[i-1]+histogram[i];
    myu[i]=myu[i-1]+i*histogram[i];
  }
  /*
    Sigma maximization: inter-class variance and compute optimal threshold.
  */
  threshold=0;
  max_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    sigma=0.0;
    if ((omega[i] != 0.0) && (omega[i] != 1.0))
      sigma=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
        omega[i]));
    if (sigma > max_sigma)
      {
        max_sigma=sigma;
        threshold=(double) i;
      }
  }
  /*
    Free resources.
  */
  myu=(double *) RelinquishMagickMemory(myu);
  omega=(double *) RelinquishMagickMemory(omega);
  return(100.0*threshold/MaxIntensity);
}
/*
  TriangleThreshold() selects a threshold with the triangle algorithm: draw
  a line from the histogram's tallest bin to the far end of its populated
  range, then place the threshold at the bin whose histogram point lies
  furthest from that line.  The result is expressed as a percentage
  (0.0 .. 100.0) of the intensity range.
*/
static double TriangleThreshold(const double *histogram)
{
  double
    a,
    b,
    c,
    distance,
    inverse_ratio,
    max_distance,
    peak_count,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    peak,
    start,
    threshold;

  /*
    Locate the populated extent of the histogram: first non-empty bin,
    last non-empty bin, and the tallest bin (the triangle's apex).
  */
  start=0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  peak=0;
  peak_count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > peak_count)
      {
        peak=i;
        peak_count=histogram[i];
      }
  /*
    Build the apex-to-tail line in implicit form a*x+b*y+c=0; the tail is
    whichever side of the peak spans more bins.
  */
  x1=(double) peak;
  y1=histogram[peak];
  x2=(double) end;
  if ((peak-start) >= (end-peak))
    x2=(double) start;
  y2=0.0;
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  /*
    Scan the chosen side of the peak for the bin furthest from the line;
    the sign of the signed distance restricts the search to bins below it.
  */
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    {
      for (i=start; i < peak; i++)
      {
        segment=inverse_ratio*(a*i+b*histogram[i]+c);
        distance=sqrt(segment*segment);
        if ((distance > max_distance) && (segment > 0.0))
          {
            threshold=i;
            max_distance=distance;
          }
      }
    }
  else
    {
      for (i=end; i > peak; i--)
      {
        segment=inverse_ratio*(a*i+b*histogram[i]+c);
        distance=sqrt(segment*segment);
        if ((distance > max_distance) && (segment < 0.0))
          {
            threshold=i;
            max_distance=distance;
          }
      }
    }
  return(100.0*threshold/MaxIntensity);
}
MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        /*
          A pixel-cache failure previously fell through silently, yielding
          a threshold computed from a partial histogram; fail explicitly.
        */
        status=MagickFalse;
        break;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    {
      histogram=(double *) RelinquishMagickMemory(histogram);
      return(MagickFalse);
    }
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram; each helper returns a percentage of
    the intensity range, or a negative value on failure.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(histogram);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image; record the chosen threshold as an image property.
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,exception);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B i l e v e l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BilevelImage() changes the value of individual pixels based on the
% intensity of each pixel channel. The result is a high-contrast image.
%
% More precisely each channel value of the image is 'thresholded' so that if
% it is equal to or less than the given value it is set to zero, while any
% value greater than that given is set to its maximum or QuantumRange.
%
% This function is what is used to implement the "-threshold" operator for
% the command line API.
%
% If the default channel setting is given the image is thresholded using just
% the gray 'intensity' of the image, rather than the individual channels.
%
% The format of the BilevelImage method is:
%
% MagickBooleanType BilevelImage(Image *image,const double threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold values.
%
% o exception: return any errors or warnings in this structure.
%
% Aside: You can get the same results as operator using LevelImages()
% with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,const double threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          With the default channel mask, threshold on the pixel intensity;
          otherwise threshold each selected channel on its own value.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        /*
          Pass progress, not progress++: the counter is already advanced by
          the atomic increment above.  The former post-increment advanced
          it twice per row, unlike the sibling threshold loops in this file.
        */
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B l a c k T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BlackThresholdImage() is like ThresholdImage() but forces all pixels below
% the threshold into black while leaving all pixels at or above the threshold
% unchanged.
%
% The format of the BlackThresholdImage method is:
%
% MagickBooleanType BlackThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold geometry: rho seeds every channel; sigma/xi/psi/chi
    override green, blue, alpha (or black and alpha for CMYK) in turn.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /*
        CMYK shifts the argument positions: psi is black, chi is alpha.
      */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /*
        A '%' suffix scales the thresholds from percentages to quantum.
      */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image: force channels below the threshold to zero.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          With the default channel mask, compare the pixel intensity;
          otherwise compare each selected channel against its threshold.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l a m p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClampImage() sets each pixel whose value is below zero to zero and any
% pixel whose value is above the quantum range to the quantum range (e.g.
% 65535) otherwise the pixel value remains unchanged.
%
% The format of the ClampImage method is:
%
% MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        Palette image: clamping the colormap entries is sufficient; sync
        the pixel cache with the updated colormap before returning.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image: restrict every updatable channel to [0, QuantumRange].
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ClampImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyThresholdMap() de-allocates the given ThresholdMap.
%
% The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
% A description of each parameter follows.
%
% o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  /*
    Release the map's level matrix, description and id strings, then the
    map record itself; the deallocated pointer (NULL, by MagickCore
    convention) is returned so callers can write map=DestroyThresholdMap(map).
  */
  assert(map != (ThresholdMap *) NULL);
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  map=(ThresholdMap *) RelinquishMagickMemory(map);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t T h r e s h o l d M a p %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMap() loads and searches one or more threshold map files for the
% map matching the given name or alias.
%
% The format of the GetThresholdMap method is:
%
% ThresholdMap *GetThresholdMap(const char *map_id,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o map_id: ID of the map to look for.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    Search the built-in minimal map list first; when that yields nothing
    and zero-configuration support is disabled, fall back to scanning the
    threshold.xml configuration sources in order.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  if (map == (ThresholdMap *) NULL)
    {
      const StringInfo
        *option;

      LinkedListInfo
        *options;

      options=GetConfigureOptions(ThresholdsFilename,exception);
      for (option=(const StringInfo *) GetNextValueInLinkedList(options);
           option != (const StringInfo *) NULL;
           option=(const StringInfo *) GetNextValueInLinkedList(options))
      {
        map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
          GetStringInfoPath(option),map_id,exception);
        if (map != (ThresholdMap *) NULL)
          break;
      }
      options=DestroyConfigureOptions(options);
    }
#endif
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetThresholdMapFile() looks for a given threshold map name or alias in the
% given XML file data, and returns the allocated map when found.
%
% The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o map_id: ID of the map to look for in XML list.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Find the <threshold> element whose "map" or "alias" attribute matches
    map_id.
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      /*
        No match in this file: NULL without an exception so the caller can
        try the next configuration source.
      */
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    A matching map must provide <description> and <levels> children.
  */
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    <levels> must carry non-zero width and height attributes and a divisor
    of at least 2.
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Parse exactly width*height whitespace-separated level values from the
    element content; each must lie in [0, divisor].
    NOTE(review): the quantum passed here is height*sizeof(level); overflow
    of that product before the call is not checked at this site — confirm
    whether a sanity check on width/height exists upstream.
  */
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,map->height*
    sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"UnableToAcquireThresholdMap");
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /*
    Try to consume one more token; if it parses, the map declared more
    values than width*height and is rejected.
  */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ L i s t T h r e s h o l d M a p F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMapFile() lists the threshold maps and their descriptions
% in the given XML file data.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
% const char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: An pointer to the output FILE.
%
% o xml: The threshold map list in XML format.
%
% o filename: The threshold map XML filename.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *name,
    *text;

  XMLTreeInfo
    *description,
    *node,
    *root;

  /*
    Emit one table row per <threshold> element in the given XML blob,
    showing its map name, alias and description.
  */
  assert(xml != (char *) NULL);
  assert(file != (FILE *) NULL);
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  root=NewXMLTree(xml,exception);
  if (root == (XMLTreeInfo *) NULL)
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  for (node=GetXMLTreeChild(root,"threshold"); node != (XMLTreeInfo *) NULL;
       node=GetNextXMLTreeTag(node))
  {
    name=GetXMLTreeAttribute(node,"map");
    if (name == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        root=DestroyXMLTree(root);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(node,"alias");
    description=GetXMLTreeChild(node,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",name);
        root=DestroyXMLTree(root);
        return(MagickFalse);
      }
    text=GetXMLTreeContent(description);
    if (text == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", name);
        root=DestroyXMLTree(root);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",name,alias ? alias : "",
      text);
  }
  root=DestroyXMLTree(root);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% L i s t T h r e s h o l d M a p s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ListThresholdMaps() lists the threshold maps and their descriptions
% as defined by "threshold.xml" to a file.
%
% The format of the ListThresholdMaps method is:
%
% MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o file: An pointer to the output FILE.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  /*
    Walk every threshold.xml configuration source and list the maps each
    defines; MagickTrue is returned only when all sources listed cleanly.
  */
  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  for (option=(const StringInfo *) GetNextValueInLinkedList(options);
       option != (const StringInfo *) NULL;
       option=(const StringInfo *) GetNextValueInLinkedList(options))
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% O r d e r e d D i t h e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() will perform an ordered dither based on a number
% of pre-defined dithering threshold maps, but over multiple intensity
% levels, which can be different for different channels, according to the
% input argument.
%
% The format of the OrderedDitherImage method is:
%
% MagickBooleanType OrderedDitherImage(Image *image,
% const char *threshold_map,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold_map: A string containing the name of the threshold dither
% map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
% Any level number less than 2 will be equivalent to 2, and means only
% binary dithering will be applied to each color channel.
%
% No numbers also means a 2 level (bitmap) dither will be applied to all
% channels, while a single number is the number of levels applied to each
% channel in sequence. More numbers will be applied in turn to each of
% the color channels.
%
% For example: "o3x3,6" will generate a 6 level posterization of the
% image with a ordered 3x3 diffused pixel dither being applied between
%      each level. While checker,8,8,4 will produce a 332 colormapped image
% with only a single checkerboard hash pattern (50% grey) between each
% color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType OrderedDitherImage(Image *image,
  const char *threshold_map,ExceptionInfo *exception)
{
#define DitherImageTag "Dither/Image"

  CacheView
    *image_view;

  char
    token[MagickPathExtent];

  const char
    *p;

  double
    levels[MaxPixelChannels];  /* sized to match every loop bound below */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  ThresholdMap
    *map;

  /*
    Apply an ordered dither over multiple intensity levels.  threshold_map
    names a pre-defined dither map, optionally followed by per-channel level
    counts (e.g. "o3x3,6").  A NULL map is a no-op.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (threshold_map == (const char *) NULL)
    return(MagickTrue);
  /*
    Extract the map name: skip leading whitespace/commas, then copy up to
    the next whitespace, comma, or MagickPathExtent-1 characters.
  */
  p=(char *) threshold_map;
  while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) &&
         (*p != '\0'))
    p++;
  threshold_map=p;
  while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) &&
         (*p != '\0'))
  {
    if ((p-threshold_map) >= (MagickPathExtent-1))
      break;
    token[p-threshold_map]=(*p);
    p++;
  }
  token[p-threshold_map]='\0';
  map=GetThresholdMap(token,exception);
  if (map == (ThresholdMap *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "InvalidArgument","%s : '%s'","ordered-dither",threshold_map);
      return(MagickFalse);
    }
  /*
    Parse the optional level counts.  Default is 2 (binary) per channel; a
    single number applies to every channel, further numbers apply in turn.
  */
  for (i=0; i < MaxPixelChannels; i++)
    levels[i]=2.0;
  p=strchr((char *) threshold_map,',');
  if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0))
    {
      (void) GetNextToken(p,&p,MagickPathExtent,token);
      for (i=0; (i < MaxPixelChannels); i++)
        levels[i]=StringToDouble(token,(char **) NULL);
      for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++)
      {
        (void) GetNextToken(p,&p,MagickPathExtent,token);
        if (*token == ',')
          (void) GetNextToken(p,&p,MagickPathExtent,token);
        levels[i]=StringToDouble(token,(char **) NULL);
      }
    }
  /*
    Levels are used as (count-1) posterization steps below; a level of 0
    (after this adjustment, |level| < 1) disables dithering for the channel.
  */
  for (i=0; i < MaxPixelChannels; i++)
    if (fabs(levels[i]) >= 1)
      levels[i]-=1.0;
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Dither image: each row is processed independently in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      ssize_t
        n;

      n=0;  /* indexes levels[] over updatable channels only */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        ssize_t
          level,
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (fabs(levels[n]) < MagickEpsilon)
          {
            n++;  /* dithering disabled for this channel */
            continue;
          }
        /*
          Split the scaled pixel into a posterization level and a residual
          threshold, then round the residual against the dither map cell.
        */
        threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1));
        level=threshold/(map->divisor-1);
        threshold-=level*(map->divisor-1);
        q[i]=ClampToQuantum((double) (level+(threshold >=
          map->levels[(x % map->width)+map->width*(y % map->height)]))*
          QuantumRange/levels[n]);
        n++;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,DitherImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  map=DestroyThresholdMap(map);
  /*
    Propagate row failures to the caller (previously MagickTrue was returned
    unconditionally, silently swallowing cache/progress failures).
  */
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P e r c e p t i b l e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PerceptibleImage() set each pixel whose value is less than |epsilon| to
% epsilon or -epsilon (whichever is closer) otherwise the pixel value remains
% unchanged.
%
% The format of the PerceptibleImage method is:
%
% MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
const double epsilon)
{
double
sign;
sign=(double) quantum < 0.0 ? -1.0 : 1.0;
if ((sign*quantum) >= epsilon)
return(quantum);
return((Quantum) (sign*epsilon));
}
MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Raise every channel value whose magnitude is below |epsilon| to
    +/-epsilon (see PerceptibleThreshold); larger values are unchanged.
    Returns MagickTrue on success.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      /*
        PseudoClass: threshold the colormap entries instead of the pixels,
        then re-sync pixels from the updated colormap.
      */
      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image: process each row in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        /*
          NOTE(review): unlike the other threshold loops in this file, which
          test (traits & UpdatePixelTrait), this skips only channels with no
          traits at all -- presumably intentional, but worth confirming.
        */
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,PerceptibleImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n d o m T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RandomThresholdImage() changes the value of individual pixels based on the
% intensity of each pixel compared to a random threshold. The result is a
% low-contrast, two color image.
%
% The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o min_threshold, max_threshold: Specify the lower and upper bounds of the
%      random threshold. These values range from 0 to QuantumRange.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold, const double max_threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  /*
    Threshold each updatable channel against a random value: values below
    min_threshold (or above max_threshold) are compared against that bound
    instead, so out-of-range intensities threshold deterministically.
    (The former function-scope PixelInfo 'threshold' was dead code -- it was
    shadowed by the loop-local double and never read -- and was removed.)
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Random threshold image: one RandomInfo per thread for reproducibility.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Clamp the comparison point to [min_threshold,max_threshold];
          within range, draw a fresh random threshold per channel.
        */
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a n g e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RangeThresholdImage() applies soft and hard thresholding.
%
% The format of the RangeThresholdImage method is:
%
% MagickBooleanType RangeThresholdImage(Image *image,
% const double low_black,const double low_white,const double high_white,
% const double high_black,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o low_black: Define the minimum black threshold value.
%
% o low_white: Define the minimum white threshold value.
%
% o high_white: Define the maximum white threshold value.
%
% o high_black: Define the maximum black threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RangeThresholdImage(Image *image,
  const double low_black,const double low_white,const double high_white,
  const double high_black,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Soft/hard range threshold: intensities below low_black or above
    high_black go to black; [low_white,high_white] goes to white; the two
    in-between bands ramp linearly.  Returns MagickTrue on success.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Range threshold image: process each row in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /*
        With the default channel mask, threshold against the pixel's
        intensity; with an explicit mask, each channel uses its own value.
      */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        /*
          Five-band ladder; the final else only catches values that match
          no comparison (e.g. NaN) and maps them to black as well.
        */
        if (pixel < low_black)
          q[i]=0;
        else
          if ((pixel >= low_black) && (pixel < low_white))
            q[i]=ClampToQuantum(QuantumRange*
              PerceptibleReciprocal(low_white-low_black)*(pixel-low_black));
          else
            if ((pixel >= low_white) && (pixel <= high_white))
              q[i]=QuantumRange;
            else
              if ((pixel > high_white) && (pixel <= high_black))
                q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal(
                  high_black-high_white)*(high_black-pixel));
              else
                if (pixel > high_black)
                  q[i]=0;
                else
                  q[i]=0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% W h i t e T h r e s h o l d I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
% the threshold into white while leaving all pixels at or below the threshold
% unchanged.
%
% The format of the WhiteThresholdImage method is:
%
% MagickBooleanType WhiteThresholdImage(Image *image,
% const char *threshold,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o threshold: Define the threshold value.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  /*
    Force every channel value above its per-channel threshold to white;
    values at or below the threshold are unchanged.  A NULL thresholds
    string is a no-op.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the geometry string into per-channel thresholds: rho applies to
    every channel by default; sigma/xi/psi override green/blue/alpha.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  /*
    For CMYK, the fourth value (psi) is the black channel and the fifth
    (chi) becomes alpha.
  */
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  /*
    A trailing '%' scales every threshold from percent to quantum range.
  */
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image: process each row in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /*
        With the default channel mask, compare intensity; with an explicit
        mask, compare each channel's own value.
      */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed=SetImageProgress(image,ThresholdImageTag,progress,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% John Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/animate.h"
#include "magick/artifact.h"
#include "magick/blob.h"
#include "magick/blob-private.h"
#include "magick/cache.h"
#include "magick/cache-private.h"
#include "magick/cache-view.h"
#include "magick/client.h"
#include "magick/color.h"
#include "magick/color-private.h"
#include "magick/colormap.h"
#include "magick/colorspace.h"
#include "magick/colorspace-private.h"
#include "magick/composite.h"
#include "magick/composite-private.h"
#include "magick/compress.h"
#include "magick/constitute.h"
#include "magick/deprecate.h"
#include "magick/display.h"
#include "magick/draw.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/geometry.h"
#include "magick/histogram.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magic.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/module.h"
#include "magick/monitor.h"
#include "magick/monitor-private.h"
#include "magick/option.h"
#include "magick/paint.h"
#include "magick/pixel-private.h"
#include "magick/profile.h"
#include "magick/property.h"
#include "magick/quantize.h"
#include "magick/random_.h"
#include "magick/segment.h"
#include "magick/semaphore.h"
#include "magick/signature-private.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/threshold.h"
#include "magick/timer.h"
#include "magick/utility.h"
#include "magick/version.h"
#include "magick/xwindow-private.h"
/*
Constant declaration.
*/
const char
  BackgroundColor[] = "#ffffff",  /* white */
  BorderColor[] = "#dfdfdf",  /* gray */
  DefaultTileFrame[] = "15x15+3+3",  /* default montage tile frame geometry */
  DefaultTileGeometry[] = "120x120+4+3>",  /* default montage tile geometry */
  DefaultTileLabel[] = "%f\n%G\n%b",  /* default montage tile label format */
  ForegroundColor[] = "#000",  /* black */
  LoadImageTag[] = "Load/Image",  /* progress-monitor tag */
  LoadImagesTag[] = "Load/Images",  /* progress-monitor tag */
  MatteColor[] = "#bdbdbd",  /* gray */
  PSDensityGeometry[] = "72.0x72.0",  /* default Postscript density */
  PSPageGeometry[] = "612x792",  /* default Postscript page size (points) */
  SaveImageTag[] = "Save/Image",  /* progress-monitor tag */
  SaveImagesTag[] = "Save/Images",  /* progress-monitor tag */
  TransparentColor[] = "#00000000";  /* transparent black */

const double
  DefaultResolution = 72.0;  /* default image resolution */
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info)
{
  const char
    *option;

  Image
    *image;

  MagickStatusType
    flags;

  /*
    Allocate image structure.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  image=(Image *) AcquireMagickMemory(sizeof(*image));
  if (image == (Image *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(image,0,sizeof(*image));
  /*
    Initialize Image structure with library-wide defaults.
  */
  (void) CopyMagickString(image->magick,"MIFF",MaxTextExtent);
  image->storage_class=DirectClass;
  image->depth=MAGICKCORE_QUANTUM_DEPTH;
  image->colorspace=RGBColorspace;
  image->interlace=NoInterlace;
  image->ticks_per_second=UndefinedTicksPerSecond;
  image->compose=OverCompositeOp;
  image->blur=1.0;
  GetExceptionInfo(&image->exception);
  (void) QueryColorDatabase(BackgroundColor,&image->background_color,
    &image->exception);
  (void) QueryColorDatabase(BorderColor,&image->border_color,&image->exception);
  (void) QueryColorDatabase(MatteColor,&image->matte_color,&image->exception);
  (void) QueryColorDatabase(TransparentColor,&image->transparent_color,
    &image->exception);
  image->x_resolution=DefaultResolution;
  image->y_resolution=DefaultResolution;
  image->units=PixelsPerInchResolution;
  GetTimerInfo(&image->timer);
  image->ping=MagickFalse;
  image->cache=AcquirePixelCache(0);
  image->blob=CloneBlobInfo((BlobInfo *) NULL);
  image->debug=IsEventLogging();
  image->reference_count=1;
  image->semaphore=AllocateSemaphoreInfo();
  image->signature=MagickSignature;
  if (image_info == (ImageInfo *) NULL)
    return(image);
  /*
    Transfer image info: copy the caller's settings over the defaults.
  */
  SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
    MagickFalse);
  (void) CopyMagickString(image->filename,image_info->filename,MaxTextExtent);
  (void) CopyMagickString(image->magick_filename,image_info->filename,
    MaxTextExtent);
  (void) CopyMagickString(image->magick,image_info->magick,MaxTextExtent);
  if (image_info->size != (char *) NULL)
    {
      (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
      image->columns=image->extract_info.width;
      image->rows=image->extract_info.height;
      image->offset=image->extract_info.x;
      image->extract_info.x=0;
      image->extract_info.y=0;
    }
  if (image_info->extract != (char *) NULL)
    {
      RectangleInfo
        geometry;

      flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
      if (((flags & XValue) != 0) || ((flags & YValue) != 0))
        {
          image->extract_info=geometry;
          Swap(image->columns,image->extract_info.width);
          Swap(image->rows,image->extract_info.height);
        }
    }
  image->compression=image_info->compression;
  image->quality=image_info->quality;
  image->endian=image_info->endian;
  image->interlace=image_info->interlace;
  image->units=image_info->units;
  if (image_info->density != (char *) NULL)
    {
      GeometryInfo
        geometry_info;

      flags=ParseGeometry(image_info->density,&geometry_info);
      image->x_resolution=geometry_info.rho;
      image->y_resolution=geometry_info.sigma;
      if ((flags & SigmaValue) == 0)
        image->y_resolution=image->x_resolution;
    }
  if (image_info->page != (char *) NULL)
    {
      char
        *geometry;

      image->page=image->extract_info;
      geometry=GetPageGeometry(image_info->page);
      (void) ParseAbsoluteGeometry(geometry,&image->page);
      geometry=DestroyString(geometry);
    }
  if (image_info->depth != 0)
    image->depth=image_info->depth;
  image->dither=image_info->dither;
  image->background_color=image_info->background_color;
  image->border_color=image_info->border_color;
  image->matte_color=image_info->matte_color;
  image->transparent_color=image_info->transparent_color;
  image->ping=image_info->ping;
  image->progress_monitor=image_info->progress_monitor;
  image->client_data=image_info->client_data;
  if (image_info->cache != (void *) NULL)
    ClonePixelCacheMethods(image->cache,image_info->cache);
  (void) SetImageVirtualPixelMethod(image,image_info->virtual_pixel_method);
  (void) SyncImageSettings(image_info,image);
  option=GetImageOption(image_info,"delay");
  if (option != (const char *) NULL)
    {
      GeometryInfo
        geometry_info;

      /*
        A '>' suffix caps the delay at rho, a '<' suffix raises it to rho,
        otherwise the delay is set to rho; an optional sigma sets the
        ticks-per-second rate.
      */
      flags=ParseGeometry(option,&geometry_info);
      if ((flags & GreaterValue) != 0)
        {
          if (image->delay > (size_t) floor(geometry_info.rho+0.5))
            image->delay=(size_t) floor(geometry_info.rho+0.5);
        }
      else
        if ((flags & LessValue) != 0)
          {
            /*
              Fix: this branch previously assigned ticks_per_second from
              sigma after testing the delay, leaving the delay unchanged;
              '<' is the symmetric clamp of '>' above and must raise the
              delay to rho (sigma is handled uniformly below).
            */
            if (image->delay < (size_t) floor(geometry_info.rho+0.5))
              image->delay=(size_t) floor(geometry_info.rho+0.5);
          }
        else
          image->delay=(size_t) floor(geometry_info.rho+0.5);
      if ((flags & SigmaValue) != 0)
        image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
    }
  option=GetImageOption(image_info,"dispose");
  if (option != (const char *) NULL)
    image->dispose=(DisposeType) ParseMagickOption(MagickDisposeOptions,
      MagickFalse,option);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
  ImageInfo
    *info;

  /*
    Allocate an ImageInfo structure and initialize it to default values;
    allocation failure is fatal.
  */
  info=(ImageInfo *) AcquireMagickMemory(sizeof(*info));
  if (info == (ImageInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  GetImageInfo(info);
  return(info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image)
{
  /*
    Allocate image structure.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->next=AcquireImage(image_info);
  if (GetNextImageInList(image) == (Image *) NULL)
    return;  /* allocation failed; image->next stays NULL as documented */
  /*
    Default the new frame's filename to the current image's filename, then
    prefer the image_info filename when one was supplied.
  */
  (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
    MaxTextExtent);
  if (image_info != (ImageInfo *) NULL)
    (void) CopyMagickString(GetNextImageInList(image)->filename,
      image_info->filename,MaxTextExtent);
  /*
    Share the blob: discard the freshly allocated blob and take a reference
    to the current image's blob instead (order matters here).
  */
  DestroyBlob(GetNextImageInList(image));
  image->next->blob=ReferenceBlob(image->blob);
  image->next->endian=image->endian;
  image->next->scene=image->scene+1;  /* consecutive scene numbering */
  image->next->previous=image;  /* back-link the doubly-linked list */
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
  const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"

  CacheView
    *append_view,
    *image_view;

  const Image
    *image;

  Image
    *append_image;

  MagickBooleanType
    matte,
    proceed,
    status;

  MagickOffsetType
    n;

  RectangleInfo
    geometry;

  register const Image
    *next;

  size_t
    height,
    number_images,
    width;

  ssize_t
    x_offset,
    y,
    y_offset;

  /*
    Compute maximum area of appended area.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  image=images;
  matte=image->matte;
  number_images=1;
  width=image->columns;
  height=image->rows;
  next=GetNextImageInList(image);
  for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    /* The result carries an alpha channel if any input image does. */
    if (next->matte != MagickFalse)
      matte=MagickTrue;
    number_images++;
    if (stack != MagickFalse)
      {
        /* Top-to-bottom: heights accumulate, width is the maximum. */
        if (next->columns > width)
          width=next->columns;
        height+=next->rows;
        continue;
      }
    /* Left-to-right: widths accumulate, height is the maximum. */
    width+=next->columns;
    if (next->rows > height)
      height=next->rows;
  }
  /*
    Append images.
  */
  append_image=CloneImage(image,width,height,MagickTrue,exception);
  if (append_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(append_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&append_image->exception);
      append_image=DestroyImage(append_image);
      return((Image *) NULL);
    }
  append_image->matte=matte;
  (void) SetImageBackgroundColor(append_image);
  status=MagickTrue;
  x_offset=0;
  y_offset=0;
  append_view=AcquireCacheView(append_image);
  for (n=0; n < (MagickOffsetType) number_images; n++)
  {
    /*
      Justify this image within its slot per the current gravity setting;
      the gravity offset is subtracted from the running insert position.
    */
    SetGeometry(append_image,&geometry);
    GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
    if (stack != MagickFalse)
      x_offset-=geometry.x;
    else
      y_offset-=geometry.y;
    image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status) omp_throttle(1)
#endif
    for (y=0; y < (ssize_t) image->rows; y++)
    {
      MagickBooleanType
        sync;

      register const IndexPacket
        *restrict indexes;

      register const PixelPacket
        *restrict p;

      register IndexPacket
        *restrict append_indexes;

      register PixelPacket
        *restrict q;

      register ssize_t
        x;

      /* A failure in any row poisons status; later rows become no-ops. */
      if (status == MagickFalse)
        continue;
      p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
        image->columns,1,exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        {
          status=MagickFalse;
          continue;
        }
      indexes=GetCacheViewVirtualIndexQueue(image_view);
      append_indexes=GetCacheViewAuthenticIndexQueue(append_view);
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetRedPixelComponent(q,GetRedPixelComponent(p));
        SetGreenPixelComponent(q,GetGreenPixelComponent(p));
        SetBluePixelComponent(q,GetBluePixelComponent(p));
        /* Default to opaque; copy source opacity only when it has matte. */
        SetOpacityPixelComponent(q,OpaqueOpacity);
        if (image->matte != MagickFalse)
          SetOpacityPixelComponent(q,GetOpacityPixelComponent(p));
        /* Black-channel indexes are copied only for CMYK-to-CMYK. */
        if ((image->colorspace == CMYKColorspace) &&
            (append_image->colorspace == CMYKColorspace))
          append_indexes[x]=indexes[x];
        p++;
        q++;
      }
      sync=SyncCacheViewAuthenticPixels(append_view,exception);
      if (sync == MagickFalse)
        status=MagickFalse;
    }
    image_view=DestroyCacheView(image_view);
    proceed=SetImageProgress(image,AppendImageTag,n,number_images);
    if (proceed == MagickFalse)
      break;
    /* Advance the insert position for the next image in the list. */
    if (stack == MagickFalse)
      {
        x_offset+=(ssize_t) image->columns;
        y_offset=0;
      }
    else
      {
        x_offset=0;
        y_offset+=(ssize_t) image->rows;
      }
    image=GetNextImageInList(image);
  }
  append_view=DestroyCacheView(append_view);
  if (status == MagickFalse)
    append_image=DestroyImage(append_image);
  return(append_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns the severity of the most severe exception in
% the image sequence (UndefinedException if there are none), reporting it as
% a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
  ExceptionInfo
    *exception_info;

  ExceptionType
    highest_severity;

  /*
    Collect the most severe exception in the image sequence, report it,
    and hand the severity back to the caller.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  exception_info=AcquireExceptionInfo();
  GetImageException(image,exception_info);
  CatchException(exception_info);
  highest_severity=exception_info->severity;
  exception_info=DestroyExceptionInfo(exception_info);
  return(highest_severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based any clipping path information
% if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image)
{
  MagickBooleanType
    status;

  /*
    Convenience wrapper: clip against clipping path "#1" with inside
    semantics.
  */
  status=ClipImagePath(image,"#1",MagickTrue);
  return(status);
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
#define ClipImagePathTag "ClipPath/Image"

  char
    *property;

  const char
    *value;

  Image
    *clip_mask;

  ImageInfo
    *image_info;

  /*
    Locate the named 8BIM clipping-path resource in the image properties.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(pathname != NULL);
  property=AcquireString(pathname);
  (void) FormatMagickString(property,MaxTextExtent,"8BIM:1999,2998:%s",
    pathname);
  value=GetImageProperty(image,property);
  property=DestroyString(property);
  if (value == (const char *) NULL)
    {
      ThrowFileException(&image->exception,OptionError,"NoClipPathDefined",
        image->filename);
      return(MagickFalse);
    }
  /*
    Render the clipping path into a mask image.
  */
  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,image->filename,MaxTextExtent);
  (void) ConcatenateMagickString(image_info->filename,pathname,MaxTextExtent);
  clip_mask=BlobToImage(image_info,value,strlen(value),&image->exception);
  image_info=DestroyImageInfo(image_info);
  if (clip_mask == (Image *) NULL)
    return(MagickFalse);
  if (clip_mask->storage_class == PseudoClass)
    {
      (void) SyncImage(clip_mask);
      if (SetImageStorageClass(clip_mask,DirectClass) == MagickFalse)
        {
          /* FIX: previously leaked clip_mask on this error path. */
          clip_mask=DestroyImage(clip_mask);
          return(MagickFalse);
        }
    }
  /*
    An "outside" clip is the negated mask.
  */
  if (inside == MagickFalse)
    (void) NegateImage(clip_mask,MagickFalse);
  (void) FormatMagickString(clip_mask->magick_filename,MaxTextExtent,
    "8BIM:1999,2998:%s\nPS",pathname);
  (void) SetImageClipMask(image,clip_mask);
  clip_mask=DestroyImage(clip_mask);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows is 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
%      Image *CloneImage(const Image *image,const size_t columns,
%        const size_t rows,const MagickBooleanType detach,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
  const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
  Image
    *clone_image;

  MagickRealType
    scale;

  size_t
    length;

  /*
    Clone the image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  clone_image=(Image *) AcquireMagickMemory(sizeof(*clone_image));
  if (clone_image == (Image *) NULL)
    ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(clone_image,0,sizeof(*clone_image));
  /*
    Copy basic attributes from the source image.
  */
  clone_image->signature=MagickSignature;
  clone_image->storage_class=image->storage_class;
  clone_image->colorspace=image->colorspace;
  clone_image->matte=image->matte;
  clone_image->columns=image->columns;
  clone_image->rows=image->rows;
  clone_image->dither=image->dither;
  if (image->colormap != (PixelPacket *) NULL)
    {
      /*
        Allocate and copy the image colormap.
      */
      clone_image->colors=image->colors;
      length=(size_t) image->colors;
      clone_image->colormap=(PixelPacket *) AcquireQuantumMemory(length,
        sizeof(*clone_image->colormap));
      /* NOTE(review): if this allocation fails, ThrowImageException returns
         without releasing clone_image -- looks like a leak; confirm. */
      if (clone_image->colormap == (PixelPacket *) NULL)
        ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
      (void) CopyMagickMemory(clone_image->colormap,image->colormap,length*
        sizeof(*clone_image->colormap));
    }
  /*
    Deep-copy profiles, properties, and artifacts; inherit any pending
    exception from the source.
  */
  (void) CloneImageProfiles(clone_image,image);
  (void) CloneImageProperties(clone_image,image);
  (void) CloneImageArtifacts(clone_image,image);
  GetTimerInfo(&clone_image->timer);
  GetExceptionInfo(&clone_image->exception);
  InheritException(&clone_image->exception,&image->exception);
  if (image->ascii85 != (void *) NULL)
    Ascii85Initialize(clone_image);
  clone_image->magick_columns=image->magick_columns;
  clone_image->magick_rows=image->magick_rows;
  clone_image->type=image->type;
  (void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
    MaxTextExtent);
  (void) CopyMagickString(clone_image->magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(clone_image->filename,image->filename,MaxTextExtent);
  clone_image->progress_monitor=image->progress_monitor;
  clone_image->client_data=image->client_data;
  clone_image->reference_count=1;
  clone_image->next=image->next;
  clone_image->previous=image->previous;
  clone_image->list=NewImageList();
  clone_image->clip_mask=NewImageList();
  clone_image->mask=NewImageList();
  if (detach == MagickFalse)
    clone_image->blob=ReferenceBlob(image->blob);
  else
    {
      /* A detached clone stands alone: no list links and a fresh blob. */
      clone_image->next=NewImageList();
      clone_image->previous=NewImageList();
      clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
    }
  clone_image->ping=image->ping;
  clone_image->debug=IsEventLogging();
  clone_image->semaphore=AllocateSemaphoreInfo();
  if ((columns == 0) && (rows == 0))
    {
      /*
        Exact copy: also clone montage/directory/masks and reference the
        source pixel cache.
      */
      if (image->montage != (char *) NULL)
        (void) CloneString(&clone_image->montage,image->montage);
      if (image->directory != (char *) NULL)
        (void) CloneString(&clone_image->directory,image->directory);
      if (image->clip_mask != (Image *) NULL)
        clone_image->clip_mask=CloneImage(image->clip_mask,0,0,MagickTrue,
          exception);
      if (image->mask != (Image *) NULL)
        clone_image->mask=CloneImage(image->mask,0,0,MagickTrue,exception);
      clone_image->cache=ReferencePixelCache(image->cache);
      return(clone_image);
    }
  /*
    New geometry requested: scale the page and tile offsets proportionally
    (round-half-away via floor/ceil +-0.5); pixel content is undefined and
    must be initialized by the caller.
  */
  scale=(MagickRealType) columns/(MagickRealType) image->columns;
  clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
  clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
  clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
  scale=(MagickRealType) rows/(MagickRealType) image->rows;
  clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
  clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
  clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
  clone_image->columns=columns;
  clone_image->rows=rows;
  clone_image->cache=ClonePixelCache(image->cache);
  return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
  ImageInfo
    *clone_info;

  /*
    Start from a default-initialized ImageInfo; a NULL source yields the
    defaults unchanged.
  */
  clone_info=AcquireImageInfo();
  if (image_info == (ImageInfo *) NULL)
    return(clone_info);
  /*
    Scalar members are copied by assignment.
  */
  clone_info->compression=image_info->compression;
  clone_info->temporary=image_info->temporary;
  clone_info->adjoin=image_info->adjoin;
  clone_info->antialias=image_info->antialias;
  clone_info->scene=image_info->scene;
  clone_info->number_scenes=image_info->number_scenes;
  clone_info->depth=image_info->depth;
  /*
    String members are deep-copied only when non-NULL.
  */
  if (image_info->size != (char *) NULL)
    (void) CloneString(&clone_info->size,image_info->size);
  if (image_info->extract != (char *) NULL)
    (void) CloneString(&clone_info->extract,image_info->extract);
  if (image_info->scenes != (char *) NULL)
    (void) CloneString(&clone_info->scenes,image_info->scenes);
  if (image_info->page != (char *) NULL)
    (void) CloneString(&clone_info->page,image_info->page);
  clone_info->interlace=image_info->interlace;
  clone_info->endian=image_info->endian;
  clone_info->units=image_info->units;
  clone_info->quality=image_info->quality;
  if (image_info->sampling_factor != (char *) NULL)
    (void) CloneString(&clone_info->sampling_factor,
      image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    (void) CloneString(&clone_info->server_name,image_info->server_name);
  if (image_info->font != (char *) NULL)
    (void) CloneString(&clone_info->font,image_info->font);
  if (image_info->texture != (char *) NULL)
    (void) CloneString(&clone_info->texture,image_info->texture);
  if (image_info->density != (char *) NULL)
    (void) CloneString(&clone_info->density,image_info->density);
  clone_info->pointsize=image_info->pointsize;
  clone_info->fuzz=image_info->fuzz;
  clone_info->pen=image_info->pen;
  clone_info->background_color=image_info->background_color;
  clone_info->border_color=image_info->border_color;
  clone_info->matte_color=image_info->matte_color;
  clone_info->transparent_color=image_info->transparent_color;
  clone_info->dither=image_info->dither;
  clone_info->monochrome=image_info->monochrome;
  clone_info->colors=image_info->colors;
  clone_info->colorspace=image_info->colorspace;
  clone_info->type=image_info->type;
  clone_info->orientation=image_info->orientation;
  clone_info->preview_type=image_info->preview_type;
  clone_info->group=image_info->group;
  clone_info->ping=image_info->ping;
  clone_info->verbose=image_info->verbose;
  if (image_info->view != (char *) NULL)
    (void) CloneString(&clone_info->view,image_info->view);
  if (image_info->authenticate != (char *) NULL)
    (void) CloneString(&clone_info->authenticate,image_info->authenticate);
  /*
    Options, callbacks, and shared resources.
  */
  (void) CloneImageOptions(clone_info,image_info);
  clone_info->progress_monitor=image_info->progress_monitor;
  clone_info->client_data=image_info->client_data;
  /* The pixel cache is shared by reference (when present), not deep-copied;
     the plain assignment is then overwritten by the referenced cache. */
  clone_info->cache=image_info->cache;
  if (image_info->cache != (void *) NULL)
    clone_info->cache=ReferencePixelCache(image_info->cache);
  if (image_info->profile != (void *) NULL)
    clone_info->profile=(void *) CloneStringInfo((StringInfo *)
      image_info->profile);
  SetImageInfoFile(clone_info,image_info->file);
  SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
  clone_info->stream=image_info->stream;
  clone_info->virtual_pixel_method=image_info->virtual_pixel_method;
  (void) CopyMagickString(clone_info->magick,image_info->magick,MaxTextExtent);
  (void) CopyMagickString(clone_info->unique,image_info->unique,MaxTextExtent);
  (void) CopyMagickString(clone_info->zero,image_info->zero,MaxTextExtent);
  (void) CopyMagickString(clone_info->filename,image_info->filename,
    MaxTextExtent);
  clone_info->subimage=image_info->scene;  /* deprecated */
  clone_info->subrange=image_info->number_scenes;  /* deprecated */
  clone_info->channel=image_info->channel;
  clone_info->debug=IsEventLogging();
  clone_info->signature=image_info->signature;
  return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m b i n e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CombineImages() combines one or more images into a single image. The
% grayscale value of the pixels of each image in the sequence is assigned in
% order to the specified channels of the combined image. The typical
% ordering would be image 1 => Red, 2 => Green, 3 => Blue, etc.
%
% The format of the CombineImages method is:
%
% Image *CombineImages(const Image *image,const ChannelType channel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o channel: the channel(s) to assign successive grayscale images to
%      (e.g. RedChannel, GreenChannel, BlueChannel, OpacityChannel).
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CombineImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
#define CombineImageTag "Combine/Image"

  CacheView
    *combine_view;

  const Image
    *next;

  Image
    *combine_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Ensure the images are the same size.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  for (next=image; next != (Image *) NULL; next=GetNextImageInList(next))
  {
    if ((next->columns != image->columns) || (next->rows != image->rows))
      ThrowImageException(OptionError,"ImagesAreNotTheSameSize");
  }
  combine_image=CloneImage(image,0,0,MagickTrue,exception);
  if (combine_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(combine_image,DirectClass) == MagickFalse)
    {
      InheritException(exception,&combine_image->exception);
      combine_image=DestroyImage(combine_image);
      return((Image *) NULL);
    }
  if ((channel & OpacityChannel) != 0)
    combine_image->matte=MagickTrue;
  (void) SetImageBackgroundColor(combine_image);
  /*
    Combine images: each requested channel consumes the next grayscale
    image in the sequence, in Red, Green, Blue, Opacity, Index order.
  */
  status=MagickTrue;
  progress=0;
  combine_view=AcquireCacheView(combine_image);
  for (y=0; y < (ssize_t) combine_image->rows; y++)
  {
    CacheView
      *image_view;

    const Image
      *next;

    PixelPacket
      *pixels;

    register const PixelPacket
      *restrict p;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    pixels=GetCacheViewAuthenticPixels(combine_view,0,y,combine_image->columns,
      1,exception);
    if (pixels == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    next=image;
    if (((channel & RedChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireCacheView(next);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            /* FIX: previously leaked image_view and silently ignored the
               pixel-cache error (cf. AppendImages). */
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetRedPixelComponent(q,PixelIntensityToQuantum(p));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & GreenChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireCacheView(next);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetGreenPixelComponent(q,PixelIntensityToQuantum(p));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & BlueChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireCacheView(next);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetBluePixelComponent(q,PixelIntensityToQuantum(p));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & OpacityChannel) != 0) && (next != (Image *) NULL))
      {
        image_view=AcquireCacheView(next);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        q=pixels;
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          SetOpacityPixelComponent(q,PixelIntensityToQuantum(p));
          p++;
          q++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (((channel & IndexChannel) != 0) &&
        (image->colorspace == CMYKColorspace) && (next != (Image *) NULL))
      {
        IndexPacket
          *indexes;

        image_view=AcquireCacheView(next);
        p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
        if (p == (const PixelPacket *) NULL)
          {
            image_view=DestroyCacheView(image_view);
            status=MagickFalse;
            continue;
          }
        indexes=GetCacheViewAuthenticIndexQueue(combine_view);
        for (x=0; x < (ssize_t) combine_image->columns; x++)
        {
          indexes[x]=PixelIntensityToQuantum(p);
          p++;
        }
        image_view=DestroyCacheView(image_view);
        next=GetNextImageInList(next);
      }
    if (SyncCacheViewAuthenticPixels(combine_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        proceed=SetImageProgress(image,CombineImageTag,progress++,
          combine_image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  combine_view=DestroyCacheView(combine_view);
  if (status == MagickFalse)
    combine_image=DestroyImage(combine_image);
  return(combine_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *DestroyImage(Image *image)
{
  MagickBooleanType
    destroy;

  /*
    Dereference image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  destroy=MagickFalse;
  /* Decrement the reference count under the image semaphore; only the
     caller that drops the count to zero performs the teardown below. */
  LockSemaphoreInfo(image->semaphore);
  image->reference_count--;
  if (image->reference_count == 0)
    destroy=MagickTrue;
  UnlockSemaphoreInfo(image->semaphore);
  if (destroy == MagickFalse)
    return((Image *) NULL);
  /*
    Destroy image.
  */
  DestroyImagePixels(image);
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  if (image->montage != (char *) NULL)
    image->montage=DestroyString(image->montage);
  if (image->directory != (char *) NULL)
    image->directory=DestroyString(image->directory);
  if (image->colormap != (PixelPacket *) NULL)
    image->colormap=(PixelPacket *) RelinquishMagickMemory(image->colormap);
  if (image->geometry != (char *) NULL)
    image->geometry=DestroyString(image->geometry);
  DestroyImageProfiles(image);
  DestroyImageProperties(image);
  DestroyImageArtifacts(image);
  if (image->ascii85 != (Ascii85Info*) NULL)
    image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
  DestroyBlob(image);
  (void) DestroyExceptionInfo(&image->exception);
  /* The semaphore is destroyed last among members since it guarded the
     reference count above. */
  if (image->semaphore != (SemaphoreInfo *) NULL)
    DestroySemaphoreInfo(&image->semaphore);
  /* Invalidate the signature so stale pointers trip the signature asserts;
     the RelinquishMagickMemory result is returned to the caller so it can
     clear its own pointer (presumably NULL -- cf. call sites above). */
  image->signature=(~MagickSignature);
  image=(Image *) RelinquishMagickMemory(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
  /*
    Release every dynamically-allocated member, then the structure itself.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  /*
    String members: each free reassigns the member from the call's result.
  */
  if (image_info->size != (char *) NULL)
    image_info->size=DestroyString(image_info->size);
  if (image_info->extract != (char *) NULL)
    image_info->extract=DestroyString(image_info->extract);
  if (image_info->scenes != (char *) NULL)
    image_info->scenes=DestroyString(image_info->scenes);
  if (image_info->page != (char *) NULL)
    image_info->page=DestroyString(image_info->page);
  if (image_info->sampling_factor != (char *) NULL)
    image_info->sampling_factor=DestroyString(image_info->sampling_factor);
  if (image_info->server_name != (char *) NULL)
    image_info->server_name=DestroyString(image_info->server_name);
  if (image_info->font != (char *) NULL)
    image_info->font=DestroyString(image_info->font);
  if (image_info->texture != (char *) NULL)
    image_info->texture=DestroyString(image_info->texture);
  if (image_info->density != (char *) NULL)
    image_info->density=DestroyString(image_info->density);
  if (image_info->view != (char *) NULL)
    image_info->view=DestroyString(image_info->view);
  if (image_info->authenticate != (char *) NULL)
    image_info->authenticate=DestroyString(image_info->authenticate);
  /*
    Options, pixel cache, and profile.
  */
  DestroyImageOptions(image_info);
  if (image_info->cache != (void *) NULL)
    image_info->cache=DestroyPixelCache(image_info->cache);
  if (image_info->profile != (StringInfo *) NULL)
    image_info->profile=(void *) DestroyStringInfo((StringInfo *)
      image_info->profile);
  /*
    Invalidate the signature before freeing so stale pointers trip the
    signature asserts.
  */
  image_info->signature=(~MagickSignature);
  image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
  return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream.
%
% The format of the DisassociateImageStream method is:
%
%      void DisassociateImageStream(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
  /*
    Disassociate the image from its stream by detaching the image blob;
    DetachBlob's return value is discarded.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  (void) DetachBlob(image->blob);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageAlphaChannel() returns MagickFalse if the image alpha channel is
% not activated. That is, the image is RGB rather than RGBA or CMYK rather
% than CMYKA.
%
% The format of the GetImageAlphaChannel method is:
%
% MagickBooleanType GetImageAlphaChannel(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType GetImageAlphaChannel(const Image *image)
{
  /*
    Accessor: report whether the image's alpha (matte) channel is active.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  return(image->matte);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageClipMask() returns the clip path associated with the image.
%
% The format of the GetImageClipMask method is:
%
% Image *GetImageClipMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageClipMask(const Image *image,
  ExceptionInfo *exception)
{
  /*
    Return a clone of the image's clip mask, or NULL when no mask is set.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->clip_mask != (Image *) NULL)
    return(CloneImage(image->clip_mask,0,0,MagickTrue,exception));
  return((Image *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageException() traverses an image sequence and returns any
% error more severe than noted by the exception parameter.
%
% The format of the GetImageException method is:
%
% void GetImageException(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: Specifies a pointer to a list of one or more images.
%
% o exception: return the highest severity exception.
%
*/
MagickExport void GetImageException(Image *image,ExceptionInfo *exception)
{
  register Image
    *p;

  /*
    Walk the sequence: promote each image's exception into *exception when
    it is more severe, then clear the per-image exception.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickSignature);
  for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
  {
    if (p->exception.severity != UndefinedException)
      {
        if (p->exception.severity > exception->severity)
          InheritException(exception,&p->exception);
        p->exception.severity=UndefinedException;
      }
  }
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport void GetImageInfo(ImageInfo *image_info)
{
  const char
    *option;

  ExceptionInfo
    *sans_exception;

  /*
    Zero the structure, then install the documented default values.
  */
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info != (ImageInfo *) NULL);
  (void) ResetMagickMemory(image_info,0,sizeof(*image_info));
  image_info->adjoin=MagickTrue;
  image_info->interlace=NoInterlace;
  image_info->channel=DefaultChannels;
  image_info->quality=UndefinedCompressionQuality;
  image_info->antialias=MagickTrue;
  image_info->dither=MagickTrue;
  /*
    Honor the MAGICK_SYNCHRONIZE environment override when present.
  */
  option=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (option != (const char *) NULL)
    image_info->synchronize=IsMagickTrue(option);
  /*
    Default colors; lookup errors are deliberately ignored.
  */
  sans_exception=AcquireExceptionInfo();
  (void) QueryColorDatabase(BackgroundColor,&image_info->background_color,
    sans_exception);
  (void) QueryColorDatabase(BorderColor,&image_info->border_color,
    sans_exception);
  (void) QueryColorDatabase(MatteColor,&image_info->matte_color,
    sans_exception);
  (void) QueryColorDatabase(TransparentColor,&image_info->transparent_color,
    sans_exception);
  sans_exception=DestroyExceptionInfo(sans_exception);
  image_info->debug=IsEventLogging();
  image_info->signature=MagickSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
  /*
    Accessor: hand back the stdio stream stored in the image info.
  */
  return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *GetImageMask(const Image *image,ExceptionInfo *exception)
{
  /*
    Return a deep copy of the image's mask so the caller owns the result;
    NULL when no mask is associated with the image.
  */
  assert(image != (const Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (image->mask != (Image *) NULL)
    return(CloneImage(image->mask,0,0,MagickTrue,exception));
  return((Image *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t I m a g e R e f e r e n c e C o u n t                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
  ssize_t
    count;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Read the count under the image semaphore so a concurrent
    ReferenceImage()/DestroyImage() cannot yield a torn value.
  */
  LockSemaphoreInfo(image->semaphore);
  count=image->reference_count;
  UnlockSemaphoreInfo(image->semaphore);
  return(count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
  /*
    The virtual pixel method is a property of the pixel cache; validate the
    handle and delegate the query.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,
% Image *image,const char *format,int value,char *filename)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
*/
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
  Image *image,const char *format,int value,char *filename)
{
  char
    *q;

  int
    c;

  MagickBooleanType
    canonical;

  register const char
    *p;

  /*
    Start from a verbatim copy of the format; it is rewritten below only
    when an embedded %d/%o/%x or %[filename:...] sequence is found.
  */
  canonical=MagickFalse;
  (void) CopyMagickString(filename,format,MaxTextExtent);
  for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
  {
    q=(char *) p+1;
    if (*q == '%')
      {
        /*
          "%%" is an escaped percent sign; skip past it.
        */
        p=q+1;
        continue;
      }
    if (*q == '0')
      {
        ssize_t
          value;

        /*
          Consume a zero-padding width (e.g. "%03d") so q lands on the
          conversion character; the parsed width itself is unused here (the
          whole "%0Nd" text is handed to FormatMagickString() below).
        */
        value=(ssize_t) strtol(q,&q,10);
        (void) value;
      }
    switch (*q)
    {
      case 'd':
      case 'o':
      case 'x':
      {
        /*
          Numeric scene substitution: temporarily terminate the buffer just
          after the specifier, format 'value' in place over the "%...d"
          text, then restore the byte and re-append the tail.
        */
        q++;
        c=(*q);
        *q='\0';
        (void) FormatMagickString(filename+(p-format),(size_t) (MaxTextExtent-
          (p-format)),p,value);
        *q=c;
        (void) ConcatenateMagickString(filename,q,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      case '[':
      {
        char
          pattern[MaxTextExtent];

        const char
          *value;

        register char
          *r;

        register ssize_t
          i;

        ssize_t
          depth;

        /*
          Image option.
        */
        if (strchr(p,']') == (char *) NULL)
          break;
        depth=1;
        r=q+1;
        /*
          Copy the bracketed property name into 'pattern', tracking nested
          '['/']' pairs so the scan stops at the balancing bracket.
        */
        for (i=0; (i < (MaxTextExtent-1L)) && (*r != '\0'); i++)
        {
          if (*r == '[')
            depth++;
          if (*r == ']')
            depth--;
          if (depth <= 0)
            break;
          pattern[i]=(*r++);
        }
        pattern[i]='\0';
        /*
          Only "%[filename:...]" properties are interpreted here.
        */
        if (LocaleNCompare(pattern,"filename:",9) != 0)
          break;
        value=(const char *) NULL;
        if ((image_info != (const ImageInfo *) NULL) &&
            (image != (const Image *) NULL))
          value=GetMagickProperty(image_info,image,pattern);
        else
          if (image != (Image *) NULL)
            value=GetImageProperty(image,pattern);
          else
            if (image_info != (ImageInfo *) NULL)
              value=GetImageOption(image_info,pattern);
        if (value == (const char *) NULL)
          break;
        /*
          Splice the property value over the "%[...]" sequence: terminate
          at the '%', copy the value in place, restore the byte, then
          append everything after the closing ']'.
        */
        q--;
        c=(*q);
        *q='\0';
        (void) CopyMagickString(filename+(p-format),value,(size_t)
          (MaxTextExtent-(p-format)));
        *q=c;
        (void) ConcatenateMagickString(filename,r+1,MaxTextExtent);
        canonical=MagickTrue;
        if (*(q-1) != '%')
          break;
        p++;
        break;
      }
      default:
        break;
    }
  }
  /*
    Collapse any remaining "%%" escapes to a single '%'.
  */
  for (q=filename; *q != '\0'; q++)
    if ((*q == '%') && (*(q+1) == '%'))
      {
        (void) CopyMagickString(q,q+1,(size_t) (MaxTextExtent-(q-filename)));
        canonical=MagickTrue;
      }
  /*
    If nothing was substituted, fall back to the format verbatim.
  */
  if (canonical == MagickFalse)
    (void) CopyMagickString(filename,format,MaxTextExtent);
  return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
  ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
  /*
    Without HDRI support quantums are clamped integers, so no pixel can
    fall outside the quantum range.
  */
  (void) image;
  (void) exception;
  return(MagickFalse);
#else
  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickPixelPacket
    zero;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    'status' stays MagickTrue while every inspected component is within
    [0,QuantumRange] and integer-valued; the return value below inverts it.
  */
  status=MagickTrue;
  GetMagickPixelPacket(image,&zero);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickPixelPacket
      pixel;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        /*
          NOTE(review): a pixel-cache read failure also clears 'status' and
          therefore makes the function report MagickTrue -- confirm this is
          intended rather than an error path.
        */
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    pixel=zero;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /*
        The (QuantumAny) comparison tests whether the floating-point
        component is integer-valued; any fractional or out-of-range
        component ends the scan of this row early.
      */
      SetMagickPixelPacket(image,p,indexes+x,&pixel);
      if ((pixel.red < 0.0) || (pixel.red > QuantumRange) ||
          (pixel.red != (QuantumAny) pixel.red))
        break;
      if ((pixel.green < 0.0) || (pixel.green > QuantumRange) ||
          (pixel.green != (QuantumAny) pixel.green))
        break;
      if ((pixel.blue < 0.0) || (pixel.blue > QuantumRange) ||
          (pixel.blue != (QuantumAny) pixel.blue))
        break;
      if (pixel.matte != MagickFalse)
        {
          if ((pixel.opacity < 0.0) || (pixel.opacity > QuantumRange) ||
              (pixel.opacity != (QuantumAny) pixel.opacity))
            break;
        }
      if (pixel.colorspace == CMYKColorspace)
        {
          if ((pixel.index < 0.0) || (pixel.index > QuantumRange) ||
              (pixel.index != (QuantumAny) pixel.index))
            break;
        }
      p++;
    }
    /*
      An early break above leaves x short of the row width: HDR evidence.
    */
    if (x < (ssize_t) image->columns)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    Invert: status==MagickTrue means all components fit the integer
    quantum range, i.e. the image is NOT high dynamic range.
  */
  return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
  register const Image
    *q;

  /*
    Every frame in the list must carry a valid signature for the sequence
    to count as a well-formed image object.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  q=image;
  while (q != (Image *) NULL)
  {
    if (q->signature != MagickSignature)
      return(MagickFalse);
    q=GetNextImageInList(q);
  }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
  char
    filename[MaxTextExtent],
    magick[MaxTextExtent];

  register const Image
    *q;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  /*
    Snapshot the head frame's format and filename; a sequence is tainted
    when any frame has its taint flag set or no longer matches that
    identity.
  */
  (void) CopyMagickString(magick,image->magick,MaxTextExtent);
  (void) CopyMagickString(filename,image->filename,MaxTextExtent);
  for (q=image; q != (Image *) NULL; q=GetNextImageInList(q))
  {
    if ((q->taint != MagickFalse) ||
        (LocaleCompare(q->magick,magick) != 0) ||
        (LocaleCompare(q->filename,filename) != 0))
      return(MagickTrue);
  }
  return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ModifyImage(Image **image,
  ExceptionInfo *exception)
{
  Image
    *clone_image;

  assert(image != (Image **) NULL);
  assert(*image != (Image *) NULL);
  assert((*image)->signature == MagickSignature);
  if ((*image)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
  /*
    Sole owner already: safe to modify in place.
  */
  if (GetImageReferenceCount(*image) <= 1)
    return(MagickTrue);
  /*
    Clone before touching the reference count: if the clone fails (the
    error is recorded in 'exception' by CloneImage()), leave the caller's
    image untouched instead of replacing it with a NULL pointer.
  */
  clone_image=CloneImage(*image,0,0,MagickTrue,exception);
  if (clone_image == (Image *) NULL)
    return(MagickFalse);
  LockSemaphoreInfo((*image)->semaphore);
  (*image)->reference_count--;
  UnlockSemaphoreInfo((*image)->semaphore);
  *image=clone_image;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,
% const size_t width,const size_t height,
% const MagickPixelPacket *background)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
*/
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
  const size_t width,const size_t height,
  const MagickPixelPacket *background)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  Image
    *image;

  ssize_t
    y;

  MagickBooleanType
    status;

  assert(image_info != (const ImageInfo *) NULL);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image_info->signature == MagickSignature);
  assert(background != (const MagickPixelPacket *) NULL);
  /*
    Allocate the canvas and inherit colorspace, matte, fuzz, and depth from
    the requested background pixel.
  */
  image=AcquireImage(image_info);
  image->columns=width;
  image->rows=height;
  image->colorspace=background->colorspace;
  image->matte=background->matte;
  image->fuzz=background->fuzz;
  image->depth=background->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Queue (not read) the row, fill every pixel with the background, and
      sync it back to the cache.
    */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,background,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  /*
    On any row failure the partially-built canvas is destroyed and NULL is
    returned; the failure reason is in image->exception, which is freed
    with the image -- callers must test for NULL.
  */
  if (status == MagickFalse)
    image=DestroyImage(image);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
  /*
    Bump the reference count under the image semaphore and return the same
    image pointer so the call can be chained.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  LockSemaphoreInfo(image->semaphore);
  image->reference_count++;
  UnlockSemaphoreInfo(image->semaphore);
  return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Parse the page specification; 'flags' records which components
    (width/height/x/y and modifiers) were present in the string.
  */
  flags=ParseAbsoluteGeometry(page,&geometry);
  if ((flags & WidthValue) != 0)
    {
      /*
        A lone width implies a square canvas.
      */
      if ((flags & HeightValue) == 0)
        geometry.height=geometry.width;
      image->page.width=geometry.width;
      image->page.height=geometry.height;
    }
  if ((flags & AspectValue) != 0)
    {
      /*
        Relative mode: the parsed offsets adjust the existing page origin
        rather than replace it.  NOTE(review): AspectValue presumably
        corresponds to the '!' geometry modifier -- confirm against
        ParseAbsoluteGeometry().
      */
      if ((flags & XValue) != 0)
        image->page.x+=geometry.x;
      if ((flags & YValue) != 0)
        image->page.y+=geometry.y;
    }
  else
    {
      /*
        Absolute offsets; a zero-sized canvas is grown so it still encloses
        the image at its new offset.
      */
      if ((flags & XValue) != 0)
        {
          image->page.x=geometry.x;
          if ((image->page.width == 0) && (geometry.x > 0))
            image->page.width=image->columns+geometry.x;
        }
      if ((flags & YValue) != 0)
        {
          image->page.y=geometry.y;
          if ((image->page.height == 0) && (geometry.y > 0))
            image->page.height=image->rows+geometry.y;
        }
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImageChannel() separates a channel from the image and returns it as
% a grayscale image. A channel is a particular color component of each pixel
% in the image.
%
% The format of the SeparateImageChannel method is:
%
% MagickBooleanType SeparateImageChannel(Image *image,
% const ChannelType channel)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channel to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
*/
MagickExport MagickBooleanType SeparateImageChannel(Image *image,
  const ChannelType channel)
{
#define SeparateImageTag "Separate/Image"

  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    Separate image channels.
  */
  status=MagickTrue;
  if (channel == GrayChannels)
    image->matte=MagickTrue;
  progress=0;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(progress,status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    switch (channel)
    {
      case RedChannel:
      {
        /*
          Replicate the red component into green and blue, producing a
          grayscale rendition of the red channel.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          q->green=q->red;
          q->blue=q->red;
          q++;
        }
        break;
      }
      case GreenChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          q->red=q->green;
          q->blue=q->green;
          q++;
        }
        break;
      }
      case BlueChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          q->red=q->blue;
          q->green=q->blue;
          q++;
        }
        break;
      }
      case OpacityChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          q->red=q->opacity;
          q->green=q->opacity;
          q->blue=q->opacity;
          q++;
        }
        break;
      }
      case BlackChannel:
      {
        /*
          The black component lives in the index channel; it only exists
          for colormapped or CMYK images.
        */
        if ((image->storage_class != PseudoClass) &&
            (image->colorspace != CMYKColorspace))
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          q->red=indexes[x];
          q->green=indexes[x];
          q->blue=indexes[x];
          q++;
        }
        break;
      }
      case TrueAlphaChannel:
      {
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          q->red=(Quantum) GetAlphaPixelComponent(q);
          q->green=(Quantum) GetAlphaPixelComponent(q);
          q->blue=(Quantum) GetAlphaPixelComponent(q);
          q++;
        }
        break;
      }
      case GrayChannels:
      {
        /*
          Store the inverted pixel intensity in the opacity channel; used
          by SetImageAlphaChannel() to copy grayscale into alpha.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          q->opacity=(Quantum) (QuantumRange-PixelIntensityToQuantum(q));
          q++;
        }
        break;
      }
      default:
        break;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /*
          The critical section serializes progress updates across the
          OpenMP worker threads.
        */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp critical (MagickCore_SeparateImageChannel)
#endif
        proceed=SetImageProgress(image,SeparateImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  /*
    The result is grayscale (RGB); drop the matte except in the
    GrayChannels case, where opacity now carries the payload.
  */
  if (channel != GrayChannels)
    image->matte=MagickFalse;
  (void) SetImageColorspace(image,RGBColorspace);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e p a r a t e I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SeparateImages() returns a separate grayscale image for each channel
% specified.
%
% The format of the SeparateImages method is:
%
% Image *SeparateImages(const Image *image,const ChannelType channel,
%   ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel: Identify which channels to extract: RedChannel, GreenChannel,
% BlueChannel, OpacityChannel, CyanChannel, MagentaChannel,
% YellowChannel, or BlackChannel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SeparateImages(const Image *image,const ChannelType channel,
  ExceptionInfo *exception)
{
  Image
    *images,
    *separate_image;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  images=NewImageList();
  /*
    For each requested channel, clone the image and reduce the clone to a
    grayscale rendition of that channel.  A failed clone (its cause is
    already recorded in 'exception' by CloneImage()) is skipped rather
    than handed to SeparateImageChannel(), which requires a non-NULL
    image.
  */
  if ((channel & RedChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,RedChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & GreenChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,GreenChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & BlueChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlueChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  /*
    The black channel only exists for CMYK images.
  */
  if (((channel & BlackChannel) != 0) && (image->colorspace == CMYKColorspace))
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,BlackChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  if ((channel & OpacityChannel) != 0)
    {
      separate_image=CloneImage(image,0,0,MagickTrue,exception);
      if (separate_image != (Image *) NULL)
        {
          (void) SeparateImageChannel(separate_image,OpacityChannel);
          AppendImageToList(&images,separate_image);
        }
    }
  return(images);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlphaChannel() activates, deactivates, resets, or sets the alpha
% channel.
%
% The format of the SetImageAlphaChannel method is:
%
% MagickBooleanType SetImageAlphaChannel(Image *image,
% const AlphaChannelType alpha_type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha_type: The alpha channel type: ActivateAlphaChannel,
% CopyAlphaChannel, DeactivateAlphaChannel, ExtractAlphaChannel,
% OpaqueAlphaChannel, ResetAlphaChannel, SetAlphaChannel,
% ShapeAlphaChannel, and TransparentAlphaChannel.
%
*/
MagickExport MagickBooleanType SetImageAlphaChannel(Image *image,
  const AlphaChannelType alpha_type)
{
  MagickBooleanType
    status;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  status=MagickFalse;
  switch (alpha_type)
  {
    case ActivateAlphaChannel:
    {
      /*
        Enable the matte channel; existing opacity values are kept.
        NOTE(review): 'status' stays MagickFalse on this path (and on
        DeactivateAlphaChannel) -- confirm callers treat the return value
        accordingly.
      */
      image->matte=MagickTrue;
      break;
    }
    case BackgroundAlphaChannel:
    {
      CacheView
        *image_view;

      ExceptionInfo
        *exception;

      IndexPacket
        index;

      /*
        NOTE(review): this inner 'status' shadows the function-level one;
        the case returns it directly, so the outer variable is unused here.
      */
      MagickBooleanType
        status;

      MagickPixelPacket
        background;

      PixelPacket
        pixel;

      ssize_t
        y;

      /*
        Set transparent pixels to background color.
      */
      if (image->matte == MagickFalse)
        break;
      if (SetImageStorageClass(image,DirectClass) == MagickFalse)
        break;
      GetMagickPixelPacket(image,&background);
      SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
        NULL,&background);
      if (image->colorspace == CMYKColorspace)
        ConvertRGBToCMYK(&background);
      index=0;
      SetPixelPacket(image,&background,&pixel,&index);
      status=MagickTrue;
      exception=(&image->exception);
      image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *restrict indexes;

        register PixelPacket
          *restrict q;

        register ssize_t
          x;

        if (status == MagickFalse)
          continue;
        q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,
          exception);
        if (q == (PixelPacket *) NULL)
          {
            status=MagickFalse;
            continue;
          }
        /*
          Only pixels that are exactly TransparentOpacity are rewritten
          with the background color.
        */
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (q->opacity == TransparentOpacity)
            {
              q->red=pixel.red;
              q->green=pixel.green;
              q->blue=pixel.blue;
            }
          q++;
        }
        if (image->colorspace == CMYKColorspace)
          {
            indexes=GetCacheViewAuthenticIndexQueue(image_view);
            for (x=0; x < (ssize_t) image->columns; x++)
              indexes[x]=index;
          }
        if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
          status=MagickFalse;
      }
      image_view=DestroyCacheView(image_view);
      return(status);
    }
    case DeactivateAlphaChannel:
    {
      image->matte=MagickFalse;
      break;
    }
    case ShapeAlphaChannel:
    case CopyAlphaChannel:
    {
      /*
        Special usage case for SeparateImageChannel(): copy grayscale color to
        the alpha channel.
      */
      status=SeparateImageChannel(image,GrayChannels);
      image->matte=MagickTrue; /* make sure transparency is now on! */
      if (alpha_type == ShapeAlphaChannel)
        {
          MagickPixelPacket
            background;

          /*
            Reset all color channels to background color.
          */
          GetMagickPixelPacket(image,&background);
          SetMagickPixelPacket(image,&(image->background_color),(IndexPacket *)
            NULL,&background);
          (void) LevelColorsImage(image,&background,&background,MagickTrue);
        }
      break;
    }
    case ExtractAlphaChannel:
    {
      /*
        Turn the alpha channel into grayscale color, then drop the matte.
      */
      status=SeparateImageChannel(image,TrueAlphaChannel);
      image->matte=MagickFalse;
      break;
    }
    case ResetAlphaChannel: /* deprecated */
    case OpaqueAlphaChannel:
    {
      status=SetImageOpacity(image,OpaqueOpacity);
      image->matte=MagickTrue;
      break;
    }
    case TransparentAlphaChannel:
    {
      status=SetImageOpacity(image,TransparentOpacity);
      image->matte=MagickTrue;
      break;
    }
    case SetAlphaChannel:
    {
      /*
        Enable the matte channel only if it is not already on, initializing
        it fully opaque.
      */
      if (image->matte == MagickFalse)
        {
          status=SetImageOpacity(image,OpaqueOpacity);
          image->matte=MagickTrue;
        }
      break;
    }
    case UndefinedAlphaChannel:
      break;
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  IndexPacket
    index;

  MagickBooleanType
    status;

  MagickPixelPacket
    background;

  PixelPacket
    pixel;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  /*
    A non-opaque background implies the image needs a matte channel.
  */
  if (image->background_color.opacity != OpaqueOpacity)
    image->matte=MagickTrue;
  /*
    Convert the background color into a PixelPacket (and index value) in
    the image's own colorspace.
  */
  GetMagickPixelPacket(image,&background);
  SetMagickPixelPacket(image,&image->background_color,(const IndexPacket *)
    NULL,&background);
  if (image->colorspace == CMYKColorspace)
    ConvertRGBToCMYK(&background);
  index=0;
  SetPixelPacket(image,&background,&pixel,&index);
  /*
    Set image background color.
  */
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
      *q++=pixel;
    /*
      CMYK images also carry the black component in the index channel.
    */
    if (image->colorspace == CMYKColorspace)
      {
        register IndexPacket
          *restrict indexes;

        indexes=GetCacheViewAuthenticIndexQueue(image_view);
        for (x=0; x < (ssize_t) image->columns; x++)
          indexes[x]=index;
      }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() set the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,
% const MagickPixelPacket *color)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color: the image color.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
  const MagickPixelPacket *color)
{
  CacheView
    *image_view;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  assert(color != (const MagickPixelPacket *) NULL);
  /*
    The canvas inherits colorspace, matte, fuzz, and depth from the fill
    color.
  */
  image->colorspace=color->colorspace;
  image->matte=color->matte;
  image->fuzz=color->fuzz;
  image->depth=color->depth;
  status=MagickTrue;
  exception=(&image->exception);
  image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *restrict indexes;

    register PixelPacket
      *restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Queue (not read) the row, fill it with the color, and sync back.
    */
    q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelPacket(image,color,q,indexes+x);
      q++;
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
  const ClassType storage_class)
{
  /*
    Record the requested pixel storage class (DirectClass or PseudoClass).
    Validate the image handle first, consistent with every other setter in
    this file.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  image->storage_class=storage_class;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C l i p M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageClipMask() associates a clip path with the image. The clip path
% must be the same dimensions as the image. Set any pixel component of
% the clip path to TransparentOpacity to prevent that corresponding image
% pixel component from being updated when SyncAuthenticPixels() is applied.
%
% The format of the SetImageClipMask method is:
%
% MagickBooleanType SetImageClipMask(Image *image,const Image *clip_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clip_mask: the image clip path.
%
*/
MagickExport MagickBooleanType SetImageClipMask(Image *image,
  const Image *clip_mask)
{
  /*
    Associate a clip path with the image.  The clip path must have the same
    dimensions as the image; passing NULL simply removes any existing clip
    mask.  The mask is cloned, so the caller retains ownership of clip_mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if ((clip_mask != (const Image *) NULL) &&
      ((clip_mask->columns != image->columns) ||
       (clip_mask->rows != image->rows)))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Release any previous clip mask before installing the new one.
  */
  if (image->clip_mask != (Image *) NULL)
    image->clip_mask=DestroyImage(image->clip_mask);
  image->clip_mask=NewImageList();
  if (clip_mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->clip_mask=CloneImage(clip_mask,0,0,MagickTrue,&image->exception);
  return(image->clip_mask != (Image *) NULL ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,
% const size_t columns,const size_t rows)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,
  const size_t columns,const size_t rows)
{
  /*
    Set the image size (columns & rows).  Returns MagickFalse when either
    dimension is zero, leaving the image untouched.
    Fix: validate the image argument as every sibling setter in this file
    does; the original dereferenced image unchecked.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if ((columns == 0) || (rows == 0))
    return(MagickFalse);
  image->columns=columns;
  image->rows=rows;
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the `magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, `ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: `image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
const unsigned int frames,ExceptionInfo *exception)
{
/*
  Initialize image_info->magick from the filename's explicit "format:"
  prefix, its extension, or (when frames == 0) the first bytes of the file
  itself, and parse any trailing [scene] specification.  Also updates
  affirm/adjoin/endian/temporary flags as side effects.
*/
char
extension[MaxTextExtent],
filename[MaxTextExtent],
magic[MaxTextExtent],
*q,
subimage[MaxTextExtent];
const MagicInfo
*magic_info;
const MagickInfo
*magick_info;
ExceptionInfo
*sans_exception;
Image
*image;
MagickBooleanType
status;
register const char
*p;
ssize_t
count;
unsigned char
magick[2*MaxTextExtent];
/*
Look for 'image.format' in filename.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
*subimage='\0';
if (frames == 0)
{
GetPathComponent(image_info->filename,SubimagePath,subimage);
if (*subimage != '\0')
{
/*
Look for scene specification (e.g. img0001.pcd[4]).
*/
if (IsSceneGeometry(subimage,MagickFalse) == MagickFalse)
{
/* not a scene list; treat a plain geometry as an extract region */
if (IsGeometry(subimage) != MagickFalse)
(void) CloneString(&image_info->extract,subimage);
}
else
{
size_t
first,
last;
(void) CloneString(&image_info->scenes,subimage);
image_info->scene=StringToUnsignedLong(image_info->scenes);
image_info->number_scenes=image_info->scene;
p=image_info->scenes;
/*
  Parse a comma-separated list of scene ranges (e.g. "2-5,7"),
  tracking the smallest first scene and largest last scene.
  NOTE(review): the loop condition tests *q (updated by strtol below)
  while the increment advances p -- looks intentional but confirm.
*/
for (q=(char *) image_info->scenes; *q != '\0'; p++)
{
while ((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ','))
p++;
first=(size_t) strtol(p,&q,10);
last=first;
while (isspace((int) ((unsigned char) *q)) != 0)
q++;
if (*q == '-')
last=(size_t) strtol(q+1,&q,10);
if (first > last)
Swap(first,last);
if (first < image_info->scene)
image_info->scene=first;
if (last > image_info->number_scenes)
image_info->number_scenes=last;
p=q;
}
/* convert the inclusive [scene, number_scenes] span into a count */
image_info->number_scenes-=image_info->scene-1;
image_info->subimage=image_info->scene;
image_info->subrange=image_info->number_scenes;
}
}
}
*extension='\0';
GetPathComponent(image_info->filename,ExtensionPath,extension);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
/*
  For zlib-compressed files, peel off the compression suffix so the real
  image extension underneath is used (e.g. image.tif.gz -> tif).
*/
if (*extension != '\0')
if ((LocaleCompare(extension,"gz") == 0) ||
(LocaleCompare(extension,"Z") == 0) ||
(LocaleCompare(extension,"wmz") == 0))
{
char
path[MaxTextExtent];
(void) CopyMagickString(path,image_info->filename,MaxTextExtent);
path[strlen(path)-strlen(extension)-1]='\0';
GetPathComponent(path,ExtensionPath,extension);
}
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
/* same for bzip2-compressed files */
if (*extension != '\0')
if (LocaleCompare(extension,"bz2") == 0)
{
char
path[MaxTextExtent];
(void) CopyMagickString(path,image_info->filename,MaxTextExtent);
path[strlen(path)-strlen(extension)-1]='\0';
GetPathComponent(path,ExtensionPath,extension);
}
#endif
image_info->affirm=MagickFalse;
sans_exception=AcquireExceptionInfo();
if (*extension != '\0')
{
MagickFormatType
format_type;
register ssize_t
i;
static const char
*format_type_formats[] =
{
"AUTOTRACE",
"BROWSE",
"DCRAW",
"EDIT",
"EPHEMERAL",
"LAUNCH",
"MPEG:DECODE",
"MPEG:ENCODE",
"PRINT",
"PS:ALPHA",
"PS:CMYK",
"PS:COLOR",
"PS:GRAY",
"PS:MONO",
"SCAN",
"SHOW",
"WIN",
(char *) NULL
};
/*
User specified image format.
*/
(void) CopyMagickString(magic,extension,MaxTextExtent);
LocaleUpper(magic);
/*
Look for explicit image formats.
*/
format_type=UndefinedFormatType;
i=0;
while ((format_type == UndefinedFormatType) &&
(format_type_formats[i] != (char *) NULL))
{
/* first-character check is a fast pre-filter for the full compare */
if ((*magic == *format_type_formats[i]) &&
(LocaleCompare(magic,format_type_formats[i]) == 0))
format_type=ExplicitFormatType;
i++;
}
magick_info=GetMagickInfo(magic,sans_exception);
if ((magick_info != (const MagickInfo *) NULL) &&
(magick_info->format_type != UndefinedFormatType))
format_type=magick_info->format_type;
if (format_type == UndefinedFormatType)
(void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
else
if (format_type == ExplicitFormatType)
{
/* explicit formats are taken on faith; skip content sniffing below */
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
}
if (LocaleCompare(magic,"RGB") == 0)
image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */
}
/*
Look for explicit 'format:image' in filename.
*/
*magic='\0';
GetPathComponent(image_info->filename,MagickPath,magic);
if (*magic == '\0')
(void) CopyMagickString(magic,image_info->magick,MaxTextExtent);
else
{
/*
User specified image format.
*/
LocaleUpper(magic);
if (IsMagickConflict(magic) == MagickFalse)
{
(void) CopyMagickString(image_info->magick,magic,MaxTextExtent);
if (LocaleCompare(magic,"EPHEMERAL") != 0)
image_info->affirm=MagickTrue;
else
image_info->temporary=MagickTrue;
}
}
magick_info=GetMagickInfo(magic,sans_exception);
sans_exception=DestroyExceptionInfo(sans_exception);
/* reset endianness when the coder does not support it */
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
GetPathComponent(image_info->filename,CanonicalPath,filename);
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
if ((image_info->adjoin != MagickFalse) && (frames > 1))
{
/*
Test for multiple image support (e.g. image%02d.png).
*/
(void) InterpretImageFilename(image_info,(Image *) NULL,
image_info->filename,(int) image_info->scene,filename);
if ((LocaleCompare(filename,image_info->filename) != 0) &&
(strchr(filename,'%') == (char *) NULL))
image_info->adjoin=MagickFalse;
}
if ((image_info->adjoin != MagickFalse) && (frames > 0))
{
/*
Some image formats do not support multiple frames per file.
*/
magick_info=GetMagickInfo(magic,exception);
if (magick_info != (const MagickInfo *) NULL)
if (GetMagickAdjoin(magick_info) == MagickFalse)
image_info->adjoin=MagickFalse;
}
if (image_info->affirm != MagickFalse)
return(MagickTrue);
if (frames == 0)
{
/*
Determine the image format from the first few bytes of the file.
*/
image=AcquireImage(image_info);
(void) CopyMagickString(image->filename,image_info->filename,
MaxTextExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
if ((IsBlobSeekable(image) == MagickFalse) ||
(IsBlobExempt(image) != MagickFalse))
{
/*
Copy standard input or pipe to temporary file.
*/
*filename='\0';
status=ImageToFile(image,filename,exception);
(void) CloseBlob(image);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
SetImageInfoFile(image_info,(FILE *) NULL);
(void) CopyMagickString(image->filename,filename,MaxTextExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
(void) CopyMagickString(image_info->filename,filename,MaxTextExtent);
image_info->temporary=MagickTrue;
}
(void) ResetMagickMemory(magick,0,sizeof(magick));
count=ReadBlob(image,2*MaxTextExtent,magick);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Check magic.xml configuration file.
*/
sans_exception=AcquireExceptionInfo();
magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
if ((magic_info != (const MagicInfo *) NULL) &&
(GetMagicName(magic_info) != (char *) NULL))
{
/* sniffed signature wins: adopt its format name */
(void) CopyMagickString(image_info->magick,GetMagicName(magic_info),
MaxTextExtent);
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
return(MagickTrue);
}
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
  const size_t length)
{
  /*
    Record a caller-owned memory blob and its length in the image info.
    No copy is made; only the pointer and length are stored.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->length=length;
  image_info->blob=(void *) blob;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
  /*
    Record the caller-supplied stdio stream in the image info.
  */
  assert(image_info != (ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  if (image_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      image_info->filename);
  image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const Image *mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mask: the image mask.
%
*/
MagickExport MagickBooleanType SetImageMask(Image *image,
  const Image *mask)
{
  /*
    Associate a mask with the image.  The mask must have the same
    dimensions as the image; passing NULL simply removes any existing
    mask.  The mask is cloned, so the caller retains ownership of mask.
  */
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickSignature);
  if ((mask != (const Image *) NULL) &&
      ((mask->columns != image->columns) || (mask->rows != image->rows)))
    ThrowBinaryException(ImageError,"ImageSizeDiffers",image->filename);
  /*
    Release any previous mask before installing the new one.
  */
  if (image->mask != (Image *) NULL)
    image->mask=DestroyImage(image->mask);
  image->mask=NewImageList();
  if (mask == (Image *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  image->mask=CloneImage(mask,0,0,MagickTrue,&image->exception);
  return(image->mask != (Image *) NULL ? MagickTrue : MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e O p a c i t y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageOpacity() sets the opacity levels of the image.
%
% The format of the SetImageOpacity method is:
%
% MagickBooleanType SetImageOpacity(Image *image,const Quantum opacity)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
% fully transparent.
%
*/
MagickExport MagickBooleanType SetImageOpacity(Image *image,
const Quantum opacity)
{
/*
  Set the opacity component of every pixel to the given level (0 is fully
  opaque, QuantumRange fully transparent), row by row and in parallel when
  OpenMP support is compiled in.  Returns MagickFalse if any row of pixels
  could not be fetched or synced.
*/
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
/* matte stays off only when the requested opacity is fully opaque */
image->matte=opacity != OpaqueOpacity ? MagickTrue : MagickFalse;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;  /* an earlier row failed; skip remaining rows' work */
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetOpacityPixelComponent(q,opacity);
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageType() sets the type of image. Choose from these types:
%
% Bilevel Grayscale GrayscaleMatte
% Palette PaletteMatte TrueColor
% TrueColorMatte ColorSeparation ColorSeparationMatte
% OptimizeType
%
% The format of the SetImageType method is:
%
% MagickBooleanType SetImageType(Image *image,const ImageType type)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: Image type.
%
*/
MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type)
{
/*
  Coerce the image to the requested ImageType by combining colorspace
  transforms, quantization, storage-class changes, and matte toggling as
  each type requires.  status carries the result of the most recent
  transform/quantize call for the chosen case.
*/
const char
*artifact;
ImageInfo
*image_info;
MagickBooleanType
status;
QuantizeInfo
*quantize_info;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
status=MagickTrue;
image_info=AcquireImageInfo();
/* propagate the image's dither preference into the quantize options */
image_info->dither=image->dither;
artifact=GetImageArtifact(image,"dither");
if (artifact != (const char *) NULL)
(void) SetImageOption(image_info,"dither",artifact);
switch (type)
{
case BilevelType:
{
/* gray first, then quantize down to a 2-color (black/white) palette */
if (IsGrayImage(image,&image->exception) == MagickFalse)
status=TransformImageColorspace(image,GRAYColorspace);
if (IsMonochromeImage(image,&image->exception) == MagickFalse)
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=2;
quantize_info->colorspace=GRAYColorspace;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->matte=MagickFalse;
break;
}
case GrayscaleType:
{
if (IsGrayImage(image,&image->exception) == MagickFalse)
status=TransformImageColorspace(image,GRAYColorspace);
image->matte=MagickFalse;
break;
}
case GrayscaleMatteType:
{
if (IsGrayImage(image,&image->exception) == MagickFalse)
status=TransformImageColorspace(image,GRAYColorspace);
/* ensure an alpha channel exists (fully opaque by default) */
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case PaletteType:
{
if (image->colorspace != RGBColorspace)
status=TransformImageColorspace(image,RGBColorspace);
/* quantize only when the image is not already a <=256-color palette */
if ((image->storage_class == DirectClass) || (image->colors > 256))
{
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->number_colors=256;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
}
image->matte=MagickFalse;
break;
}
case PaletteBilevelMatteType:
{
if (image->colorspace != RGBColorspace)
status=TransformImageColorspace(image,RGBColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
/* threshold the alpha channel to on/off before building the palette */
(void) BilevelImageChannel(image,AlphaChannel,(double) QuantumRange/2.0);
quantize_info=AcquireQuantizeInfo(image_info);
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case PaletteMatteType:
{
if (image->colorspace != RGBColorspace)
status=TransformImageColorspace(image,RGBColorspace);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
quantize_info=AcquireQuantizeInfo(image_info);
quantize_info->colorspace=TransparentColorspace;
status=QuantizeImage(quantize_info,image);
quantize_info=DestroyQuantizeInfo(quantize_info);
break;
}
case TrueColorType:
{
if (image->colorspace != RGBColorspace)
status=TransformImageColorspace(image,RGBColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
image->matte=MagickFalse;
break;
}
case TrueColorMatteType:
{
if (image->colorspace != RGBColorspace)
status=TransformImageColorspace(image,RGBColorspace);
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case ColorSeparationType:
{
/* route non-RGB images through RGB on the way to CMYK */
if (image->colorspace != CMYKColorspace)
{
if (image->colorspace != RGBColorspace)
status=TransformImageColorspace(image,RGBColorspace);
status=TransformImageColorspace(image,CMYKColorspace);
}
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
image->matte=MagickFalse;
break;
}
case ColorSeparationMatteType:
{
if (image->colorspace != CMYKColorspace)
{
if (image->colorspace != RGBColorspace)
status=TransformImageColorspace(image,RGBColorspace);
status=TransformImageColorspace(image,CMYKColorspace);
}
if (image->storage_class != DirectClass)
status=SetImageStorageClass(image,DirectClass);
if (image->matte == MagickFalse)
(void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
break;
}
case OptimizeType:
case UndefinedType:
break;  /* no conversion; only the type tag below is recorded */
}
image->type=type;
image_info=DestroyImageInfo(image_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
% const VirtualPixelMethod virtual_pixel_method)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
*/
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(const Image *image,
  const VirtualPixelMethod virtual_pixel_method)
{
  VirtualPixelMethod
    prior_method;

  /*
    Install the "virtual pixels" method for accesses outside the image
    cache boundaries, delegating to the pixel cache; the previous setting
    is returned.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  prior_method=SetPixelCacheVirtualMethod(image,virtual_pixel_method);
  return(prior_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
% Image *SmushImages(const Image *images,const MagickBooleanType stack,
% const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
/*
  Measure how far the current image can slide left toward its previous
  neighbour: for each row, count the transparent run at the left image's
  right edge plus the transparent run at the right image's left edge, and
  keep the minimum (gap).  Returns gap-offset, or 0 when there is no
  previous image.
*/
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const PixelPacket
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireCacheView(left_image);
right_view=AcquireCacheView(right_image);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
/* scan inward from the left image's right edge over transparent pixels */
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(p->opacity != TransparentOpacity) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
/* scan inward from the right image's left edge over transparent pixels */
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(p->opacity != TransparentOpacity) || ((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
/*
  NOTE(review): the outer loop above has no break, so y == smush_image->rows
  here and this early return looks unreachable -- confirm intent.
*/
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
/*
  Vertical counterpart of SmushXGap: for each column, count the transparent
  run at the top image's bottom edge plus the transparent run at the bottom
  image's top edge, and keep the minimum (gap).  Returns gap-offset, or 0
  when there is no previous image.
*/
CacheView
*bottom_view,
*top_view;
const Image
*bottom_image,
*top_image;
RectangleInfo
bottom_geometry,
top_geometry;
register const PixelPacket
*p;
register ssize_t
i,
x;
size_t
gap;
ssize_t
y;
if (images->previous == (Image *) NULL)
return(0);
bottom_image=images;
SetGeometry(smush_image,&bottom_geometry);
GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
bottom_image->gravity,&bottom_geometry);
top_image=images->previous;
SetGeometry(smush_image,&top_geometry);
GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
&top_geometry);
gap=bottom_image->rows;
top_view=AcquireCacheView(top_image);
bottom_view=AcquireCacheView(bottom_image);
for (x=0; x < (ssize_t) smush_image->columns; x++)
{
/* scan upward from the top image's bottom edge over transparent pixels */
for (y=(ssize_t) top_image->rows-1; y > 0; y--)
{
p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
if ((p == (const PixelPacket *) NULL) ||
(p->opacity != TransparentOpacity) || ((top_image->rows-y-1) >= gap))
break;
}
i=(ssize_t) top_image->rows-y-1;
/* scan downward from the bottom image's top edge over transparent pixels */
for (y=0; y < (ssize_t) bottom_image->rows; y++)
{
p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
exception);
if ((p == (const PixelPacket *) NULL) ||
(p->opacity != TransparentOpacity) || ((y+i) >= (ssize_t) gap))
break;
}
if ((y+i) < (ssize_t) gap)
gap=(size_t) (y+i);
}
bottom_view=DestroyCacheView(bottom_view);
top_view=DestroyCacheView(top_view);
/*
  NOTE(review): the outer loop above has no break, so x == smush_image->columns
  here and this early return looks unreachable -- confirm intent.
*/
if (x < (ssize_t) smush_image->columns)
return(offset);
return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"
/*
  Smush the image list into a single image: top-to-bottom when stack is
  true, left-to-right otherwise, closing transparent gaps between
  neighbours (via SmushXGap/SmushYGap) and honoring each image's gravity.
*/
CacheView
*smush_view;
const Image
*image;
Image
*smush_image;
MagickBooleanType
matte,
proceed,
status;
MagickOffsetType
n;
RectangleInfo
geometry;
register const Image
*next;
size_t
height,
number_images,
width;
ssize_t
x_offset,
y_offset;
/*
Compute maximum area of smushed area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickSignature);
image=images;
matte=image->matte;
number_images=1;
width=image->columns;
height=image->rows;
next=GetNextImageInList(image);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->matte != MagickFalse)
matte=MagickTrue;
number_images++;
if (stack != MagickFalse)
{
/* stacking: width is the max, heights (plus offsets) accumulate */
if (next->columns > width)
width=next->columns;
height+=next->rows;
if (next->previous != (Image *) NULL)
height+=offset;
continue;
}
/* side-by-side: widths (plus offsets) accumulate, height is the max */
width+=next->columns;
if (next->previous != (Image *) NULL)
width+=offset;
if (next->rows > height)
height=next->rows;
}
/*
Smush images.
*/
smush_image=CloneImage(image,width,height,MagickTrue,exception);
if (smush_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(smush_image,DirectClass) == MagickFalse)
{
InheritException(exception,&smush_image->exception);
smush_image=DestroyImage(smush_image);
return((Image *) NULL);
}
smush_image->matte=matte;
(void) SetImageBackgroundColor(smush_image);
status=MagickTrue;
x_offset=0;
y_offset=0;
/* NOTE(review): smush_view is acquired but never used before being
   destroyed below -- confirm whether it can be dropped */
smush_view=AcquireCacheView(smush_image);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
SetGeometry(smush_image,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
if (stack != MagickFalse)
{
x_offset-=geometry.x;
y_offset-=SmushYGap(smush_image,image,offset,exception);
}
else
{
x_offset-=SmushXGap(smush_image,image,offset,exception);
y_offset-=geometry.y;
}
/* NOTE(review): status is overwritten each iteration, so only the last
   composite's result survives -- confirm whether failures should latch */
status=CompositeImage(smush_image,OverCompositeOp,image,x_offset,y_offset);
proceed=SetImageProgress(image,SmushImageTag,n,number_images);
if (proceed == MagickFalse)
break;
if (stack == MagickFalse)
{
x_offset+=(ssize_t) image->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) image->rows;
}
image=GetNextImageInList(image);
}
/* trim the canvas to the extent actually covered by the composites */
if (stack == MagickFalse)
smush_image->columns=(size_t) x_offset;
else
smush_image->rows=(size_t) y_offset;
smush_view=DestroyCacheView(smush_view);
if (status == MagickFalse)
smush_image=DestroyImage(smush_image);
return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType StripImage(Image *image)
{
  /*
    Strip the image of all profiles and comment/date properties.
    Fix: assert the image signature like every other exported function in
    this file; the original only checked for NULL.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  DestroyImageProfiles(image);
  (void) DeleteImageProperty(image,"comment");
  (void) DeleteImageProperty(image,"date:create");
  (void) DeleteImageProperty(image,"date:modify");
  /* hint to the PNG encoder which ancillary chunks to emit -- presumably
     "none,gama" suppresses all but gAMA; confirm against the PNG coder */
  (void) SetImageArtifact(image,"png:include-chunk","none,gama");
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static inline IndexPacket PushColormapIndex(Image *image,
  const size_t index,MagickBooleanType *range_exception)
{
  /*
    Validate a colormap index against image->colors.  Out-of-range indexes
    set *range_exception and are clamped to 0.
  */
  if (index >= image->colors)
    {
      *range_exception=MagickTrue;
      return((IndexPacket) 0);
    }
  return((IndexPacket) index);
}
MagickExport MagickBooleanType SyncImage(Image *image)
{
/*
  Rebuild each pixel's red, green, blue (and, when matte is set, opacity)
  components from the colormap entry selected by its index channel.
  PseudoClass images only; returns MagickFalse immediately for DirectClass.
*/
CacheView
*image_view;
ExceptionInfo
*exception;
MagickBooleanType
range_exception,
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickSignature);
if (image->storage_class == DirectClass)
return(MagickFalse);
range_exception=MagickFalse;
status=MagickTrue;
exception=(&image->exception);
image_view=AcquireCacheView(image);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(dynamic,4) shared(status)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
IndexPacket
index;
PixelPacket
pixel;
register IndexPacket
*restrict indexes;
register PixelPacket
*restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;  /* an earlier row failed; skip remaining rows' work */
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
indexes=GetCacheViewAuthenticIndexQueue(image_view);
for (x=0; x < (ssize_t) image->columns; x++)
{
/* out-of-range indexes are clamped to 0 and flagged via range_exception.
   NOTE(review): range_exception is written from multiple OpenMP threads
   without synchronization (only ever set to MagickTrue) -- confirm */
index=PushColormapIndex(image,(size_t) indexes[x],
&range_exception);
pixel=image->colormap[(ssize_t) index];
q->red=pixel.red;
q->green=pixel.green;
q->blue=pixel.blue;
if (image->matte != MagickFalse)
q->opacity=pixel.opacity;
q++;
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (range_exception != MagickFalse)
(void) ThrowMagickException(&image->exception,GetMagickModule(),
CorruptImageError,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() sync the image info options to the image.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
*/
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
  Image *images)
{
  register Image
    *next;

  /*
    Apply the image-info options to every image in the list, then delete
    the "page" option from the image info.
  */
  assert(image_info != (const ImageInfo *) NULL);
  assert(image_info->signature == MagickSignature);
  assert(images != (Image *) NULL);
  assert(images->signature == MagickSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  for (next=images; next != (Image *) NULL; next=GetNextImageInList(next))
    (void) SyncImageSettings(image_info,next);
  (void) DeleteImageOption(image_info,"page");
  return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image)
{
char
property[MaxTextExtent];
const char
*option,
*value;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->background_color,
&image->exception);
option=GetImageOption(image_info,"bias");
if (option != (const char *) NULL)
image->bias=SiPrefixToDouble(option,QuantumRange);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseMagickOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->border_color,&image->exception);
option=GetImageOption(image_info,"colors");
if (option != (const char *) NULL)
image->colors=StringToUnsignedLong(option);
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseMagickOption(MagickComposeOptions,
MagickFalse,option);
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseMagickOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseMagickOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
/*
Set image density.
*/
flags=ParseGeometry(option,&geometry_info);
image->x_resolution=geometry_info.rho;
image->y_resolution=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->y_resolution=image->x_resolution;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseMagickOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterTypes) ParseMagickOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=SiPrefixToDouble(option,QuantumRange);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseMagickOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseMagickOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseMagickOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(InterpolatePixelMethod) ParseMagickOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->matte_color,&image->exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseMagickOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseMagickOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorDatabase(option,&image->transparent_color,
&image->exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseMagickOption(MagickTypeOptions,MagickFalse,
option);
option=GetImageOption(image_info,"units");
if (option != (const char *) NULL)
units=(ResolutionType) ParseMagickOption(MagickResolutionOptions,
MagickFalse,option);
else
units = image_info->units;
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->x_resolution/=2.54;
image->y_resolution/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
image->x_resolution=(double) ((size_t) (100.0*2.54*
image->x_resolution+0.5))/100.0;
image->y_resolution=(double) ((size_t) (100.0*2.54*
image->y_resolution+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
}
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
ResetImageOptionIterator(image_info);
for (option=GetNextImageOption(image_info); option != (const char *) NULL; )
{
value=GetImageOption(image_info,option);
if (value != (const char *) NULL)
{
(void) FormatMagickString(property,MaxTextExtent,"%s",option);
(void) SetImageArtifact(image,property,value);
}
option=GetNextImageOption(image_info);
}
return(MagickTrue);
}
|
polybench.c | /**
* polybench.c: This file is part of the PolyBench/C 3.2 test suite.
*
*
* Contact: Louis-Noel Pouchet <pouchet@cse.ohio-state.edu>
* Web address: http://polybench.sourceforge.net
*/
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <assert.h>
#include <time.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sched.h>
#include <math.h>
#ifdef _OPENMP
# include <omp.h>
#endif
/* By default, collect PAPI counters on thread 0. */
#ifndef POLYBENCH_THREAD_MONITOR
# define POLYBENCH_THREAD_MONITOR 0
#endif
/* Total LLC cache size. By default 32+MB.. */
#ifndef POLYBENCH_CACHE_SIZE_KB
# define POLYBENCH_CACHE_SIZE_KB 32770
#endif
int polybench_papi_counters_threadid = POLYBENCH_THREAD_MONITOR;
double polybench_program_total_flops = 0;
#ifdef POLYBENCH_PAPI
# include <papi.h>
# define POLYBENCH_MAX_NB_PAPI_COUNTERS 96
char* _polybench_papi_eventlist[] = {
#include "papi_counters.list"
NULL
};
int polybench_papi_eventset;
int polybench_papi_eventlist[POLYBENCH_MAX_NB_PAPI_COUNTERS];
long_long polybench_papi_values[POLYBENCH_MAX_NB_PAPI_COUNTERS];
#endif
/* Timer code (gettimeofday). */
double polybench_t_start, polybench_t_end;
/* Timer code (RDTSC). */
unsigned long long int polybench_c_start, polybench_c_end;
static
double rtclock()
{
#ifdef POLYBENCH_TIME
struct timeval Tp;
int stat;
stat = gettimeofday (&Tp, NULL);
if (stat != 0)
printf ("Error return from gettimeofday: %d", stat);
return (Tp.tv_sec + Tp.tv_usec * 1.0e-6);
#else
return 0;
#endif
}
#ifdef POLYBENCH_CYCLE_ACCURATE_TIMER
static
unsigned long long int rdtsc()
{
unsigned long long int ret = 0;
unsigned int cycles_lo;
unsigned int cycles_hi;
__asm__ volatile ("RDTSC" : "=a" (cycles_lo), "=d" (cycles_hi));
ret = (unsigned long long int)cycles_hi << 32 | cycles_lo;
return ret;
}
#endif
void polybench_flush_cache()
{
int cs = POLYBENCH_CACHE_SIZE_KB * 1024 / sizeof(double);
double* flush = (double*) calloc (cs, sizeof(double));
int i;
double tmp = 0.0;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (i = 0; i < cs; i++)
tmp += flush[i];
assert (tmp <= 10.0);
free (flush);
}
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
void polybench_linux_fifo_scheduler()
{
/* Use FIFO scheduler to limit OS interference. Program must be run
as root, and this works only for Linux kernels. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_FIFO);
sched_setscheduler (0, SCHED_FIFO, &schedParam);
}
void polybench_linux_standard_scheduler()
{
/* Restore to standard scheduler policy. */
struct sched_param schedParam;
schedParam.sched_priority = sched_get_priority_max (SCHED_OTHER);
sched_setscheduler (0, SCHED_OTHER, &schedParam);
}
#endif
#ifdef POLYBENCH_PAPI
static
void test_fail(char *file, int line, char *call, int retval)
{
char buf[128];
memset(buf, '\0', sizeof(buf));
if (retval != 0)
fprintf (stdout,"%-40s FAILED\nLine # %d\n", file, line);
else
{
fprintf (stdout,"%-40s SKIPPED\n", file);
fprintf (stdout,"Line # %d\n", line);
}
if (retval == PAPI_ESYS)
{
sprintf (buf, "System error in %s", call);
perror (buf);
}
else if (retval > 0)
fprintf (stdout,"Error: %s\n", call);
else if (retval == 0)
fprintf (stdout,"Error: %s\n", call);
else
{
char errstring[PAPI_MAX_STR_LEN];
PAPI_perror (retval, errstring, PAPI_MAX_STR_LEN);
fprintf (stdout,"Error in %s: %s\n", call, errstring);
}
fprintf (stdout,"\n");
if (PAPI_is_initialized ())
PAPI_shutdown ();
exit (1);
}
void polybench_papi_init()
{
# ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
if (omp_get_max_threads () < polybench_papi_counters_threadid)
polybench_papi_counters_threadid = omp_get_max_threads () - 1;
}
#pragma omp barrier
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
polybench_papi_eventset = PAPI_NULL;
if ((retval = PAPI_library_init (PAPI_VER_CURRENT)) != PAPI_VER_CURRENT)
test_fail (__FILE__, __LINE__, "PAPI_library_init", retval);
if ((retval = PAPI_create_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_create_eventset", retval);
int k;
for (k = 0; _polybench_papi_eventlist[k]; ++k)
{
if ((retval =
PAPI_event_name_to_code (_polybench_papi_eventlist[k],
&(polybench_papi_eventlist[k])))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_event_name_to_code", retval);
}
polybench_papi_eventlist[k] = 0;
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
void polybench_papi_close()
{
# ifdef _OPENMP
#pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
if ((retval = PAPI_destroy_eventset (&polybench_papi_eventset))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_destroy_eventset", retval);
if (PAPI_is_initialized ())
PAPI_shutdown ();
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
int polybench_papi_start_counter(int evid)
{
# ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache();
# endif
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval = 1;
char descr[PAPI_MAX_STR_LEN];
PAPI_event_info_t evinfo;
PAPI_event_code_to_name (polybench_papi_eventlist[evid], descr);
if (PAPI_add_event (polybench_papi_eventset,
polybench_papi_eventlist[evid]) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_add_event", 1);
if (PAPI_get_event_info (polybench_papi_eventlist[evid], &evinfo)
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_get_event_info", retval);
if ((retval = PAPI_start (polybench_papi_eventset)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_start", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
return 0;
}
void polybench_papi_stop_counter(int evid)
{
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num () == polybench_papi_counters_threadid)
{
# endif
int retval;
long_long values[1];
values[0] = 0;
if ((retval = PAPI_read (polybench_papi_eventset, &values[0]))
!= PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_read", retval);
if ((retval = PAPI_stop (polybench_papi_eventset, NULL)) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_stop", retval);
polybench_papi_values[evid] = values[0];
if ((retval = PAPI_remove_event
(polybench_papi_eventset,
polybench_papi_eventlist[evid])) != PAPI_OK)
test_fail (__FILE__, __LINE__, "PAPI_remove_event", retval);
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
void polybench_papi_print()
{
int verbose = 0;
# ifdef _OPENMP
# pragma omp parallel
{
if (omp_get_thread_num() == polybench_papi_counters_threadid)
{
#ifdef POLYBENCH_PAPI_VERBOSE
verbose = 1;
#endif
if (verbose)
printf ("On thread %d:\n", polybench_papi_counters_threadid);
#endif
int evid;
for (evid = 0; polybench_papi_eventlist[evid] != 0; ++evid)
{
if (verbose)
printf ("%s=", _polybench_papi_eventlist[evid]);
printf ("%llu ", polybench_papi_values[evid]);
if (verbose)
printf ("\n");
}
printf ("\n");
# ifdef _OPENMP
}
}
#pragma omp barrier
# endif
}
#endif
/* ! POLYBENCH_PAPI */
void polybench_prepare_instruments()
{
#ifndef POLYBENCH_NO_FLUSH_CACHE
polybench_flush_cache ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_fifo_scheduler ();
#endif
}
void polybench_timer_start()
{
//printf("In polybench timer start\n");
polybench_prepare_instruments ();
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_start = rtclock ();
#else
polybench_c_start = rdtsc ();
#endif
}
void polybench_timer_stop()
{
//printf("In polybench timer stop\n");
#ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
polybench_t_end = rtclock ();
#else
polybench_c_end = rdtsc ();
#endif
#ifdef POLYBENCH_LINUX_FIFO_SCHEDULER
polybench_linux_standard_scheduler ();
#endif
}
void polybench_timer_print()
{
#ifdef POLYBENCH_GFLOPS
if (__polybench_program_total_flops == 0)
{
printf ("[PolyBench][WARNING] Program flops not defined, use polybench_set_program_flops(value)\n");
printf ("%0.6lf\n", polybench_t_end - polybench_t_start);
}
else
printf ("%0.2lf\n",
(__polybench_program_total_flops /
(double)(polybench_t_end - polybench_t_start)) / 1000000000);
#else
# ifndef POLYBENCH_CYCLE_ACCURATE_TIMER
printf ("%0.6f\n", polybench_t_end - polybench_t_start);
# else
printf ("%Ld\n", polybench_c_end - polybench_c_start);
# endif
#endif
}
static
void *
xmalloc (size_t num)
{
void* new = NULL;
int ret = posix_memalign (&new, 32, num);
if (! new || ret)
{
fprintf (stderr, "[PolyBench] posix_memalign: cannot allocate memory");
exit (1);
}
return new;
}
void* polybench_alloc_data(unsigned long long int n, int elt_size)
{
/// FIXME: detect overflow!
size_t val = n;
val *= elt_size;
void* ret = xmalloc (val);
return ret;
}
|
npdot.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <complex.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#define MIN(X,Y) ((X) < (Y) ? (X) : (Y))
#define MAX(X,Y) ((X) > (Y) ? (X) : (Y))
/*
* numpy.dot may call unoptimized blas
*/
void NPdgemm(const char trans_a, const char trans_b,
const int m, const int n, const int k,
const int lda, const int ldb, const int ldc,
const int offseta, const int offsetb, const int offsetc,
double *a, double *b, double *c,
const double alpha, const double beta)
{
const size_t dimc = ldc;
int i, j;
if (m == 0 || n == 0) {
return;
} else if (k == 0) {
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
c[i*dimc+j] = 0;
} }
return;
}
a += offseta;
b += offsetb;
c += offsetc;
if ((k/m) > 3 && (k/n) > 3) { // parallelize k
if (beta == 0) {
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
c[i*dimc+j] = 0;
}
}
} else {
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
c[i*dimc+j] *= beta;
}
}
}
#pragma omp parallel private(i, j)
{
int nthread = omp_get_num_threads();
int nblk = MAX((k+nthread-1) / nthread, 1);
double D0 = 0;
double *cpriv = malloc(sizeof(double) * (m*n+2));
int di;
size_t ij;
size_t astride = nblk;
size_t bstride = nblk;
if (trans_a == 'N') {
astride *= lda;
}
if (trans_b != 'N') {
bstride *= ldb;
}
#pragma omp for
for (i = 0; i < nthread; i++) {
di = MIN(nblk, k-i*nblk);
if (di > 0) {
dgemm_(&trans_a, &trans_b, &m, &n, &di,
&alpha, a+astride*i, &lda,
b+bstride*i, &ldb,
&D0, cpriv, &m);
}
}
#pragma omp critical
if (di > 0) {
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j < m; j++, ij++) {
c[i*dimc+j] += cpriv[ij];
}
}
}
free(cpriv);
}
} else if (m > n*2) { // parallelize m
#pragma omp parallel
{
int nthread = omp_get_num_threads();
int nblk = MAX((m+nthread-1) / nthread, 1);
nthread = (m+nblk-1) / nblk;
int di;
size_t bstride = nblk;
if (trans_a != 'N') {
bstride *= lda;
}
#pragma omp for
for (i = 0; i < nthread; i++) {
di = MIN(nblk, m-i*nblk);
if (di > 0) {
dgemm_(&trans_a, &trans_b, &di, &n, &k,
&alpha, a+bstride*i, &lda, b, &ldb,
&beta, c+i*nblk, &ldc);
}
}
}
} else { // parallelize n
#pragma omp parallel
{
int nthread = omp_get_num_threads();
int nblk = MAX((n+nthread-1) / nthread, 1);
nthread = (n+nblk-1) / nblk;
int di;
size_t bstride = nblk;
size_t cstride = dimc * nblk;
if (trans_b == 'N') {
bstride *= ldb;
}
#pragma omp for
for (i = 0; i < nthread; i++) {
di = MIN(nblk, n-i*nblk);
if (di > 0) {
dgemm_(&trans_a, &trans_b, &m, &di, &k,
&alpha, a, &lda, b+bstride*i, &ldb,
&beta, c+cstride*i, &ldc);
}
}
}
}
}
void NPzgemm(const char trans_a, const char trans_b,
const int m, const int n, const int k,
const int lda, const int ldb, const int ldc,
const int offseta, const int offsetb, const int offsetc,
double complex *a, double complex *b, double complex *c,
const double complex *alpha, const double complex *beta)
{
const size_t dimc = ldc;
int i, j;
if (m == 0 || n == 0) {
return;
} else if (k == 0) {
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
c[i*dimc+j] = 0;
} }
return;
}
a += offseta;
b += offsetb;
c += offsetc;
if ((k/m) > 3 && (k/n) > 3) { // parallelize k
if (creal(*beta) == 0 && cimag(*beta) == 0) {
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
c[i*dimc+j] = 0;
}
}
} else {
for (i = 0; i < n; i++) {
for (j = 0; j < m; j++) {
c[i*dimc+j] *= beta[0];
}
}
}
#pragma omp parallel private(i, j)
{
int nthread = omp_get_num_threads();
int nblk = MAX((k+nthread-1) / nthread, 1);
double complex Z0 = 0;
double complex *cpriv = malloc(sizeof(double complex) * (m*n+2));
int di;
size_t ij;
size_t astride = nblk;
size_t bstride = nblk;
if (trans_a == 'N') {
astride *= lda;
}
if (trans_b != 'N') {
bstride *= ldb;
}
#pragma omp for
for (i = 0; i < nthread; i++) {
di = MIN(nblk, k-i*nblk);
if (di > 0) {
zgemm_(&trans_a, &trans_b, &m, &n, &di,
alpha, a+astride*i, &lda,
b+bstride*i, &ldb,
&Z0, cpriv, &m);
}
}
#pragma omp critical
if (di > 0) {
for (ij = 0, i = 0; i < n; i++) {
for (j = 0; j < m; j++, ij++) {
c[i*dimc+j] += cpriv[ij];
}
}
}
free(cpriv);
}
} else if (m > n*2) { // parallelize m
#pragma omp parallel
{
int nthread = omp_get_num_threads();
int nblk = MAX((m+nthread-1) / nthread, 1);
nthread = (m+nblk-1) / nblk;
int di;
size_t bstride = nblk;
if (trans_a != 'N') {
bstride *= lda;
}
#pragma omp for
for (i = 0; i < nthread; i++) {
di = MIN(nblk, m-i*nblk);
if (di > 0) {
zgemm_(&trans_a, &trans_b, &di, &n, &k,
alpha, a+bstride*i, &lda, b, &ldb,
beta, c+i*nblk, &ldc);
}
}
}
} else { // parallelize n
#pragma omp parallel
{
int nthread = omp_get_num_threads();
int nblk = MAX((n+nthread-1) / nthread, 1);
nthread = (n+nblk-1) / nblk;
int di;
size_t bstride = nblk;
size_t cstride = dimc * nblk;
if (trans_b == 'N') {
bstride *= ldb;
}
#pragma omp for
for (i = 0; i < nthread; i++) {
di = MIN(nblk, n-i*nblk);
if (di > 0) {
zgemm_(&trans_a, &trans_b, &m, &di, &k,
alpha, a, &lda, b+bstride*i, &ldb,
beta, c+cstride*i, &ldc);
}
}
}
}
}
|
blas_dh.c | /*BHEADER**********************************************************************
* Copyright (c) 2008, Lawrence Livermore National Security, LLC.
* Produced at the Lawrence Livermore National Laboratory.
* This file is part of HYPRE. See file COPYRIGHT for details.
*
* HYPRE is free software; you can redistribute it and/or modify it under the
* terms of the GNU Lesser General Public License (as published by the Free
* Software Foundation) version 2.1 dated February 1999.
*
* $Revision$
***********************************************************************EHEADER*/
#include "_hypre_Euclid.h"
/* #include "blas_dh.h" */
#undef __FUNC__
#define __FUNC__ "matvec_euclid_seq"
void matvec_euclid_seq(HYPRE_Int n, HYPRE_Int *rp, HYPRE_Int *cval, HYPRE_Real *aval, HYPRE_Real *x, HYPRE_Real *y)
{
START_FUNC_DH
HYPRE_Int i, j;
HYPRE_Int from, to, col;
HYPRE_Real sum;
if (np_dh > 1) SET_V_ERROR("only for sequential case!\n");
#ifdef USING_OPENMP_DH
#pragma omp parallel private(j, col, sum, from, to) \
default(shared) \
firstprivate(n, rp, cval, aval, x, y)
#endif
{
#ifdef USING_OPENMP_DH
#pragma omp for schedule(static)
#endif
for (i=0; i<n; ++i) {
sum = 0.0;
from = rp[i];
to = rp[i+1];
for (j=from; j<to; ++j) {
col = cval[j];
sum += (aval[j]*x[col]);
}
y[i] = sum;
}
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "Axpy"
void Axpy(HYPRE_Int n, HYPRE_Real alpha, HYPRE_Real *x, HYPRE_Real *y)
{
START_FUNC_DH
HYPRE_Int i;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x, y) \
private(i)
#endif
for (i=0; i<n; ++i) {
y[i] = alpha*x[i] + y[i];
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "CopyVec"
void CopyVec(HYPRE_Int n, HYPRE_Real *xIN, HYPRE_Real *yOUT)
{
START_FUNC_DH
HYPRE_Int i;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(yOUT, xIN) \
private(i)
#endif
for (i=0; i<n; ++i) {
yOUT[i] = xIN[i];
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "ScaleVec"
void ScaleVec(HYPRE_Int n, HYPRE_Real alpha, HYPRE_Real *x)
{
START_FUNC_DH
HYPRE_Int i;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(alpha, x) \
private(i)
#endif
for (i=0; i<n; ++i) {
x[i] *= alpha;
}
END_FUNC_DH
}
#undef __FUNC__
#define __FUNC__ "InnerProd"
HYPRE_Real InnerProd(HYPRE_Int n, HYPRE_Real *x, HYPRE_Real *y)
{
START_FUNC_DH
HYPRE_Real result, local_result = 0.0;
HYPRE_Int i;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x, y) \
private(i) \
reduction(+:local_result)
#endif
for (i=0; i<n; ++i) {
local_result += x[i] * y[i];
}
if (np_dh > 1) {
hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_DOUBLE, hypre_MPI_SUM, comm_dh);
} else {
result = local_result;
}
END_FUNC_VAL(result)
}
#undef __FUNC__
#define __FUNC__ "Norm2"
HYPRE_Real Norm2(HYPRE_Int n, HYPRE_Real *x)
{
START_FUNC_DH
HYPRE_Real result, local_result = 0.0;
HYPRE_Int i;
#ifdef USING_OPENMP_DH
#pragma omp parallel for schedule(static) firstprivate(x) \
private(i) \
reduction(+:local_result)
#endif
for (i=0; i<n; ++i) {
local_result += (x[i]*x[i]);
}
if (np_dh > 1) {
hypre_MPI_Allreduce(&local_result, &result, 1, hypre_MPI_DOUBLE, hypre_MPI_SUM, comm_dh);
} else {
result = local_result;
}
result = sqrt(result);
END_FUNC_VAL(result)
}
|
sum_openmp.c | /*
Copyright (C) 2018 Francesc Alted
http://blosc.org
License: BSD 3-Clause (see LICENSE.txt)
Example program showing how to operate with compressed buffers.
To compile this program for synthetic data (default):
$ gcc -fopenmp -O3 sum_openmp.c -o sum_openmp -lblosc2
To run:
$ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp
Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$)
Sum for uncompressed data: 199950000000
Sum time for uncompressed data: 0.0288 s, 26459.3 MB/s
Compression ratio: 762.9 MB -> 14.0 MB (54.6x)
Compression time: 0.288 s, 2653.5 MB/s
Sum for *compressed* data: 199950000000
Sum time for *compressed* data: 0.0188 s, 40653.7 MB/s
To use real (rainfall) data:
$ gcc -DRAINFALL -fopenmp -Ofast sum_openmp.c -o sum_openmp
And running it:
$ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp
Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$)
Sum for uncompressed data: 29741012
Sum time for uncompressed data: 0.0149 s, 25627.4 MB/s
Compression ratio: 381.5 MB -> 71.3 MB (5.3x)
Compression time: 1.53 s, 249.1 MB/s
Sum for *compressed* data: 29741012
Sum time for *compressed* data: 0.0247 s, 15467.5 MB/s
*/
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <sys/stat.h>
#include <errno.h>
#include <assert.h>
#include "blosc2.h"
#define KB 1024.
#define MB (1024*KB)
#define GB (1024*MB)
#define N (100 * 1000 * 1000)
#define CHUNKSIZE (4 * 1000)
#define NCHUNKS (N / CHUNKSIZE)
#define NTHREADS 8
#define NITER 5
#ifdef RAINFALL
#define SYNTHETIC false
#else
#define SYNTHETIC true
#endif
#if SYNTHETIC == true
#define DTYPE int64_t
#define CLEVEL 3
#define CODEC BLOSC_BLOSCLZ
#else
#define DTYPE float
#define CLEVEL 1
#define CODEC BLOSC_LZ4HC
#endif
int main() {
static DTYPE udata[N];
DTYPE chunk_buf[CHUNKSIZE];
size_t isize = CHUNKSIZE * sizeof(DTYPE);
DTYPE sum, compressed_sum;
int64_t nbytes, cbytes;
blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS;
blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS;
blosc2_schunk* schunk;
int i, j, nchunk;
blosc_timestamp_t last, current;
double ttotal, itotal;
printf("Blosc version info: %s (%s)\n",
BLOSC_VERSION_STRING, BLOSC_VERSION_DATE);
// Fill the buffer for a chunk
if (SYNTHETIC) {
for (j = 0; j < CHUNKSIZE; j++) {
chunk_buf[j] = j;
}
}
else {
struct stat info;
const char *filegrid = "rainfall-grid-150x150.bin";
if (stat(filegrid, &info) != 0) {
printf("Grid file %s not found!", filegrid);
exit(1);
}
char *cdata = malloc(info.st_size);
FILE *f = fopen(filegrid, "rb");
size_t blocks_read = fread(cdata, info.st_size, 1, f);
assert(blocks_read == 1);
fclose(f);
int dsize = blosc_getitem(cdata, 0, CHUNKSIZE, chunk_buf);
if (dsize < 0) {
printf("blosc_getitem() error. Error code: %d\n. Probaly reading too much data?", dsize);
exit(1);
}
free(cdata);
}
// Fill the uncompressed dataset with data chunks
for (i = 0; i < N / CHUNKSIZE; i++) {
for (j = 0; j < CHUNKSIZE; j++) {
udata[i * CHUNKSIZE + j] = chunk_buf[j];
}
}
// Reduce uncompressed dataset
ttotal = 1e10;
sum = 0;
for (int n = 0; n < NITER; n++) {
sum = 0;
blosc_set_timestamp(&last);
#pragma omp parallel for reduction (+:sum)
for (i = 0; i < N; i++) {
sum += udata[i];
}
blosc_set_timestamp(¤t);
itotal = blosc_elapsed_secs(last, current);
if (itotal < ttotal) ttotal = itotal;
}
printf("Sum for uncompressed data: %10.0f\n", (double)sum);
printf("Sum time for uncompressed data: %.3g s, %.1f MB/s\n",
ttotal, (isize * NCHUNKS) / (ttotal * (double)MB));
// Create a super-chunk container for the compressed container
cparams.typesize = sizeof(DTYPE);
cparams.compcode = CODEC;
cparams.clevel = CLEVEL;
cparams.nthreads = 1;
dparams.nthreads = 1;
blosc_set_timestamp(&last);
schunk = blosc2_new_schunk(cparams, dparams, NULL);
for (nchunk = 0; nchunk < NCHUNKS; nchunk++) {
for (i = 0; i < CHUNKSIZE; i++) {
chunk_buf[i] = udata[i + nchunk * CHUNKSIZE];
}
blosc2_schunk_append_buffer(schunk, chunk_buf, isize);
}
blosc_set_timestamp(¤t);
ttotal = blosc_elapsed_secs(last, current);
nbytes = schunk->nbytes;
cbytes = schunk->cbytes;
printf("Compression ratio: %.1f MB -> %.1f MB (%.1fx)\n",
nbytes / MB, cbytes / MB, (1. * nbytes) / cbytes);
printf("Compression time: %.3g s, %.1f MB/s\n",
ttotal, nbytes / (ttotal * MB));
int nthreads = NTHREADS;
char* envvar = getenv("OMP_NUM_THREADS");
if (envvar != NULL) {
long value;
value = strtol(envvar, NULL, 10);
if ((value != EINVAL) && (value >= 0)) {
nthreads = (int)value;
}
}
// Build buffers and contexts for computations
int nchunks_thread = NCHUNKS / nthreads;
int remaining_chunks = NCHUNKS - nchunks_thread * nthreads;
blosc2_context **dctx = malloc(nthreads * sizeof(void*));
DTYPE** chunk = malloc(nthreads * sizeof(void*));
for (j = 0; j < nthreads; j++) {
chunk[j] = malloc(CHUNKSIZE * sizeof(DTYPE));
}
// Reduce uncompressed dataset
blosc_set_timestamp(&last);
ttotal = 1e10;
compressed_sum = 0;
for (int n = 0; n < NITER; n++) {
compressed_sum = 0;
#pragma omp parallel for private(nchunk) reduction (+:compressed_sum)
for (j = 0; j < nthreads; j++) {
dctx[j] = blosc2_create_dctx(dparams);
for (nchunk = 0; nchunk < nchunks_thread; nchunk++) {
blosc2_decompress_ctx(dctx[j], schunk->data[j * nchunks_thread + nchunk],
(void*)(chunk[j]), isize);
for (i = 0; i < CHUNKSIZE; i++) {
compressed_sum += chunk[j][i];
//compressed_sum += i + (j * nchunks_thread + nchunk) * CHUNKSIZE;
}
}
}
for (nchunk = NCHUNKS - remaining_chunks; nchunk < NCHUNKS; nchunk++) {
blosc2_decompress_ctx(dctx[0], schunk->data[nchunk], (void*)(chunk[0]), isize);
for (i = 0; i < CHUNKSIZE; i++) {
compressed_sum += chunk[0][i];
//compressed_sum += i + nchunk * CHUNKSIZE;
}
}
blosc_set_timestamp(¤t);
itotal = blosc_elapsed_secs(last, current);
if (itotal < ttotal) ttotal = itotal;
}
printf("Sum for *compressed* data: %10.0f\n", (double)compressed_sum);
printf("Sum time for *compressed* data: %.3g s, %.1f MB/s\n",
ttotal, nbytes / (ttotal * MB));
//printf("sum, csum: %f, %f\n", sum, compressed_sum);
if (SYNTHETIC) {
// difficult to fulfill for single precision
assert(sum == compressed_sum);
}
/* Free resources */
blosc2_free_schunk(schunk);
return 0;
}
|
compare.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC OOO M M PPPP AAA RRRR EEEEE %
% C O O MM MM P P A A R R E %
% C O O M M M PPPP AAAAA RRRR EEE %
% C O O M M P A A R R E %
% CCCC OOO M M P A A R R EEEEE %
% %
% %
% MagickCore Image Comparison Methods %
% %
% Software Design %
% Cristy %
% December 2003 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/compare.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/constitute.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/resource_.h"
#include "MagickCore/string_.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o m p a r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CompareImages() compares one or more pixel channels of an image to a
% reconstructed image and returns the difference image.
%
% The format of the CompareImages method is:
%
% Image *CompareImages(const Image *image,const Image *reconstruct_image,
% const MetricType metric,double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) != 0)
channels++;
}
return(channels == 0 ? (size_t) 1 : channels);
}
MagickExport Image *CompareImages(Image *image,const Image *reconstruct_image,
const MetricType metric,double *distortion,ExceptionInfo *exception)
{
CacheView
*highlight_view,
*image_view,
*reconstruct_view;
const char
*artifact;
double
fuzz;
Image
*clone_image,
*difference_image,
*highlight_image;
MagickBooleanType
status;
PixelInfo
highlight,
lowlight,
masklight;
RectangleInfo
geometry;
size_t
columns,
rows;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(reconstruct_image != (const Image *) NULL);
assert(reconstruct_image->signature == MagickCoreSignature);
assert(distortion != (double *) NULL);
*distortion=0.0;
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
/*
Compute the requested distortion metric first; on failure no difference
image is produced.
*/
status=GetImageDistortion(image,reconstruct_image,metric,distortion,
exception);
if (status == MagickFalse)
return((Image *) NULL);
/*
The difference image spans the larger of the two image geometries.
*/
columns=MagickMax(image->columns,reconstruct_image->columns);
rows=MagickMax(image->rows,reconstruct_image->rows);
SetGeometry(image,&geometry);
geometry.width=columns;
geometry.height=rows;
clone_image=CloneImage(image,0,0,MagickTrue,exception);
if (clone_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageMask(clone_image,ReadPixelMask,(Image *) NULL,exception);
difference_image=ExtentImage(clone_image,&geometry,exception);
clone_image=DestroyImage(clone_image);
if (difference_image == (Image *) NULL)
return((Image *) NULL);
(void) SetImageAlphaChannel(difference_image,OpaqueAlphaChannel,exception);
highlight_image=CloneImage(image,columns,rows,MagickTrue,exception);
if (highlight_image == (Image *) NULL)
{
difference_image=DestroyImage(difference_image);
return((Image *) NULL);
}
status=SetImageStorageClass(highlight_image,DirectClass,exception);
if (status == MagickFalse)
{
difference_image=DestroyImage(difference_image);
highlight_image=DestroyImage(highlight_image);
return((Image *) NULL);
}
(void) SetImageMask(highlight_image,ReadPixelMask,(Image *) NULL,exception);
(void) SetImageAlphaChannel(highlight_image,OpaqueAlphaChannel,exception);
/*
Built-in highlight/lowlight/masklight colors; each can be overridden by
the corresponding "compare:*-color" image artifact.
*/
(void) QueryColorCompliance("#f1001ecc",AllCompliance,&highlight,exception);
artifact=GetImageArtifact(image,"compare:highlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&highlight,exception);
(void) QueryColorCompliance("#ffffffcc",AllCompliance,&lowlight,exception);
artifact=GetImageArtifact(image,"compare:lowlight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&lowlight,exception);
(void) QueryColorCompliance("#888888cc",AllCompliance,&masklight,exception);
artifact=GetImageArtifact(image,"compare:masklight-color");
if (artifact != (const char *) NULL)
(void) QueryColorCompliance(artifact,AllCompliance,&masklight,exception);
/*
Generate difference image.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
highlight_view=AcquireAuthenticCacheView(highlight_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,highlight_image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p,
*magick_restrict q;
register Quantum
*magick_restrict r;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
r=QueueCacheViewAuthenticPixels(highlight_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL) ||
(r == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickStatusType
difference;
register ssize_t
i;
/* Pixels excluded by either read mask are painted with masklight. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
SetPixelViaPixelInfo(highlight_image,&masklight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* Alpha-weighted channel difference; squared distance beats fuzz
=> the pixel differs. */
distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
if ((distance*distance) > fuzz)
{
difference=MagickTrue;
break;
}
}
if (difference == MagickFalse)
SetPixelViaPixelInfo(highlight_image,&lowlight,r);
else
SetPixelViaPixelInfo(highlight_image,&highlight,r);
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
r+=GetPixelChannels(highlight_image);
}
sync=SyncCacheViewAuthenticPixels(highlight_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
highlight_view=DestroyCacheView(highlight_view);
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
Compose the highlight image over the extended clone to produce the
returned difference image; on failure it is destroyed and NULL returned.
*/
(void) CompositeImage(difference_image,highlight_image,image->compose,
MagickTrue,0,0,exception);
(void) SetImageAlphaChannel(difference_image,OffAlphaChannel,exception);
highlight_image=DestroyImage(highlight_image);
if (status == MagickFalse)
difference_image=DestroyImage(difference_image);
return(difference_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortion() compares one or more pixel channels of an image to a
% reconstructed image and returns the specified distortion metric.
%
% The format of the GetImageDistortion method is:
%
% MagickBooleanType GetImageDistortion(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% double *distortion,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o distortion: the computed distortion between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
Count, per channel, the number of pixels whose alpha-weighted difference
exceeds the fuzz threshold; results accumulate into distortion[0..
MaxPixelChannels], with the pixel-level count in CompositePixelChannel.
*/
static MagickBooleanType GetAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
fuzz;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
/*
Compute the absolute difference in pixels between two images.
*/
status=MagickTrue;
fuzz=GetFuzzyColorDistance(image,reconstruct_image);
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
j,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
MagickBooleanType
difference;
register ssize_t
i;
/* NOTE(review): this metric checks GetPixelWriteMask while the other
metrics in this file check GetPixelReadMask on both images -- confirm
whether this asymmetry is intentional. */
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
difference=MagickFalse;
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q);
if ((distance*distance) > fuzz)
{
channel_distortion[i]++;
difference=MagickTrue;
}
}
if (difference != MagickFalse)
channel_distortion[CompositePixelChannel]++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
/* Merge this row's private counters into the shared totals. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetAbsoluteError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
Fuzz metric: mean of the squared, quantum-scaled, alpha-weighted channel
differences, averaged over the unmasked pixel count; the composite entry
is additionally divided by the channel count and square-rooted.
*/
static MagickBooleanType GetFuzzDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
register ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1) reduction(+:area)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Skip pixels excluded by either read mask; they do not count toward
the averaged area. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
channel_distortion[i]+=distance*distance;
channel_distortion[CompositePixelChannel]+=distance*distance;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
/* Merge this row's private sums into the shared totals. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetFuzzDistortion)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* PerceptibleReciprocal() guards against a zero (fully masked) area. */
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]);
return(status);
}
/*
Mean absolute error: average of |Sa*p - Da*q| per channel, quantum-scaled,
over the unmasked pixel count; the composite entry is further divided by
the number of updatable channels.
*/
static MagickBooleanType GetMeanAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
register ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1) reduction(+:area)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Masked pixels are excluded from both the sums and the area. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
channel_distortion[i]+=distance;
channel_distortion[CompositePixelChannel]+=distance;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
/* Merge this row's private sums into the shared totals. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetMeanAbsoluteError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* PerceptibleReciprocal() guards against a zero (fully masked) area. */
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
return(status);
}
/*
Mean error per pixel: accumulates absolute channel differences into
distortion[] and writes the mean/normalized-mean/normalized-maximum error
statistics into image->error. Runs single-threaded (no OpenMP region).
*/
static MagickBooleanType GetMeanErrorPerPixel(Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
double
area,
maximum_error,
mean_error;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
area=0.0;
maximum_error=0.0;
mean_error=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Masked pixels contribute nothing -- not even to the area. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,channel,q));
distortion[i]+=distance;
distortion[CompositePixelChannel]+=distance;
mean_error+=distance*distance;
if (distance > maximum_error)
maximum_error=distance;
/* area counts channel samples here, not pixels. */
area++;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* NOTE(review): area stays 0.0 when every pixel is masked or the images
are empty, making these divisions 0/0 -- confirm callers guarantee at
least one unmasked sample or consider PerceptibleReciprocal(). */
image->error.mean_error_per_pixel=distortion[CompositePixelChannel]/area;
image->error.normalized_mean_error=QuantumScale*QuantumScale*mean_error/area;
image->error.normalized_maximum_error=QuantumScale*maximum_error;
return(status);
}
/*
Mean squared error: average of the squared, quantum-scaled, alpha-weighted
channel differences over the unmasked pixel count; the composite entry is
further divided by the number of updatable channels.
*/
static MagickBooleanType GetMeanSquaredDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
double
area;
MagickBooleanType
status;
register ssize_t
j;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1) reduction(+:area)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Masked pixels are excluded from both the sums and the area. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=QuantumScale*(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
channel_distortion[i]+=distance*distance;
channel_distortion[CompositePixelChannel]+=distance*distance;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
/* Merge this row's private sums into the shared totals. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetMeanSquaredError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]+=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/* PerceptibleReciprocal() guards against a zero (fully masked) area. */
area=PerceptibleReciprocal(area);
for (j=0; j <= MaxPixelChannels; j++)
distortion[j]*=area;
distortion[CompositePixelChannel]/=GetImageChannels(image);
return(status);
}
/*
Normalized cross correlation: covariance of corresponding channels divided
by the product of their standard deviations. Pass one counts unmasked
pixels (the averaging area); pass two accumulates the covariance; the
final loop normalizes by the per-channel standard deviations.
*/
static MagickBooleanType GetNormalizedCrossCorrelationDistortion(
const Image *image,const Image *reconstruct_image,double *distortion,
ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"
CacheView
*image_view,
*reconstruct_view;
ChannelStatistics
*image_statistics,
*reconstruct_statistics;
double
area;
MagickBooleanType
status;
MagickOffsetType
progress;
register ssize_t
i;
size_t
columns,
rows;
ssize_t
y;
/*
Normalize to account for variation due to lighting and exposure condition.
*/
image_statistics=GetImageStatistics(image,exception);
reconstruct_statistics=GetImageStatistics(reconstruct_image,exception);
if ((image_statistics == (ChannelStatistics *) NULL) ||
(reconstruct_statistics == (ChannelStatistics *) NULL))
{
if (image_statistics != (ChannelStatistics *) NULL)
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
if (reconstruct_statistics != (ChannelStatistics *) NULL)
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
return(MagickFalse);
}
status=MagickTrue;
progress=0;
for (i=0; i <= MaxPixelChannels; i++)
distortion[i]=0.0;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
area=0.0;
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
/*
Pass one: count the pixels not excluded by either read mask.
*/
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
area++;
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
}
area=PerceptibleReciprocal(area);
/*
Pass two: accumulate the per-channel covariance, scaled by 1/area.
*/
for (y=0; y < (ssize_t) rows; y++)
{
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
break;
}
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*(image->alpha_trait != UndefinedPixelTrait ?
GetPixelAlpha(image,p) : OpaqueAlpha);
Da=QuantumScale*(reconstruct_image->alpha_trait != UndefinedPixelTrait ?
GetPixelAlpha(reconstruct_image,q) : OpaqueAlpha);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
/* The alpha channel itself is not alpha-weighted; all other
channels are premultiplied by their pixel's alpha. */
if (channel == AlphaPixelChannel)
{
distortion[i]+=area*QuantumScale*(p[i]-
image_statistics[channel].mean)*(GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
else
{
distortion[i]+=area*QuantumScale*(Sa*p[i]-
image_statistics[channel].mean)*(Da*GetPixelChannel(
reconstruct_image,channel,q)-
reconstruct_statistics[channel].mean);
}
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
proceed=SetImageProgress(image,SimilarityImageTag,progress++,rows);
if (proceed == MagickFalse)
{
status=MagickFalse;
break;
}
}
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
/*
Divide by the standard deviation.
*/
distortion[CompositePixelChannel]=0.0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
gamma;
PixelChannel channel = GetPixelChannelChannel(image,i);
gamma=image_statistics[channel].standard_deviation*
reconstruct_statistics[channel].standard_deviation;
gamma=PerceptibleReciprocal(gamma);
distortion[i]=QuantumRange*gamma*distortion[i];
distortion[CompositePixelChannel]+=distortion[i]*distortion[i];
}
/* Composite value is the RMS of the per-channel correlations. */
distortion[CompositePixelChannel]=sqrt(distortion[CompositePixelChannel]/
GetImageChannels(image));
/*
Free resources.
*/
reconstruct_statistics=(ChannelStatistics *) RelinquishMagickMemory(
reconstruct_statistics);
image_statistics=(ChannelStatistics *) RelinquishMagickMemory(
image_statistics);
return(status);
}
/*
Peak absolute error: the maximum quantum-scaled, alpha-weighted absolute
channel difference over all unmasked pixels (maximum, not a sum -- note
the merge step takes the larger value rather than adding).
*/
static MagickBooleanType GetPeakAbsoluteDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
CacheView
*image_view,
*reconstruct_view;
MagickBooleanType
status;
size_t
columns,
rows;
ssize_t
y;
status=MagickTrue;
rows=MagickMax(image->rows,reconstruct_image->rows);
columns=MagickMax(image->columns,reconstruct_image->columns);
image_view=AcquireVirtualCacheView(image,exception);
reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status) \
magick_threads(image,image,rows,1)
#endif
for (y=0; y < (ssize_t) rows; y++)
{
double
channel_distortion[MaxPixelChannels+1];
register const Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
j,
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
{
status=MagickFalse;
continue;
}
(void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
for (x=0; x < (ssize_t) columns; x++)
{
double
Da,
Sa;
register ssize_t
i;
/* Masked pixels cannot contribute a peak value. */
if ((GetPixelReadMask(image,p) <= (QuantumRange/2)) ||
(GetPixelReadMask(reconstruct_image,q) <= (QuantumRange/2)))
{
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
continue;
}
Sa=QuantumScale*GetPixelAlpha(image,p);
Da=QuantumScale*GetPixelAlpha(reconstruct_image,q);
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
distance;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(reconstruct_traits == UndefinedPixelTrait) ||
((reconstruct_traits & UpdatePixelTrait) == 0))
continue;
distance=QuantumScale*fabs(Sa*p[i]-Da*GetPixelChannel(reconstruct_image,
channel,q));
if (distance > channel_distortion[i])
channel_distortion[i]=distance;
if (distance > channel_distortion[CompositePixelChannel])
channel_distortion[CompositePixelChannel]=distance;
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(reconstruct_image);
}
/* Merge: keep the maximum of this row's peaks and the shared peaks. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetPeakAbsoluteError)
#endif
for (j=0; j <= MaxPixelChannels; j++)
if (channel_distortion[j] > distortion[j])
distortion[j]=channel_distortion[j];
}
reconstruct_view=DestroyCacheView(reconstruct_view);
image_view=DestroyCacheView(image_view);
return(status);
}
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
static MagickBooleanType GetPeakSignalToNoiseRatio(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  /*
    Express each channel's mean-squared error in decibels; a near-zero
    error (essentially identical channels) is reported as INFINITY.
  */
  MagickBooleanType
    status;

  register ssize_t
    i;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,
    exception);
  for (i=0; i <= MaxPixelChannels; i++)
  {
    if (fabs(distortion[i]) >= MagickEpsilon)
      distortion[i]=20.0*MagickLog10(1.0/sqrt(distortion[i]));
    else
      distortion[i]=INFINITY;
  }
  return(status);
}
/*
Perceptual-hash metric: compares the image moments of both images across
every colorspace recorded in the hash; honors the "phash:normalize"
artifact. Always returns MagickTrue once both hashes are acquired.
*/
static MagickBooleanType GetPerceptualHashDistortion(const Image *image,
const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
ChannelPerceptualHash
*channel_phash,
*reconstruct_phash;
const char
*artifact;
MagickBooleanType
normalize;
ssize_t
channel;
/*
Compute perceptual hash in the sRGB colorspace.
*/
channel_phash=GetImagePerceptualHash(image,exception);
if (channel_phash == (ChannelPerceptualHash *) NULL)
return(MagickFalse);
reconstruct_phash=GetImagePerceptualHash(reconstruct_image,exception);
if (reconstruct_phash == (ChannelPerceptualHash *) NULL)
{
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
channel_phash);
return(MagickFalse);
}
artifact=GetImageArtifact(image,"phash:normalize");
normalize=(artifact == (const char *) NULL) ||
(IsStringTrue(artifact) == MagickFalse) ? MagickFalse : MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4)
#endif
for (channel=0; channel < MaxPixelChannels; channel++)
{
double
difference;
register ssize_t
i;
difference=0.0;
for (i=0; i < MaximumNumberOfImageMoments; i++)
{
double
alpha,
beta;
register ssize_t
j;
for (j=0; j < (ssize_t) channel_phash[0].number_colorspaces; j++)
{
alpha=channel_phash[channel].phash[j][i];
beta=reconstruct_phash[channel].phash[j][i];
/* NOTE(review): the normalize branch assigns (=) rather than
accumulates (+=), so it keeps only the last moment/colorspace
term -- confirm this is the intended normalization. */
if (normalize == MagickFalse)
difference+=(beta-alpha)*(beta-alpha);
else
difference=sqrt((beta-alpha)*(beta-alpha)/
channel_phash[0].number_channels);
}
}
distortion[channel]+=difference;
/* Composite entry is shared across threads, hence the critical section. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetPerceptualHashDistortion)
#endif
distortion[CompositePixelChannel]+=difference;
}
/*
Free resources.
*/
reconstruct_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(
reconstruct_phash);
channel_phash=(ChannelPerceptualHash *) RelinquishMagickMemory(channel_phash);
return(MagickTrue);
}
/*
  GetRootMeanSquaredDistortion() computes the RMSE metric: the square root of
  the mean-squared distortion, taken channel by channel (including the
  composite channel slot).
*/
static MagickBooleanType GetRootMeanSquaredDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    channel;

  status=GetMeanSquaredDistortion(image,reconstruct_image,distortion,
    exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=sqrt(distortion[channel]);
  return(status);
}
/*
  GetStructuralSimilarityDistortion() computes the SSIM index between tiles of
  `image' and `reconstruct_image' using a Gaussian weighting kernel, and
  accumulates the per-channel results into distortion[].  The kernel radius,
  sigma and the K1/K2 stabilizing constants can be overridden with the
  "compare:ssim-radius", "compare:ssim-sigma", "compare:ssim-k1" and
  "compare:ssim-k2" image artifacts.

  Fix: the per-row statistics loop used the function-scope index `i' inside
  the OpenMP parallel region (the `i' previously declared in the x-loop is out
  of scope there), so every thread raced on the same shared index.  A
  thread-private index is now declared at y-loop scope.
*/
static MagickBooleanType GetStructuralSimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
#define SSIMRadius 5.0
#define SSIMSigma 1.5
#define SSIMBlocksize 8
#define SSIMK1 0.01
#define SSIMK2 0.03
#define SSIML 1.0

  CacheView
    *image_view,
    *reconstruct_view;

  char
    geometry[MagickPathExtent];

  const char
    *artifact;

  double
    c1,
    c2,
    radius,
    sigma;

  KernelInfo
    *kernel_info;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    tile_y;

  /*
    Compute structural similarity index.
  */
  radius=SSIMRadius;
  artifact=GetImageArtifact(image,"compare:ssim-radius");
  if (artifact != (const char *) NULL)
    radius=StringToDouble(artifact,(char **) NULL);
  sigma=SSIMSigma;
  artifact=GetImageArtifact(image,"compare:ssim-sigma");
  if (artifact != (const char *) NULL)
    sigma=StringToDouble(artifact,(char **) NULL);
  (void) FormatLocaleString(geometry,MagickPathExtent,"gaussian:%.20gx%.20g",
    radius,sigma);
  kernel_info=AcquireKernelInfo(geometry,exception);
  if (kernel_info == (KernelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  c1=pow(SSIMK1*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k1");
  if (artifact != (const char *) NULL)
    c1=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  c2=pow(SSIMK2*SSIML,2.0);
  artifact=GetImageArtifact(image,"compare:ssim-k2");
  if (artifact != (const char *) NULL)
    c2=pow(StringToDouble(artifact,(char **) NULL)*SSIML,2.0);
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,image,1,1)
#endif
  for (tile_y=0; tile_y < (ssize_t) image->rows; tile_y+=(ssize_t) kernel_info->height)
  {
    double
      channel_distortion[MaxPixelChannels+1];

    register ssize_t
      tile_x;

    ssize_t
      j;

    if (status == MagickFalse)
      continue;
    (void) ResetMagickMemory(channel_distortion,0,sizeof(channel_distortion));
    for (tile_x=0; tile_x < (ssize_t) image->columns; tile_x+=(ssize_t) kernel_info->width)
    {
      double
        image_sum[MaxPixelChannels+1],
        image_sum_squared[MaxPixelChannels+1],
        reconstruct_sum[MaxPixelChannels+1],
        reconstruct_sum_squared[MaxPixelChannels+1],
        sum[MaxPixelChannels+1];

      register const Quantum
        *magick_restrict p,
        *magick_restrict q;

      register double
        *k;

      register ssize_t
        y;

      p=GetCacheViewVirtualPixels(image_view,tile_x,tile_y,kernel_info->width,
        kernel_info->height,exception);
      q=GetCacheViewVirtualPixels(reconstruct_view,tile_x,tile_y,
        kernel_info->width,kernel_info->height,exception);
      if ((p == (const Quantum *) NULL) || (q == (const Quantum *) NULL))
        {
          status=MagickFalse;
          break;
        }
      (void) ResetMagickMemory(image_sum,0,(MaxPixelChannels+1)*
        sizeof(*image_sum));
      (void) ResetMagickMemory(image_sum_squared,0,(MaxPixelChannels+1)*
        sizeof(*image_sum_squared));
      (void) ResetMagickMemory(reconstruct_sum,0,(MaxPixelChannels+1)*
        sizeof(*reconstruct_sum));
      (void) ResetMagickMemory(reconstruct_sum_squared,0,(MaxPixelChannels+1)*
        sizeof(*reconstruct_sum_squared));
      (void) ResetMagickMemory(sum,0,(MaxPixelChannels+1)*sizeof(*sum));
      k=kernel_info->values;
      for (y=0; y < (ssize_t) kernel_info->height; y++)
      {
        register ssize_t
          i,  /* thread-private; shadows the shared function-scope index */
          x;

        for (x=0; x < (ssize_t) kernel_info->width; x++)
        {
          for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
          {
            PixelChannel channel = GetPixelChannelChannel(image,i);
            PixelTrait traits = GetPixelChannelTraits(image,channel);
            PixelTrait reconstruct_traits = GetPixelChannelTraits(
              reconstruct_image,channel);
            if ((traits == UndefinedPixelTrait) ||
                (reconstruct_traits == UndefinedPixelTrait) ||
                ((reconstruct_traits & UpdatePixelTrait) == 0))
              continue;
            /*
              Accumulate kernel-weighted sums, squared sums, and the cross
              term needed for the covariance below.
            */
            image_sum[i]+=(*k)*p[i];
            image_sum_squared[i]+=((*k)*p[i]*(*k)*p[i]);
            reconstruct_sum[i]+=(*k)*GetPixelChannel(reconstruct_image,channel,
              q);
            reconstruct_sum_squared[i]+=((*k)*GetPixelChannel(reconstruct_image,
              channel,q)*(*k)*GetPixelChannel(reconstruct_image,channel,q));
            sum[i]+=((*k)*p[i]*(*k)*GetPixelChannel(reconstruct_image,channel,
              q));
          }
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          k++;
        }
        for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
        {
          double
            covariance,
            image_mean,
            image_variance,
            reconstruct_mean,
            reconstruct_variance;

          size_t
            number_samples;

          /*
            Standard SSIM formula with stabilizers c1 and c2.
          */
          number_samples=kernel_info->width*kernel_info->height;
          image_mean=image_sum[i]/number_samples;
          image_variance=image_sum_squared[i]/number_samples-(image_mean*
            image_mean);
          reconstruct_mean=reconstruct_sum[i]/number_samples;
          reconstruct_variance=reconstruct_sum_squared[i]/number_samples-
            (reconstruct_mean*reconstruct_mean);
          covariance=sum[i]/number_samples-(image_mean*reconstruct_mean);
          channel_distortion[i]+=((2.0*image_mean*reconstruct_mean+c1)*(2.0*
            covariance+c2))/((image_mean*image_mean+reconstruct_mean*
            reconstruct_mean+c1)*(image_variance+reconstruct_variance+c2));
        }
      }
    }
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp critical (MagickCore_GetStructuralSimilarityDistortion)
#endif
    for (j=0; j <= MaxPixelChannels; j++)
      distortion[j]+=channel_distortion[j];
  }
  image_view=DestroyCacheView(image_view);
  reconstruct_view=DestroyCacheView(reconstruct_view);
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    PixelChannel channel = GetPixelChannelChannel(image,i);
    PixelTrait traits = GetPixelChannelTraits(image,channel);
    if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0))
      continue;
    /*
      Normalize by the number of accumulated tile rows.
    */
    distortion[i]/=(double) (kernel_info->height*(image->rows/
      kernel_info->height+1)*(image->columns/kernel_info->width+1));
    distortion[CompositePixelChannel]+=distortion[i];
  }
  distortion[CompositePixelChannel]/=(double) GetImageChannels(image);
  kernel_info=DestroyKernelInfo(kernel_info);
  return(status);
}
/*
  GetStructuralDisimilarityDistortion() computes DSSIM, defined here as
  (1-SSIM)/2, for every channel slot (including the composite channel).
*/
static MagickBooleanType GetStructuralDisimilarityDistortion(const Image *image,
  const Image *reconstruct_image,double *distortion,ExceptionInfo *exception)
{
  MagickBooleanType
    status;

  register ssize_t
    channel;

  status=GetStructuralSimilarityDistortion(image,reconstruct_image,distortion,
    exception);
  for (channel=0; channel <= MaxPixelChannels; channel++)
    distortion[channel]=(1.0-(distortion[channel]))/2.0;
  return(status);
}
/*
  GetImageDistortion() compares the pixel channels of `image' against
  `reconstruct_image' with the requested metric and returns the composite
  result in *distortion.  The metric is also recorded in the image's
  "distortion" property.  Returns the status of the underlying metric
  computation.

  Fix: the identical TraceEvent debug log was emitted twice; the redundant
  second call is removed.
*/
MagickExport MagickBooleanType GetImageDistortion(Image *image,
  const Image *reconstruct_image,const MetricType metric,double *distortion,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  assert(distortion != (double *) NULL);
  *distortion=0.0;
  /*
    Get image distortion: one slot per pixel channel plus the composite.
  */
  length=MaxPixelChannels+1;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  *distortion=channel_distortion[CompositePixelChannel];
  channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
  (void) FormatImageProperty(image,"distortion","%.*g",GetMagickPrecision(),
    *distortion);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e D i s t o r t i o n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDistortions() compares the pixel channels of an image to a
% reconstructed image and returns the specified distortion metric for each
% channel.
%
% The format of the GetImageDistortions method is:
%
% double *GetImageDistortions(const Image *image,
% const Image *reconstruct_image,const MetricType metric,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o metric: the metric.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetImageDistortions() compares the pixel channels of `image' against
  `reconstruct_image' and returns a newly allocated array with the requested
  distortion metric for each channel (plus the composite channel), or NULL on
  failure.  The caller is responsible for freeing the returned array.

  Fix: the PerceptualHashErrorMetric case mistakenly dispatched to
  GetRootMeanSquaredDistortion (copy-paste from the RMSE case); it now calls
  GetPerceptualHashDistortion, matching GetImageDistortion().  The redundant
  duplicate TraceEvent debug log is also removed.
*/
MagickExport double *GetImageDistortions(Image *image,
  const Image *reconstruct_image,const MetricType metric,
  ExceptionInfo *exception)
{
  double
    *channel_distortion;

  MagickBooleanType
    status;

  size_t
    length;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Get image distortion: one slot per pixel channel plus the composite.
  */
  length=MaxPixelChannels+1UL;
  channel_distortion=(double *) AcquireQuantumMemory(length,
    sizeof(*channel_distortion));
  if (channel_distortion == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) ResetMagickMemory(channel_distortion,0,length*
    sizeof(*channel_distortion));
  status=MagickTrue;
  switch (metric)
  {
    case AbsoluteErrorMetric:
    {
      status=GetAbsoluteDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case FuzzErrorMetric:
    {
      status=GetFuzzDistortion(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanAbsoluteErrorMetric:
    {
      status=GetMeanAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case MeanErrorPerPixelErrorMetric:
    {
      status=GetMeanErrorPerPixel(image,reconstruct_image,channel_distortion,
        exception);
      break;
    }
    case MeanSquaredErrorMetric:
    {
      status=GetMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case NormalizedCrossCorrelationErrorMetric:
    default:
    {
      status=GetNormalizedCrossCorrelationDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakAbsoluteErrorMetric:
    {
      status=GetPeakAbsoluteDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PeakSignalToNoiseRatioErrorMetric:
    {
      status=GetPeakSignalToNoiseRatio(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case PerceptualHashErrorMetric:
    {
      status=GetPerceptualHashDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case RootMeanSquaredErrorMetric:
    {
      status=GetRootMeanSquaredDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralSimilarityErrorMetric:
    {
      status=GetStructuralSimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
    case StructuralDissimilarityErrorMetric:
    {
      status=GetStructuralDisimilarityDistortion(image,reconstruct_image,
        channel_distortion,exception);
      break;
    }
  }
  if (status == MagickFalse)
    {
      channel_distortion=(double *) RelinquishMagickMemory(channel_distortion);
      return((double *) NULL);
    }
  return(channel_distortion);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e s E q u a l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImagesEqual() compares the pixels of two images and returns immediately
% if any pixel is not identical.
%
% The format of the IsImagesEqual method is:
%
% MagickBooleanType IsImagesEqual(const Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  IsImagesEqual() returns MagickTrue when every updatable channel of every
  pixel in `image' matches `reconstruct_image' to within MagickEpsilon; it
  stops scanning at the first differing pixel and returns MagickFalse.
  Pixels masked out by the image's write mask are skipped.
*/
MagickExport MagickBooleanType IsImagesEqual(const Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  /*
    Scan the union of both geometries; virtual pixels cover the overhang when
    the images differ in size.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* Skip pixels excluded by the write mask. */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        /* First mismatch: break out of the channel loop... */
        if (distance >= MagickEpsilon)
          break;
      }
      /* ...then propagate the break through the x loop... */
      if (i < (ssize_t) GetPixelChannels(image))
        break;
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
    /* ...and through the y loop, leaving y < rows on mismatch. */
    if (x < (ssize_t) columns)
      break;
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  return(y < (ssize_t) rows ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r M e t r i c %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColorMetric() measures the difference between colors at each pixel
% location of two images. A return value other than 0 (MagickTrue) means the colors match
% exactly. Otherwise an error measure is computed by summing over all
% pixels in an image the distance squared in RGB space between each image
% pixel and its corresponding pixel in the reconstruct image. The error
% measure is assigned to these image members:
%
% o mean_error_per_pixel: The mean error for any single pixel in
% the image.
%
% o normalized_mean_error: The normalized mean quantization error for
% any single pixel in the image. This distance measure is normalized to
% a range between 0 and 1. It is independent of the range of red, green,
% and blue values in the image.
%
% o normalized_maximum_error: The normalized maximum quantization
% error for any single pixel in the image. This distance measure is
% normalized to a range between 0 and 1. It is independent of the range
% of red, green, and blue values in your image.
%
% A small normalized mean square error, accessed as
% image->normalized_mean_error, suggests the images are very similar in
% spatial layout and color.
%
% The format of the SetImageColorMetric method is:
%
% MagickBooleanType SetImageColorMetric(Image *image,
% const Image *reconstruct_image,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o reconstruct_image: the reconstruct image.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  SetImageColorMetric() measures the per-pixel color difference between
  `image' and `reconstruct_image' and stores the results in image->error
  (mean_error_per_pixel, normalized_mean_error, normalized_maximum_error).
  Returns MagickTrue when the images match exactly (mean error 0).

  Fix: guard against division by zero when no channels are compared (e.g.
  degenerate geometry or everything masked out), which previously produced
  NaN error values.
*/
MagickExport MagickBooleanType SetImageColorMetric(Image *image,
  const Image *reconstruct_image,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reconstruct_view;

  double
    area,
    maximum_error,
    mean_error,
    mean_error_per_pixel;

  MagickBooleanType
    status;

  size_t
    columns,
    rows;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(reconstruct_image != (const Image *) NULL);
  assert(reconstruct_image->signature == MagickCoreSignature);
  area=0.0;
  maximum_error=0.0;
  mean_error_per_pixel=0.0;
  mean_error=0.0;
  /*
    Scan the union of both geometries; virtual pixels cover the overhang.
  */
  rows=MagickMax(image->rows,reconstruct_image->rows);
  columns=MagickMax(image->columns,reconstruct_image->columns);
  image_view=AcquireVirtualCacheView(image,exception);
  reconstruct_view=AcquireVirtualCacheView(reconstruct_image,exception);
  for (y=0; y < (ssize_t) rows; y++)
  {
    register const Quantum
      *magick_restrict p,
      *magick_restrict q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,columns,1,exception);
    q=GetCacheViewVirtualPixels(reconstruct_view,0,y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      break;
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* Skip pixels excluded by the write mask. */
      if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
        {
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(reconstruct_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          distance;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait reconstruct_traits = GetPixelChannelTraits(reconstruct_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (reconstruct_traits == UndefinedPixelTrait) ||
            ((reconstruct_traits & UpdatePixelTrait) == 0))
          continue;
        distance=fabs(p[i]-(double) GetPixelChannel(reconstruct_image,
          channel,q));
        if (distance >= MagickEpsilon)
          {
            mean_error_per_pixel+=distance;
            mean_error+=distance*distance;
            if (distance > maximum_error)
              maximum_error=distance;
          }
        area++;
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(reconstruct_image);
    }
  }
  reconstruct_view=DestroyCacheView(reconstruct_view);
  image_view=DestroyCacheView(image_view);
  if (area == 0.0)
    area=1.0;  /* no channels compared; all error sums are 0, avoid 0/0 */
  image->error.mean_error_per_pixel=(double) (mean_error_per_pixel/area);
  image->error.normalized_mean_error=(double) (QuantumScale*QuantumScale*
    mean_error/area);
  image->error.normalized_maximum_error=(double) (QuantumScale*maximum_error);
  status=image->error.mean_error_per_pixel == 0.0 ? MagickTrue : MagickFalse;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S i m i l a r i t y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SimilarityImage() compares the reference image against the image and returns the
% best match offset. In addition, it returns a similarity image such that an
% exact match location is completely white and if none of the pixels match,
% black, otherwise some gray level in-between.
%
% The format of the SimilarityImage method is:
%
% Image *SimilarityImage(const Image *image,const Image *reference,
% const MetricType metric,const double similarity_threshold,
% RectangleInfo *offset,double *similarity,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o reference: find an area of the image that closely resembles this image.
%
% o metric: the metric.
%
% o similarity_threshold: minimum distortion for (sub)image match.
%
% o offset: the best match offset of the reference image within the image.
%
% o similarity: the computed similarity between the images.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  GetSimilarityMetric() crops the tile of `image' at (x_offset,y_offset) with
  the geometry of `reference' and returns its distortion from the reference
  under the given metric, or 0.0 when the crop or the comparison fails.
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const MetricType metric,const ssize_t x_offset,const ssize_t y_offset,
  ExceptionInfo *exception)
{
  double
    distortion;

  Image
    *tile_image;

  MagickBooleanType
    status;

  RectangleInfo
    geometry;

  SetGeometry(reference,&geometry);
  geometry.x=x_offset;
  geometry.y=y_offset;
  tile_image=CropImage(image,&geometry,exception);
  if (tile_image == (Image *) NULL)
    return(0.0);
  distortion=0.0;
  status=GetImageDistortion(tile_image,reference,metric,&distortion,exception);
  tile_image=DestroyImage(tile_image);
  return(status != MagickFalse ? distortion : 0.0);
}
/*
  SimilarityImage() slides `reference' over `image', records the best match
  position in *offset and its distortion in *similarity_metric, and returns a
  similarity map image (white = exact match).  The search short-circuits once
  *similarity_metric drops to similarity_threshold or below.

  Fix: the OpenMP critical section previously guarded only the local
  metric-inversion `if', leaving the read-modify-write of the shared
  *similarity_metric and *offset unprotected (a data race).  The critical
  section now guards the shared-state update itself.
*/
MagickExport Image *SimilarityImage(const Image *image,const Image *reference,
  const MetricType metric,const double similarity_threshold,
  RectangleInfo *offset,double *similarity_metric,ExceptionInfo *exception)
{
#define SimilarityImageTag "Similarity/Image"

  CacheView
    *similarity_view;

  Image
    *similarity_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(offset != (RectangleInfo *) NULL);
  SetGeometry(reference,offset);
  *similarity_metric=MagickMaximumValue;
  /*
    The map has one pixel per candidate offset.
  */
  similarity_image=CloneImage(image,image->columns-reference->columns+1,
    image->rows-reference->rows+1,MagickTrue,exception);
  if (similarity_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(similarity_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      similarity_image=DestroyImage(similarity_image);
      return((Image *) NULL);
    }
  (void) SetImageAlphaChannel(similarity_image,DeactivateAlphaChannel,
    exception);
  /*
    Measure similarity of reference image against image.
  */
  status=MagickTrue;
  progress=0;
  similarity_view=AcquireAuthenticCacheView(similarity_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) \
    shared(progress,status,similarity_metric) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows+1); y++)
  {
    double
      similarity;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
    #pragma omp flush(similarity_metric)
#endif
    if (*similarity_metric <= similarity_threshold)
      continue;  /* a good-enough match was already found */
    q=GetCacheViewAuthenticPixels(similarity_view,0,y,similarity_image->columns,
      1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) (image->columns-reference->columns+1); x++)
    {
      register ssize_t
        i;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp flush(similarity_metric)
#endif
      if (*similarity_metric <= similarity_threshold)
        break;
      similarity=GetSimilarityMetric(image,reference,metric,x,y,exception);
      /*
        NCC reports similarity (1 = identical); convert to a distortion so
        that smaller is always better.
      */
      if ((metric == NormalizedCrossCorrelationErrorMetric) ||
          (metric == UndefinedErrorMetric))
        similarity=1.0-similarity;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_SimilarityImage)
#endif
      if (similarity < *similarity_metric)
        {
          offset->x=x;
          offset->y=y;
          *similarity_metric=similarity;
        }
      if (metric == PerceptualHashErrorMetric)
        similarity=MagickMin(0.01*similarity,1.0);
      if (GetPixelWriteMask(similarity_image,q) <= (QuantumRange/2))
        {
          SetPixelBackgoundColor(similarity_image,q);
          q+=GetPixelChannels(similarity_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait similarity_traits=GetPixelChannelTraits(similarity_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (similarity_traits == UndefinedPixelTrait) ||
            ((similarity_traits & UpdatePixelTrait) == 0))
          continue;
        SetPixelChannel(similarity_image,channel,ClampToQuantum(QuantumRange-
          QuantumRange*similarity),q);
      }
      q+=GetPixelChannels(similarity_image);
    }
    if (SyncCacheViewAuthenticPixels(similarity_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SimilarityImage)
#endif
        proceed=SetImageProgress(image,SimilarityImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  similarity_view=DestroyCacheView(similarity_view);
  (void) SetImageAlphaChannel(similarity_image,OffAlphaChannel,exception);
  if (status == MagickFalse)
    similarity_image=DestroyImage(similarity_image);
  return(similarity_image);
}
|
compiler_cgen.c | /* Generated by Nim Compiler v0.15.0 */
/* (c) 2016 Andreas Rumpf */
/* The generated code is subject to the original license. */
#define NIM_INTBITS 64
#include "nimbase.h"
#include <string.h>
/*
  Forward declarations for the type graph emitted by the Nim code generator
  (the file header says "Generated by Nim Compiler v0.15.0").  The numeric
  suffixes are the compiler's unique type ids — machine-generated names; do
  not rename by hand.
*/
typedef struct Tcgen529027 Tcgen529027;
typedef struct TNimType TNimType;
typedef struct TNimNode TNimNode;
typedef struct Ropeobj178006 Ropeobj178006;
typedef struct NimStringDesc NimStringDesc;
typedef struct TGenericSeq TGenericSeq;
typedef struct Cell47305 Cell47305;
typedef struct Cellseq47321 Cellseq47321;
typedef struct Gcheap49818 Gcheap49818;
typedef struct Gcstack49816 Gcstack49816;
typedef struct Memregion29486 Memregion29486;
typedef struct Smallchunk29440 Smallchunk29440;
typedef struct Llchunk29480 Llchunk29480;
typedef struct Bigchunk29442 Bigchunk29442;
typedef struct Intset29414 Intset29414;
typedef struct Trunk29410 Trunk29410;
typedef struct Avlnode29484 Avlnode29484;
typedef struct Gcstat49814 Gcstat49814;
typedef struct Cellset47317 Cellset47317;
typedef struct Pagedesc47313 Pagedesc47313;
typedef struct Ttypeseq292836 Ttypeseq292836;
typedef struct Ttype292840 Ttype292840;
typedef struct Intset268030 Intset268030;
typedef struct Trunk268026 Trunk268026;
typedef struct Trunkseq268028 Trunkseq268028;
typedef struct Tpasscontext341002 Tpasscontext341002;
typedef struct Tsym292834 Tsym292834;
typedef struct Tidobj199004 Tidobj199004;
typedef struct TNimObject TNimObject;
typedef struct TY292929 TY292929;
typedef struct Tstrtable292806 Tstrtable292806;
typedef struct Tsymseq292804 Tsymseq292804;
typedef struct Tident199010 Tident199010;
typedef struct Tlineinfo191336 Tlineinfo191336;
typedef struct Tnode292802 Tnode292802;
typedef struct Tloc292816 Tloc292816;
typedef struct Tlib292820 Tlib292820;
typedef struct TY529153 TY529153;
typedef struct TY203018 TY203018;
typedef struct Tidtable292850 Tidtable292850;
typedef struct Tidpairseq292848 Tidpairseq292848;
typedef struct Tlinkedlist147013 Tlinkedlist147013;
typedef struct Tlistentry147007 Tlistentry147007;
typedef struct Tcproc529021 Tcproc529021;
typedef struct Tnodetable292862 Tnodetable292862;
typedef struct Tnodepairseq292860 Tnodepairseq292860;
typedef struct Debuginfo203009 Debuginfo203009;
typedef struct TY203021 TY203021;
typedef struct TY203023 TY203023;
typedef struct Tnodeseq292796 Tnodeseq292796;
typedef struct TY191350 TY191350;
typedef struct TY529095 TY529095;
typedef struct Trodreader332021 Trodreader332021;
typedef struct TY292960 TY292960;
typedef struct TY203017 TY203017;
typedef struct Enumdesc203007 Enumdesc203007;
typedef struct Tinfocc273008 Tinfocc273008;
typedef struct Tblock529019 Tblock529019;
typedef struct Ttraversalclosure537019 Ttraversalclosure537019;
typedef struct TY135002 TY135002;
typedef struct Tbitset339004 Tbitset339004;
typedef struct TY191612 TY191612;
typedef struct Tfileinfo191334 Tfileinfo191334;
typedef struct Tinfoos176035 Tinfoos176035;
typedef struct Tinfocpu176476 Tinfocpu176476;
typedef struct Tstrentry147009 Tstrentry147009;
typedef struct TY128506 TY128506;
typedef struct Basechunk29438 Basechunk29438;
typedef struct Freecell29430 Freecell29430;
typedef struct Tinstantiation292824 Tinstantiation292824;
typedef struct Tidpair292846 Tidpair292846;
typedef struct Tnodepair292858 Tnodepair292858;
typedef struct Filenamemapping203005 Filenamemapping203005;
typedef struct TY332033 TY332033;
typedef struct Tindex332019 Tindex332019;
typedef struct Tiitable299142 Tiitable299142;
typedef struct Tiipairseq299140 Tiipairseq299140;
typedef struct Table332054 Table332054;
typedef struct Keyvaluepairseq332057 Keyvaluepairseq332057;
typedef struct Memfile330202 Memfile330202;
typedef struct TY292961 TY292961;
typedef struct Tiipair299138 Tiipair299138;
typedef struct Keyvaluepair332060 Keyvaluepair332060;
/* Scalar aliases (NU8/NU16 come from nimbase.h) and function-pointer types
   stored in TNimType records — presumably GC-marker and deep-copy hooks;
   confirm against the Nim runtime sources. */
typedef NU8 Tnimkind3403;
typedef NU8 Tnimtypeflag3409Set;
typedef N_NIMCALL_PTR(void, TY3489) (void* p0, NI op0);
typedef N_NIMCALL_PTR(void*, TY3494) (void* p0);
/*
  Nim run-time support records (machine-generated; do not edit by hand):
  run-time type information, strings, sequences, and allocator/GC
  bookkeeping structures.
*/
/* Run-time type descriptor; `marker' and `deepcopy' are per-type hooks. */
struct TNimType {
  NI size;
  Tnimkind3403 kind;
  Tnimtypeflag3409Set flags;
  TNimType* base;
  TNimNode* node;
  void* finalizer;
  TY3489 marker;
  TY3494 deepcopy;
};
typedef NU8 Tnimnodekind3405;
/* Describes one field/branch of a type for the RTTI tree. */
struct TNimNode {
  Tnimnodekind3405 kind;
  NI offset;
  TNimType* typ;
  NCSTRING name;
  NI len;
  TNimNode** sons;
};
typedef N_NIMCALL_PTR(void, Globalmarkerproc55802) (void);
/* Common header of every Nim seq: current length and reserved capacity. */
struct TGenericSeq {
  NI len;
  NI reserved;
};
/* Nim string: seq header followed by the character payload. */
struct NimStringDesc {
  TGenericSeq Sup;
  NIM_CHAR data[SEQ_DECL_SIZE];
};
/* GC cell header: reference count plus the cell's type descriptor. */
struct Cell47305 {
  NI refcount;
  TNimType* typ;
};
struct Cellseq47321 {
  NI len;
  NI cap;
  Cell47305** d;
};
typedef Smallchunk29440* TY29501[512];
typedef Trunk29410* Trunkbuckets29412[256];
struct Intset29414 {
  Trunkbuckets29412 data;
};
/* Allocator region bookkeeping: chunk lists, totals, and an AVL index. */
struct Memregion29486 {
  NI minlargeobj;
  NI maxlargeobj;
  TY29501 freesmallchunks;
  Llchunk29480* llmem;
  NI currmem;
  NI maxmem;
  NI freemem;
  NI lastsize;
  Bigchunk29442* freechunkslist;
  Intset29414 chunkstarts;
  Avlnode29484* root;
  Avlnode29484* deleted;
  Avlnode29484* last;
  Avlnode29484* freeavlnodes;
  NIM_BOOL locked;
};
/* GC statistics counters. */
struct Gcstat49814 {
  NI stackscans;
  NI cyclecollections;
  NI maxthreshold;
  NI maxstacksize;
  NI maxstackcells;
  NI cycletablesize;
  NI64 maxpause;
};
struct Cellset47317 {
  NI counter;
  NI max;
  Pagedesc47313* head;
  Pagedesc47313** data;
};
/* Top-level GC heap state: stacks, work lists, region and statistics. */
struct Gcheap49818 {
  Gcstack49816* stack;
  void* stackbottom;
  NI cyclethreshold;
  Cellseq47321 zct;
  Cellseq47321 decstack;
  Cellseq47321 tempstack;
  NI recgclock;
  Memregion29486 region;
  Gcstat49814 stat;
  Cellset47317 marked;
  Cellseq47321 additionalroots;
};
struct Intset268030 {
NI counter;
NI max;
Trunk268026* head;
Trunkseq268028* data;
};
struct TNimObject {
TNimType* m_type;
};
struct Tidobj199004 {
TNimObject Sup;
NI id;
};
typedef NU8 Tsymkind292435;
struct Tstrtable292806 {
NI counter;
Tsymseq292804* data;
};
typedef NU16 Tmagic292524;
struct Tlineinfo191336 {
NI16 line;
NI16 col;
NI32 fileindex;
};
typedef NU32 Tsymflag292184Set;
typedef NU32 Toption169009Set;
typedef NU8 Tlockind292808;
typedef NU8 Tstorageloc292812;
typedef NU16 Tlocflag292810Set;
struct Tloc292816 {
Tlockind292808 k;
Tstorageloc292812 s;
Tlocflag292810Set flags;
Ttype292840* t;
Ropeobj178006* r;
};
/* Compiler symbol (appears to correspond to TSym in the Nim compiler's
 * ast module).  The union's arms are kind-dependent payloads; which arm
 * is active is determined by `kind` — not visible from here, so callers
 * elsewhere in the generated code select the S1..S4 member. */
struct Tsym292834 {
Tidobj199004 Sup;
Tsymkind292435 kind;
union{
struct {Ttypeseq292836* typeinstcache;
} S1;
struct {TY292929* procinstcache;
Tsym292834* gcunsafetyreason;
} S2;
struct {TY292929* usedgenerics;
Tstrtable292806 tab;
} S3;
struct {Tsym292834* guard;
NI bitsize;
} S4;
} kindU;
Tmagic292524 magic;
Ttype292840* typ;
Tident199010* name;
Tlineinfo191336 info;
Tsym292834* owner;
Tsymflag292184Set flags;
Tnode292802* ast;
Toption169009Set options;
NI position;
NI offset;
Tloc292816 loc;   /* codegen location assigned to this symbol */
Tlib292820* annex;
Tnode292802* constraint;
};
/* Flattened Nim tuple: (string, int). */
struct TY203018 {
NimStringDesc* Field0;
NI Field1;
};
/* Base for compiler-pass contexts; Tcgen529027 below derives from it. */
struct Tpasscontext341002 {
TNimObject Sup;
NIM_BOOL fromcache;
};
/* 18 named sections of a generated C file (types, protos, code, ...). */
typedef Ropeobj178006* Tcfilesections529009[18];
typedef NU8 Codegenflag529025Set;
/* Id-keyed hash table (key is a Tidobj id). */
struct Tidtable292850 {
NI counter;
Tidpairseq292848* data;
};
/* Intrusive doubly-linked list with element count. */
struct Tlinkedlist147013 {
Tlistentry147007* head;
Tlistentry147007* tail;
NI counter;
};
/* AST-node-keyed hash table. */
struct Tnodetable292862 {
NI counter;
Tnodepairseq292860* data;
};
typedef Ropeobj178006* TY529136[10];
/* Per-module C code generator state (appears to correspond to the Nim
 * compiler's cgen BModule).  Holds the output-file sections, caches of
 * already-emitted types/protos, and the init/pre-init/post-init procs. */
struct Tcgen529027 {
Tpasscontext341002 Sup;
Tcfilesections529009 s;       /* rope per output-file section */
Codegenflag529025Set flags;
Tsym292834* module;
NimStringDesc* filename;
NimStringDesc* cfilename;
Ropeobj178006* tmpbase;
Tidtable292850 typecache;     /* Nim type id -> emitted C type name */
Tidtable292850 forwtypecache;
Intset268030 declaredthings;  /* ids already declared in this module */
Intset268030 declaredprotos;
Tlinkedlist147013 headerfiles;
Intset268030 typeinfomarker;
Tcproc529021* initproc;
Tcproc529021* postinitproc;
Tcproc529021* preinitproc;
Ttypeseq292836* typestack;
Tnodetable292862 datacache;
Tsymseq292804* forwardedprocs;
NI typenodes;
NI nimtypes;
Ropeobj178006* typenodesname;
Ropeobj178006* nimtypesname;
NI labels;                    /* counter for unique label/temp names */
TY529136 extensionloaders;
Ropeobj178006* injectstmt;
};
/* Debug-info registry: file and enum tables plus a conflict flag. */
struct Debuginfo203009 {
NI version;
TY203021* files;
TY203023* enums;
NIM_BOOL conflicts;
};
/* Interned identifier: string, hash-bucket chain (next) and hash h. */
struct Tident199010 {
Tidobj199004 Sup;
NimStringDesc* s;
Tident199010* next;
NI h;
};
/* Per-procedure C code generator state (cgen BProc): nesting of
 * try/finally blocks, label counters, and a back-pointer to the
 * owning module's Tcgen529027. */
struct Tcproc529021 {
Tsym292834* prc;                 /* the Nim proc being generated */
NIM_BOOL beforeretneeded;
NIM_BOOL threadvaraccessed;
Tlineinfo191336 lastlineinfo;    /* last emitted #line position */
Tnodeseq292796* nestedtrystmts;
NI inexceptblock;
TY191350* finallysafepoints;
NI labels;                       /* counter for unique local labels */
TY529095* blocks;                /* stack of open blocks/loops */
NI breakidx;
Toption169009Set options;
NI maxframelen;
Tcgen529027* module;
NI withinloop;
NI splitdecls;
NI gcframeid;
Ropeobj178006* gcframetype;
};
typedef NU8 Tsymflag292184;
typedef NU8 Codegenflag529025;
typedef NU8 Toption169009;
typedef NU64 Tglobaloption169013Set;
typedef NU8 Tglobaloption169013;
typedef NU8 Tcommands169076;
typedef NU16 Tnodeflag292427Set;
typedef NU8 Tnodekind292020;
/* AST node (appears to correspond to TNode in the Nim compiler's ast
 * module).  The union arm active depends on `kind`: int/float/string
 * literal, symbol, identifier, or child-node seq. */
struct Tnode292802 {
Ttype292840* typ;
Tlineinfo191336 info;
Tnodeflag292427Set flags;
Tnodekind292020 kind;
union{
struct {NI64 intval;
} S1;
struct {NF floatval;
} S2;
struct {NimStringDesc* strval;
} S3;
struct {Tsym292834* sym;
} S4;
struct {Tident199010* ident;
} S5;
struct {Tnodeseq292796* sons;
} S6;
} kindU;
NimStringDesc* comment;
};
typedef Ropeobj178006* TY533289[1];
typedef NU8 Tlocflag292810;
/* Intrusive list node base; concrete entries derive from this. */
struct Tlistentry147007 {
TNimObject Sup;
Tlistentry147007* prev;
Tlistentry147007* next;
};
typedef NU8 Tlibkind292818;
/* An external library (dynlib/header) a symbol is attached to. */
struct Tlib292820 {
Tlistentry147007 Sup;
Tlibkind292818 kind;
NIM_BOOL generated;
NIM_BOOL isoverriden;
Ropeobj178006* name;
Tnode292802* path;
};
typedef NU8 Tcfilesection529005;
typedef NU8 Ttypekind292244;
typedef NU8 Tcallingconvention292002;
typedef NU32 Ttypeflag292431Set;
/* Compiler type (appears to correspond to TType in the Nim compiler's
 * ast module): kind, child types, optional structural AST `n`, bound
 * destructor/deepcopy/assignment ops, and layout (size/align). */
struct Ttype292840 {
Tidobj199004 Sup;
Ttypekind292244 kind;
Tcallingconvention292002 callconv;
Ttypeflag292431Set flags;
Ttypeseq292836* sons;
Tnode292802* n;
Tsym292834* owner;
Tsym292834* sym;
Tsym292834* destructor;
Tsym292834* deepcopy;
Tsym292834* assignment;
TY292960* methods;
NI64 size;          /* byte size; presumably -1 while uncomputed */
NI16 align;
NI16 locklevel;
Tloc292816 loc;
};
typedef Ropeobj178006* TY532811[2];
typedef NU8 Tctypekind529007;
typedef NU64 Ttypekind292244Set;
typedef NU8 Ttypeflag292431;
typedef NimStringDesc* TY533943[14];
typedef NU8 Tprefereddesc320011;
typedef Ropeobj178006* TY178507[1];
/* Enum descriptor recorded in Debuginfo203009. */
struct Enumdesc203007 {
NI size;
NU32 owner;
NI id;
NimStringDesc* name;
TY203017* values;
};
typedef Ropeobj178006* TY535235[4];
typedef NimStringDesc* TY292016[10];
typedef Ropeobj178006* TY535238[3];
/* Rope: binary concatenation tree of string fragments; leaves carry
 * `data`, inner nodes carry left/right.  Used for all emitted C code. */
struct Ropeobj178006 {
TNimObject Sup;
Ropeobj178006* left;
Ropeobj178006* right;
NI length;
NimStringDesc* data;
};
typedef NU8 Tinfoccprop273004Set;
/* Flattened Nim tuple describing one C compiler configuration
 * (20 string fields + a property set; field meanings are defined by
 * the tuple declaration elsewhere — not recoverable from here). */
struct Tinfocc273008 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
NimStringDesc* Field12;
NimStringDesc* Field13;
NimStringDesc* Field14;
NimStringDesc* Field15;
NimStringDesc* Field16;
NimStringDesc* Field17;
NimStringDesc* Field18;
NimStringDesc* Field19;
Tinfoccprop273004Set Field20;
};
typedef Tinfocc273008 TY273427[13];
typedef NU8 Tsystemcc273002;
typedef NU8 Tnodeflag292427;
typedef NU8 Tcprocsection529011;
typedef Ropeobj178006* Tcprocsections529013[3];
/* One open block/loop while generating a proc body (see
 * Tcproc529021.blocks): label id, its code sections and try nesting. */
struct Tblock529019 {
NI id;
Ropeobj178006* label;
Tcprocsections529013 sections;
NIM_BOOL isloop;
NI16 nestedtrystmts;
NI16 nestedexceptstmts;
NI16 framelen;
};
typedef NU8 Tgcmode169080;
typedef NU8 Ttypeinforeason537016;
/* Closure threaded through GC-traversal proc generation. */
struct Ttraversalclosure537019 {
Tcproc529021* p;
NimStringDesc* visitorfrmt;
};
typedef NU8 Ttypefieldresult320145;
typedef NU8 Tinfoccprop273004;
typedef Ropeobj178006* TY536847[6];
typedef Ropeobj178006* TY536401[7];
typedef Ropeobj178006* TY536475[5];
typedef NU16 Tmsgkind191002;
typedef NU8 Tassignmentflag538302Set;
typedef NU8 Tassignmentflag538302;
typedef NimStringDesc* TY552655[19];
typedef NimStringDesc* TY551642[3];
typedef NimStringDesc* TY556765[4];
typedef NimStringDesc* TY551828[42];
typedef NimStringDesc* TY551281[7];
typedef NU8 Trenderflag311004Set;
typedef NimStringDesc* TY557052[2];
typedef NU8 Tclosuretypekind535681;
typedef NimStringDesc* TY556428[6];
typedef NU8 Tanalysisresult473003;
typedef NU8 char136Set[32];   /* set[char]: 256 bits */
typedef NU8 Tdistinctcompare324427;
typedef NU8 Ttypecmpflag324429Set;
typedef NU16 Tspecialword275003;
typedef NU8 Tsystemos176004;
/* Per-source-file bookkeeping: various path spellings, pre-quoted
 * rope forms for emission, and cached source lines. */
struct Tfileinfo191334 {
NimStringDesc* fullpath;
NimStringDesc* projpath;
NimStringDesc* shortname;
Ropeobj178006* quotedname;
Ropeobj178006* quotedfullname;
TY191350* lines;
NimStringDesc* dirtyfile;
};
typedef NU8 Tinfoosprop176031Set;
/* Flattened Nim tuple describing one target OS (12 strings + a
 * property set; field meanings defined by the original tuple decl). */
struct Tinfoos176035 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
NimStringDesc* Field3;
NimStringDesc* Field4;
NimStringDesc* Field5;
NimStringDesc* Field6;
NimStringDesc* Field7;
NimStringDesc* Field8;
NimStringDesc* Field9;
NimStringDesc* Field10;
NimStringDesc* Field11;
Tinfoosprop176031Set Field12;
};
typedef Tinfoos176035 TY176082[24];
typedef NU8 Tendian176474;
/* Flattened Nim tuple describing one target CPU (name, int size,
 * endianness, ...; exact meanings defined elsewhere). */
struct Tinfocpu176476 {
NimStringDesc* Field0;
NI Field1;
Tendian176474 Field2;
NI Field3;
NI Field4;
};
typedef Tinfocpu176476 TY176510[19];
typedef NU8 Tsystemcpu176452;
/* String-carrying linked-list entry (for Tlinkedlist147013). */
struct Tstrentry147009 {
Tlistentry147007 Sup;
NimStringDesc* data;
};
/* Flattened Nim tuple: (string, string, string). */
struct TY128506 {
NimStringDesc* Field0;
NimStringDesc* Field1;
NimStringDesc* Field2;
};
/* One scanned thread stack segment in the GC's stack list. */
struct Gcstack49816 {
Gcstack49816* prev;
Gcstack49816* next;
void* starts;
void* pos;
NI maxstacksize;
};
/* Common header of every allocator chunk. */
struct Basechunk29438 {
NI prevsize;
NI size;
NIM_BOOL used;
};
/* Chunk subdivided into equally-sized small cells via `freelist`. */
struct Smallchunk29440 {
Basechunk29438 Sup;
Smallchunk29440* next;
Smallchunk29440* prev;
Freecell29430* freelist;
NI free;
NI acc;
NF data;   /* NF forces max alignment; payload starts here */
};
/* Low-level chunk for the allocator's own metadata. */
struct Llchunk29480 {
NI size;
NI acc;
Llchunk29480* next;
};
/* Chunk holding a single large object. */
struct Bigchunk29442 {
Basechunk29438 Sup;
Bigchunk29442* next;
Bigchunk29442* prev;
NI align;
NF data;   /* NF forces max alignment; payload starts here */
};
typedef NI TY29419[8];
/* Int-set trunk: key plus a small bit array, chained per bucket. */
struct Trunk29410 {
Trunk29410* next;
NI key;
TY29419 bits;
};
typedef Avlnode29484* TY29491[2];
/* AVL-ish tree node used by Memregion29486 for address lookups. */
struct Avlnode29484 {
TY29491 link;
NI key;
NI upperbound;
NI level;
};
/* Same trunk shape, instantiated for the GC's page set. */
struct Pagedesc47313 {
Pagedesc47313* next;
NI key;
TY29419 bits;
};
/* Same trunk shape, instantiated for Intset268030. */
struct Trunk268026 {
Trunk268026* next;
NI key;
TY29419 bits;
};
/* Entry of Tidtable292850: id-object key -> arbitrary object. */
struct Tidpair292846 {
Tidobj199004* key;
TNimObject* val;
};
/* Entry of Tnodetable292862: cached hash, AST-node key, int value. */
struct Tnodepair292858 {
NI h;
Tnode292802* key;
NI val;
};
/* (package, file) -> mangled 32-bit id, kept in Debuginfo203009. */
struct Filenamemapping203005 {
NimStringDesc* package;
NimStringDesc* file;
NU32 mangled;
};
typedef NU8 Treasonforrecompile332002;
/* int -> int hash table. */
struct Tiitable299142 {
NI counter;
Tiipairseq299140* data;
};
/* Index section of a .rod symbol file (see Trodreader332021). */
struct Tindex332019 {
NI lastidxkey;
NI lastidxval;
Tiitable299142 tab;
NimStringDesc* r;
NI offset;
};
/* Generic hash table of Keyvaluepair332060 entries. */
struct Table332054 {
Keyvaluepairseq332057* data;
NI counter;
};
/* A memory-mapped file: base pointer, size and OS handle. */
struct Memfile330202 {
void* mem;
NI size;
int handle;
};
/* Reader for a compiler symbol cache file (appears to correspond to
 * the Nim compiler's rodread module): cursor into the memory-mapped
 * file plus the offsets of its named sections and its index tables. */
struct Trodreader332021 {
TNimObject Sup;
NI pos;                         /* current read cursor into s */
NCSTRING s;                     /* raw file contents (points into memfile) */
Toption169009Set options;
Treasonforrecompile332002 reason;
TY332033* moddeps;
TY332033* files;
NI dataidx;                     /* section start offsets follow */
NI convertersidx;
NI initidx;
NI interfidx;
NI compilerprocsidx;
NI methodsidx;
NimStringDesc* filename;
Tindex332019 index;
Tindex332019 imports;
NI readerindex;
NI line;
NI moduleid;
Table332054 syms;
Memfile330202 memfile;          /* keeps `s` alive */
Tsymseq292804* methods;
NimStringDesc* origfile;
NIM_BOOL inviewmode;
};
/* Flattened Nim tuple: (int, sym) — a method-table entry. */
struct TY292961 {
NI Field0;
Tsym292834* Field1;
};
/* Free small-allocator cell; zerofield distinguishes free cells. */
struct Freecell29430 {
Freecell29430* next;
NI zerofield;
};
/* One generic instantiation: the produced sym and its concrete
 * type arguments. */
struct Tinstantiation292824 {
Tsym292834* sym;
Ttypeseq292836* concretetypes;
NI compilesid;
};
/* int -> int table entry. */
struct Tiipair299138 {
NI key;
NI val;
};
/* Flattened Nim tuple: (int, int, sym) — entry of Table332054. */
struct Keyvaluepair332060 {
NI Field0;
NI Field1;
Tsym292834* Field2;
};
/* Generic seq instantiations: every Nim seq[T] becomes a struct with
 * the TGenericSeq header followed by a flexible T payload.  One struct
 * per element type; layouts are fixed by the runtime. */
struct Ttypeseq292836 {
TGenericSeq Sup;
Ttype292840* data[SEQ_DECL_SIZE];
};
struct TY529153 {
TGenericSeq Sup;
Tcgen529027* data[SEQ_DECL_SIZE];
};
struct Tsymseq292804 {
TGenericSeq Sup;
Tsym292834* data[SEQ_DECL_SIZE];
};
struct TY203017 {
TGenericSeq Sup;
TY203018 data[SEQ_DECL_SIZE];
};
struct TY135002 {
TGenericSeq Sup;
NimStringDesc* data[SEQ_DECL_SIZE];
};
struct Tbitset339004 {
TGenericSeq Sup;
NI8 data[SEQ_DECL_SIZE];
};
struct TY529095 {
TGenericSeq Sup;
Tblock529019 data[SEQ_DECL_SIZE];
};
struct TY191350 {
TGenericSeq Sup;
Ropeobj178006* data[SEQ_DECL_SIZE];
};
struct Tnodeseq292796 {
TGenericSeq Sup;
Tnode292802* data[SEQ_DECL_SIZE];
};
struct TY191612 {
TGenericSeq Sup;
Tfileinfo191334 data[SEQ_DECL_SIZE];
};
struct Trunkseq268028 {
TGenericSeq Sup;
Trunk268026* data[SEQ_DECL_SIZE];
};
struct TY292929 {
TGenericSeq Sup;
Tinstantiation292824* data[SEQ_DECL_SIZE];
};
struct Tidpairseq292848 {
TGenericSeq Sup;
Tidpair292846 data[SEQ_DECL_SIZE];
};
struct Tnodepairseq292860 {
TGenericSeq Sup;
Tnodepair292858 data[SEQ_DECL_SIZE];
};
struct TY203021 {
TGenericSeq Sup;
Filenamemapping203005 data[SEQ_DECL_SIZE];
};
struct TY203023 {
TGenericSeq Sup;
Enumdesc203007 data[SEQ_DECL_SIZE];
};
struct TY292960 {
TGenericSeq Sup;
TY292961 data[SEQ_DECL_SIZE];
};
struct TY332033 {
TGenericSeq Sup;
NI32 data[SEQ_DECL_SIZE];
};
struct Tiipairseq299140 {
TGenericSeq Sup;
Tiipair299138 data[SEQ_DECL_SIZE];
};
struct Keyvaluepairseq332057 {
TGenericSeq Sup;
Keyvaluepair332060 data[SEQ_DECL_SIZE];
};
/* Forward declarations for runtime and generated procedures used by
 * this translation unit.  Names encode the Nim proc plus a mangling
 * suffix; signatures must match the definitions emitted elsewhere, so
 * none of these may be edited by hand.  Rough grouping (inferred from
 * the name stems; confirm against the defining modules):
 * - GC/runtime helpers: nimGCvisit, asgnRef*, newObj, newSeq*, ... */
N_NIMCALL(void, nimGCvisit)(void* d0, NI op0);
N_NIMCALL(void, T839829468_2)(void);
N_NIMCALL(void, nimRegisterGlobalMarker)(Globalmarkerproc55802 markerproc0);
N_NIMCALL(void, T839829468_3)(void);
N_NIMCALL(Ropeobj178006*, rope_178277_2381377266)(NimStringDesc* s0);
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0);
static N_INLINE(Cell47305*, usrtocell_51440_1689653243)(void* usr0);
static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47305* c0);
N_NOINLINE(void, addzct_51417_1689653243)(Cellseq47321* s0, Cell47305* c0);
N_NIMCALL(void, T839829468_5)(void);
N_NIMCALL(void, T839829468_6)(void);
static N_INLINE(void, nimGCunrefNoCycle)(void* p0);
N_NIMCALL(void*, newSeqRC1)(TNimType* typ0, NI len0);
N_NIMCALL(void, T839829468_7)(void);
N_NIMCALL(void, initintset_268885_2627731572)(Intset268030* Result);
N_NOINLINE(void, chckNil)(void* p0);
N_NIMCALL(void, genericReset)(void* dest0, TNimType* mt0);
N_NIMCALL(void, T839829468_8)(void);
/* - module setup: newmodule/rawnewmodule/getcgenmodule */
N_NIMCALL(Tcgen529027*, newmodule_563044_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0);
N_NIMCALL(void, internalerror_196113_155036129)(NimStringDesc* errmsg0);
N_NIMCALL(NimStringDesc*, HEX24_196185_1689653243)(TY203018 x0);
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0);
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0);
N_NIMCALL(void*, newObj)(TNimType* typ0, NI size0);
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0);
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0);
N_NIMCALL(NimStringDesc*, HEX24_8401_1689653243)(NU64 x0);
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0);
N_NIMCALL(NU32, register_203121_1926258066)(Debuginfo203009* self0, NimStringDesc* package0, NimStringDesc* file0);
N_NIMCALL(NimStringDesc*, rawNewString)(NI space0);
N_NIMCALL(void, initlinkedlist_147031_3771138726)(Tlinkedlist147013* list0);
N_NIMCALL(NimStringDesc*, copyStringRC1)(NimStringDesc* src0);
N_NIMCALL(void, initidtable_296019_850551059)(Tidtable292850* x0);
N_NIMCALL(Tcproc529021*, newproc_529206_3723162438)(Tsym292834* prc0, Tcgen529027* module0);
static N_INLINE(void, asgnRef)(void** dest0, void* src0);
static N_INLINE(void, incref_53419_1689653243)(Cell47305* c0);
static N_INLINE(void, decref_53001_1689653243)(Cell47305* c0);
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0);
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0);
N_NIMCALL(void, initnodetable_296085_850551059)(Tnodetable292862* x0);
N_NIMCALL(Ropeobj178006*, gettempname_533598_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, HEX26_178418_2381377266)(Ropeobj178006* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rope_178401_2381377266)(NI64 i0);
N_NIMCALL(NimStringDesc*, tofullpath_192261_155036129)(NI32 fileidx0);
N_NIMCALL(TGenericSeq*, setLengthSeq)(TGenericSeq* seq0, NI elemsize0, NI newlen0);
N_NIMCALL(NimStringDesc*, tofilename_192257_155036129)(NI32 fileidx0);
N_NIMCALL(NimStringDesc*, noschangeFileExt)(NimStringDesc* filename0, NimStringDesc* ext0);
N_NIMCALL(NimStringDesc*, completecfilepath_273854_2528170400)(NimStringDesc* cfile0, NIM_BOOL createsubdir0);
N_NIMCALL(void, readmergeinfo_530613_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, getcfile_563201_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, copyString)(NimStringDesc* src0);
N_NIMCALL(NimStringDesc*, withpackagename_170073_2607990831)(NimStringDesc* path0);
/* - statement/expression generation: genstmts/expr and helpers */
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0);
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0);
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, unsureAsgnRef)(void** dest0, void* src0);
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0);
N_NIMCALL(NimStringDesc*, mangle_528847_2036603609)(NimStringDesc* name0);
N_NIMCALL(void, add_178487_2381377266)(Ropeobj178006** a0, NimStringDesc* b0);
N_NIMCALL(void, add_178482_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, HEX25_178905_2381377266)(NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, includestr_147249_3771138726)(Tlinkedlist147013* list0, NimStringDesc* data0);
N_NIMCALL(NimStringDesc*, getstr_297230_850551059)(Tnode292802* a0);
N_NIMCALL(Tsym292834*, getmodule_299123_2984716966)(Tsym292834* s0);
N_NIMCALL(NIM_BOOL, containsorincl_268862_2627731572)(Intset268030* s0, NI key0);
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, nimIntToStr)(NI x0);
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI start_79210_1689653243, NI last0);
N_NIMCALL(NimStringDesc*, copyStrLast)(NimStringDesc* s0, NI first0, NI last0);
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0);
N_NIMCALL(Tsym292834*, getcompilerproc_338748_3937434831)(NimStringDesc* name0);
/* - procedure generation: genproc*, headers, params */
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0);
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(TGenericSeq*, incrSeqV2)(TGenericSeq* seq0, NI elemsize0);
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0);
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0);
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0);
N_NIMCALL(void, addf_179205_2381377266)(Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NimStringDesc*, makesinglelinecstring_528835_2036603609)(NimStringDesc* s0);
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0);
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0);
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0);
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533550_839829468)(Ttype292840* rettype0);
/* - type descriptor generation: maptype/gettypedesc* */
N_NIMCALL(Tctypekind529007, maptype_533394_839829468)(Ttype292840* typ0);
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0);
N_NIMCALL(NI64, getsize_320135_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ttype292840*, lastson_295377_850551059)(Ttype292840* n0);
N_NIMCALL(NI64, firstord_320001_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, skiptypes_296099_850551059)(Ttype292840* t0, Ttypekind292244Set kinds0);
N_NIMCALL(NIM_BOOL, isimportedcpptype_533478_839829468)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, needscomplexassignment_533511_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, containsgarbagecollectedref_320117_3876443242)(Ttype292840* typ0);
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533515_839829468)(Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, ispureobject_320138_3876443242)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescaux_533505_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ttype292840*, getuniquetype_528640_2036603609)(Ttype292840* key0);
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0);
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0);
N_NIMCALL(NimStringDesc*, reprEnum)(NI e0, TNimType* typ0);
N_NIMCALL(Ropeobj178006*, cachegettype_533593_839829468)(Tidtable292850 tab0, Ttype292840* key0);
N_NIMCALL(TNimObject*, idtableget_299086_2984716966)(Tidtable292850 t0, Tidobj199004* key0);
N_NIMCALL(NimStringDesc*, typetostring_320017_3876443242)(Ttype292840* typ0, Tprefereddesc320011 prefer0);
N_NIMCALL(Ttype292840*, elemtype_320394_3876443242)(Ttype292840* t0);
N_NIMCALL(Ropeobj178006*, HEX26_178447_2381377266)(Ropeobj178006* a0, NimStringDesc* b0);
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(NIM_BOOL, isimportedtype_533451_839829468)(Ttype292840* t0);
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0);
N_NIMCALL(void, idtableput_299094_2984716966)(Tidtable292850* t0, Tidobj199004* key0, TNimObject* val0);
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0);
N_NIMCALL(void, internalerror_196100_155036129)(Tlineinfo191336 info0, NimStringDesc* errmsg0);
N_NIMCALL(NIM_BOOL, hasenum_203230_1926258066)(Debuginfo203009* self0, NimStringDesc* ename0, NI id0, NU32 owner0);
N_NIMCALL(void*, newSeq)(TNimType* typ0, NI len0);
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0);
N_NIMCALL(void, registerenum_203419_1926258066)(Debuginfo203009* self0, Enumdesc203007* ed0);
N_NIMCALL(void, genericSeqAssign)(void* dest0, void* src_86404_1689653243, TNimType* mt0);
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(NI64, lengthord_320007_3876443242)(Ttype292840* t0);
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0);
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0);
N_NIMCALL(NI, len_295339_850551059)(Ttype292840* n0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI start0);
N_NIMCALL(NimStringDesc*, copyStr)(NimStringDesc* s0, NI first0);
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0);
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295351_850551059)(Tnode292802* n0);
N_NIMCALL(Tnode292802*, lastson_295364_850551059)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, HEX26_178452_2381377266)(NimStringDesc* a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0);
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0);
N_NIMCALL(NIM_CHAR, nsuToUpperAsciiChar)(NIM_CHAR c0);
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0);
N_NIMCALL(NI, sonslen_295327_850551059)(Ttype292840* n0);
N_NIMCALL(void, excl_268841_2627731572)(Intset268030* s0, NI key0);
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0);
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0);
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533611_839829468)(Tsym292834* s0);
N_NIMCALL(Tctypekind529007, mapreturntype_533447_839829468)(Ttype292840* typ0);
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0);
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0);
N_NIMCALL(Tnode292802*, getbody_335226_1724185294)(Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(Ropeobj178006*, gettypedesc_535673_839829468)(Tcgen529027* m0, Ttype292840* typ0);
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0);
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0);
N_NIMCALL(void, prepend_178893_2381377266)(Ropeobj178006** a0, Ropeobj178006* b0);
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816* a0);
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0);
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, makecstring_191638_155036129)(NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, nsuNormalize)(NimStringDesc* s0);
/* - runtime type info (RTTI) generation: gentypeinfo*, gentraverseproc* */
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468);
N_NIMCALL(Tcgen529027*, bmod_529201_3723162438)(Tsym292834* module0);
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0);
N_NIMCALL(NIM_BOOL, canformacycle_320123_3876443242)(Ttype292840* typ0);
N_NIMCALL(void, gentupleinfo_536551_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0);
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0);
N_NIMCALL(Ttype292840*, newtype_295107_850551059)(Ttypekind292244 kind0, Tsym292834* owner0);
N_NIMCALL(void, rawaddson_296394_850551059)(Ttype292840* father0, Ttype292840* son0);
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0);
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0);
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0);
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816* loc0, NIM_BOOL istemp0);
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0);
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816* a0);
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816* a0, NIM_BOOL takeaddr0);
N_NIMCALL(Ttypefieldresult320145, analyseobjectwithtypefield_320149_3876443242)(Ttype292840* t0);
N_NIMCALL(Ttype292840*, getsystype_338150_3937434831)(Ttypekind292244 kind0);
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468);
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0);
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0);
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0);
/* - literal/constant generation */
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0);
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0);
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0);
N_NIMCALL(NI, nodetabletestorset_342682_1142335848)(Tnodetable292862* t0, Tnode292802* key0, NI val0);
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0);
N_NIMCALL(NimStringDesc*, tostrmaxprecision_298007_3471544153)(NF f0);
N_NIMCALL(Tnode292802*, copynode_296528_850551059)(Tnode292802* src0);
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genenuminfo_536599_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectinfo_536508_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0);
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0);
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0);
N_NIMCALL(Tsym292834*, lookupinrecord_299119_2984716966)(Tnode292802* n0, Tident199010* field0);
N_NIMCALL(NI64, getordvalue_320129_3876443242)(Tnode292802* n0);
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0);
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0);
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0);
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0);
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0);
N_NIMCALL(Ropeobj178006*, quotedfilename_196818_155036129)(Tlineinfo191336 i0);
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0);
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0);
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0);
/* - dynamic library loading and variable/assignment generation */
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(NIM_BOOL, isgetprocaddr_559443_839829468)(Tlib292820* lib0);
N_NIMCALL(void, loaddynamiclib_559481_839829468)(Tcgen529027* m0, Tlib292820* lib0);
N_NIMCALL(void, libcandidates_170605_2607990831)(NimStringDesc* s0, TY135002** dest0);
N_NIMCALL(void, rawmessage_194612_155036129)(Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0);
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0);
N_NIMCALL(NimStringDesc*, HEX24_178856_2381377266)(Ropeobj178006* r0);
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0);
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void);
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816* s0);
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void);
N_NIMCALL(void, optasgnloc_549789_839829468)(Tloc292816* a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result);
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0);
N_NIMCALL(NI, asgncomplexity_549751_839829468)(Tnode292802* n0);
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0, Tnode292802* t0);
N_NIMCALL(void, genericAssign)(void* dest0, void* src0, TNimType* mt0);
N_NIMCALL(void, localerror_196085_155036129)(Tlineinfo191336 info0, NimStringDesc* arg0);
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0);
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0);
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0);
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0);
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, tobitset_340001_452470228)(Tnode292802* s0, Tbitset339004** b0);
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0);
N_NIMCALL(NimStringDesc*, nsuToHex)(NI64 x0, NI len0);
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0);
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0);
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0);
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0);
static N_INLINE(NIM_BOOL, isemptytype_297441_850551059)(Ttype292840* t0);
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0);
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, sourceline_192065_155036129)(Tlineinfo191336 i0);
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0);
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0);
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0);
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryfloatarith_556729_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0);
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* a0, Tloc292816* b0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816* a0);
N_NIMCALL(NI64, lastord_320004_3876443242)(Ttype292840* t0);
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0);
N_NIMCALL(void, message_196095_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(NimStringDesc*, rendertree_311044_382274130)(Tnode292802* n0, Trenderflag311004Set renderflags0);
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genstrequals_556667_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0);
N_NIMCALL(void, globalerror_196071_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Ropeobj178006*, genofhelper_555140_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0);
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816* a0, Ropeobj178006* sizeexpr_554745_839829468);
N_NIMCALL(void, gennewfinalize_555111_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816* dest0, Ropeobj178006* length0);
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensomecast_556481_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, getclosuretype_535685_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535681 kind0);
N_NIMCALL(void, genord_556475_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0);
N_NIMCALL(void, binarystmtinexcl_555858_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816* a0, Ttype292840* settype0);
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0);
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0);
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0);
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0);
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, skipconv_328882_3876443242)(Tnode292802* n0);
N_NIMCALL(Tmagic292524, getmagic_318502_2616423590)(Tnode292802* op0);
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0);
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0);
N_NIMCALL(Tanalysisresult473003, ispartof_473340_788060399)(Tnode292802* a0, Tnode292802* b0);
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0);
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0);
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0);
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110056_4286263276)(NimStringDesc* s0, char136Set chars0);
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468);
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0);
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0);
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0);
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0);
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, contains_110046_4286263276)(NimStringDesc* s0, NIM_CHAR c0);
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0);
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NimStringDesc*, nsuRepeatStr)(NimStringDesc* s0, NI n0);
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, localerror_196080_155036129)(Tlineinfo191336 info0, Tmsgkind191002 msg0, NimStringDesc* arg0);
N_NIMCALL(Tnode292802*, wrapprocforspawn_435501_2218250499)(Tsym292834* owner0, Tnode292802* spawnexpr0, Ttype292840* rettype0, Tnode292802* barrier0, Tnode292802* dest0);
N_NIMCALL(Tnode292802*, liftparallel_478822_1773027539)(Tsym292834* owner0, Tnode292802* n0);
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0);
N_NIMCALL(NIM_BOOL, isdeepconstexpr_318566_2616423590)(Tnode292802* n0);
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NimStringDesc*, nimInt64ToStr)(NI64 x0);
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Tsym292834*, lookupfieldagain_553154_839829468)(Tcproc529021* p0, Ttype292840* ty_553157_839829468, Tsym292834* field0, Ropeobj178006** r0);
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0);
N_NIMCALL(Tnode292802*, newstrnode_293677_850551059)(Tnodekind292020 kind0, NimStringDesc* strval0);
N_NIMCALL(void, gencast_556538_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genconv_556633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, comparetypes_326214_3876443242)(Ttype292840* x0, Ttype292840* y0, Tdistinctcompare324427 cmp0, Ttypecmpflag324429Set flags0);
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0);
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isconstexpr_318510_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0);
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0);
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0);
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0);
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0);
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0);
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0);
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0);
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, inheritancediff_326252_3876443242)(Ttype292840* a0, Ttype292840* b0);
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genrangechck_556591_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0);
N_NIMCALL(void, convstrtocstr_556643_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, convcstrtostr_556655_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0);
static N_INLINE(NIM_BOOL, isroutine_297324_850551059)(Tsym292834* s0);
N_NIMCALL(void, genwhilestmt_545985_839829468)(Tcproc529021* p0, Tnode292802* t0);
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0);
N_NIMCALL(NIM_BOOL, stmtscontainpragma_528083_2036603609)(Tnode292802* n0, Tspecialword275003 w0);
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0);
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0);
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0);
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0);
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0);
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, containshiddenpointer_320120_3876443242)(Ttype292840* typ0);
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0);
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0);
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0);
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tnode292802*, lowertupleunpacking_433037_2218250499)(Tnode292802* n0, Tsym292834* owner0);
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NIM_BOOL, containscompiletimeonly_328721_3876443242)(Ttype292840* t0);
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0);
N_NIMCALL(void, gencase_547827_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(void, genstringcase_547417_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NI, nextpoweroftwo_101629_1009420244)(NI x0);
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0);
N_NIMCALL(NI64, hashstring_528100_2036603609)(NimStringDesc* s0);
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0);
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0);
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816* a0);
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0);
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0);
N_NIMCALL(void, genordinalcase_547725_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0);
N_NIMCALL(NI, ifswitchsplitpoint_547616_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547576_839829468)(Tnode292802* b0);
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0);
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0);
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0);
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0);
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0);
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816* a0, Tloc292816* tmp0, Ttype292840* objtype0, Tsym292834* field0);
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0);
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0);
N_NIMCALL(NimStringDesc*, resizeString)(NimStringDesc* dest0, NI addlen0);
N_NIMCALL(void, gentrycpp_547866_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0);
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0);
N_NIMCALL(NIM_BOOL, isdefined_200011_1967573533)(NimStringDesc* symbol0);
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0);
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0);
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0);
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0);
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0);
N_NIMCALL(Tspecialword275003, whichpragma_318911_2616423590)(Tnode292802* n0);
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0);
N_NIMCALL(NIM_BOOL, nsuStartsWith)(NimStringDesc* s0, NimStringDesc* prefix0);
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(Tsym292834*, skipgenericowner_297280_850551059)(Tsym292834* s0);
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0);
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0);
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0);
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0);
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0);
N_NIMCALL(Tnode292802*, generatemethoddispatchers_432151_3853300031)(void);
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0);
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0);
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, mergerequired_530832_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, mergefiles_531241_2760143328)(NimStringDesc* cfilename0, Tcgen529027* m0);
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530081_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionend_530116_2760143328)(Tcprocsection529011 ps0);
N_NIMCALL(Ropeobj178006*, gensectionstart_530015_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(Ropeobj178006*, gensectionend_530050_2760143328)(Tcfilesection529005 fs0);
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0);
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0);
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, getcompilecfilecmd_274284_2528170400)(NimStringDesc* cfilename0, NIM_BOOL isexternal0);
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0);
N_NIMCALL(Ropeobj178006*, genmergeinfo_530203_2760143328)(Tcgen529027* m0);
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0);
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0);
N_NIMCALL(NimStringDesc*, nsuReplaceChar)(NimStringDesc* s0, NIM_CHAR sub0, NIM_CHAR by0);
N_NIMCALL(void, writerope_178836_2381377266)(Ropeobj178006* head0, NimStringDesc* filename0, NIM_BOOL usewarning0);
N_NIMCALL(void, addfiletocompile_273863_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, addfiletolink_273872_2528170400)(NimStringDesc* filename0);
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0);
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0);
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0);
N_NIMCALL(NimStringDesc*, toobjfile_273859_2528170400)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, writeropeifnotequal_179511_2381377266)(Ropeobj178006* r0, NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosexistsFile)(NimStringDesc* filename0);
N_NIMCALL(NIM_BOOL, nosfileNewer)(NimStringDesc* a0, NimStringDesc* b0);
N_NIMCALL(void, writemapping_274789_2528170400)(Ropeobj178006* gsymbolmapping0);
N_NIMCALL(void, writeheader_563149_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nossplitFile)(NimStringDesc* path0, TY128506* Result);
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0);
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0);
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0);
STRING_LITERAL(T839829468_4, "\011", 1);
STRING_LITERAL(T839829468_10, "compiler/cgen.nim", 17);
NIM_CONST TY203018 T839829468_9 = {((NimStringDesc*) &T839829468_10),
((NI) 1158)}
;
STRING_LITERAL(T839829468_11, "T", 1);
STRING_LITERAL(T839829468_12, "_", 1);
STRING_LITERAL(T839829468_13, "added pending module twice: ", 28);
STRING_LITERAL(T839829468_14, ".h", 2);
STRING_LITERAL(T839829468_15, ".cpp", 4);
STRING_LITERAL(T839829468_16, ".m", 2);
STRING_LITERAL(T839829468_17, ".c", 2);
STRING_LITERAL(T839829468_18, "0", 1);
STRING_LITERAL(T839829468_19, "$", 1);
STRING_LITERAL(T839829468_20, "ropes: invalid format string $", 30);
STRING_LITERAL(T839829468_21, "$N#line $2 $1$N", 15);
STRING_LITERAL(T839829468_22, "N_LIB_IMPORT ", 13);
STRING_LITERAL(T839829468_23, "N_LIB_EXPORT ", 13);
STRING_LITERAL(T839829468_24, "static ", 7);
STRING_LITERAL(T839829468_25, "mapType", 7);
STRING_LITERAL(T839829468_26, "void", 4);
STRING_LITERAL(T839829468_27, "getTypeDescAux: t == nil", 24);
STRING_LITERAL(T839829468_28, "TY", 2);
STRING_LITERAL(T839829468_29, "getTypeName: ", 13);
STRING_LITERAL(T839829468_30, "void*", 5);
STRING_LITERAL(T839829468_31, "NimStringDesc", 13);
STRING_LITERAL(T839829468_32, "NimStringDesc*", 14);
STRING_LITERAL(T839829468_33, "NCSTRING", 8);
STRING_LITERAL(T839829468_34, "NIM_BOOL", 8);
STRING_LITERAL(T839829468_35, "NIM_CHAR", 8);
STRING_LITERAL(T839829468_36, "NI", 2);
STRING_LITERAL(T839829468_37, "NI8", 3);
STRING_LITERAL(T839829468_38, "NI16", 4);
STRING_LITERAL(T839829468_39, "NI32", 4);
STRING_LITERAL(T839829468_40, "NI64", 4);
STRING_LITERAL(T839829468_41, "NF", 2);
STRING_LITERAL(T839829468_42, "NF32", 4);
STRING_LITERAL(T839829468_43, "NF64", 4);
STRING_LITERAL(T839829468_44, "NF128", 5);
STRING_LITERAL(T839829468_45, "NU", 2);
STRING_LITERAL(T839829468_46, "NU8", 3);
STRING_LITERAL(T839829468_47, "NU16", 4);
STRING_LITERAL(T839829468_48, "NU32", 4);
STRING_LITERAL(T839829468_49, "NU64", 4);
NIM_CONST TY533943 Numericaltypetostr_533941_839829468 = {((NimStringDesc*) &T839829468_36),
((NimStringDesc*) &T839829468_37),
((NimStringDesc*) &T839829468_38),
((NimStringDesc*) &T839829468_39),
((NimStringDesc*) &T839829468_40),
((NimStringDesc*) &T839829468_41),
((NimStringDesc*) &T839829468_42),
((NimStringDesc*) &T839829468_43),
((NimStringDesc*) &T839829468_44),
((NimStringDesc*) &T839829468_45),
((NimStringDesc*) &T839829468_46),
((NimStringDesc*) &T839829468_47),
((NimStringDesc*) &T839829468_48),
((NimStringDesc*) &T839829468_49)}
;
STRING_LITERAL(T839829468_50, "tyStatic for getSimpleTypeDesc", 30);
STRING_LITERAL(T839829468_51, "cannot generate C type for: ", 28);
STRING_LITERAL(T839829468_52, "&", 1);
STRING_LITERAL(T839829468_53, "*", 1);
STRING_LITERAL(T839829468_54, "$1 $2;$n", 8);
STRING_LITERAL(T839829468_55, "typedef $1 $2 $2;$n", 19);
STRING_LITERAL(T839829468_56, "union", 5);
STRING_LITERAL(T839829468_57, "struct", 6);
STRING_LITERAL(T839829468_58, "getTypeForward(", 15);
STRING_LITERAL(T839829468_59, "typedef NI32 $1;$n", 18);
STRING_LITERAL(T839829468_60, "typedef NU8 $1;$n", 17);
STRING_LITERAL(T839829468_61, "typedef NU16 $1;$n", 18);
STRING_LITERAL(T839829468_62, "typedef NI64 $1;$n", 18);
STRING_LITERAL(T839829468_63, "getTypeDescAux: enum", 20);
STRING_LITERAL(T839829468_64, "typedef $1_PTR($2, $3) $4;$n", 28);
STRING_LITERAL(T839829468_65, "N_NIMCALL", 9);
STRING_LITERAL(T839829468_66, "N_STDCALL", 9);
STRING_LITERAL(T839829468_67, "N_CDECL", 7);
STRING_LITERAL(T839829468_68, "N_SAFECALL", 10);
STRING_LITERAL(T839829468_69, "N_SYSCALL", 9);
STRING_LITERAL(T839829468_70, "N_INLINE", 8);
STRING_LITERAL(T839829468_71, "N_NOINLINE", 10);
STRING_LITERAL(T839829468_72, "N_FASTCALL", 10);
STRING_LITERAL(T839829468_73, "N_CLOSURE", 9);
STRING_LITERAL(T839829468_74, "N_NOCONV", 8);
NIM_CONST TY292016 Callingconvtostr_533587_839829468 = {((NimStringDesc*) &T839829468_65),
((NimStringDesc*) &T839829468_66),
((NimStringDesc*) &T839829468_67),
((NimStringDesc*) &T839829468_68),
((NimStringDesc*) &T839829468_69),
((NimStringDesc*) &T839829468_70),
((NimStringDesc*) &T839829468_71),
((NimStringDesc*) &T839829468_72),
((NimStringDesc*) &T839829468_73),
((NimStringDesc*) &T839829468_74)}
;
STRING_LITERAL(T839829468_75, "typedef struct {$nN_NIMCALL_PTR($2, ClPrc) $3;$nvoid* ClEnv;$n}"
" $1;$n", 69);
STRING_LITERAL(T839829468_76, "struct $2 : #TGenericSeq {$n", 28);
STRING_LITERAL(T839829468_77, "struct $2 {$n #TGenericSeq Sup;$n", 34);
STRING_LITERAL(T839829468_78, " $1 data[SEQ_DECL_SIZE];$n};$n", 31);
STRING_LITERAL(T839829468_79, "TGenericSeq", 11);
STRING_LITERAL(T839829468_80, "typedef $1 $2[$3];$n", 20);
STRING_LITERAL(T839829468_81, "invalid apostrophe type parameter index", 39);
STRING_LITERAL(T839829468_82, "<", 1);
STRING_LITERAL(T839829468_83, " COMMA ", 7);
STRING_LITERAL(T839829468_84, "> ", 2);
extern NIM_CONST TY273427 Cc_273413_2528170400;
STRING_LITERAL(T839829468_85, " {$n", 4);
STRING_LITERAL(T839829468_86, " {$n#TNimType* m_type;$n", 24);
STRING_LITERAL(T839829468_87, " : public $1 {$n", 16);
STRING_LITERAL(T839829468_88, " {$n $1 Sup;$n", 15);
STRING_LITERAL(T839829468_89, "genRecordFieldsAux", 18);
STRING_LITERAL(T839829468_90, "$1.$2", 5);
STRING_LITERAL(T839829468_91, "S", 1);
STRING_LITERAL(T839829468_92, "struct {", 8);
STRING_LITERAL(T839829468_93, "} $1;$n", 7);
STRING_LITERAL(T839829468_94, "genRecordFieldsAux(record case branch)", 38);
STRING_LITERAL(T839829468_95, "union{$n$1} $2;$n", 17);
STRING_LITERAL(T839829468_96, "mangleRecFieldName", 18);
STRING_LITERAL(T839829468_97, "$1 $2[SEQ_DECL_SIZE];$n", 23);
STRING_LITERAL(T839829468_98, "$1 $2:$3;$n", 11);
STRING_LITERAL(T839829468_99, "genRecordFieldsAux()", 20);
STRING_LITERAL(T839829468_100, "char dummy;$n", 13);
STRING_LITERAL(T839829468_101, "};", 2);
STRING_LITERAL(T839829468_102, "$1 $2 {$n", 9);
STRING_LITERAL(T839829468_103, "$1 Field$2;$n", 13);
STRING_LITERAL(T839829468_104, "char dummy;", 11);
STRING_LITERAL(T839829468_105, "Set", 3);
STRING_LITERAL(T839829468_106, "typedef NU$2 $1;$n", 18);
STRING_LITERAL(T839829468_107, "typedef NU8 $1[$2];$n", 21);
STRING_LITERAL(T839829468_108, "getTypeDescAux(", 15);
STRING_LITERAL(T839829468_109, "genProcParams", 13);
STRING_LITERAL(T839829468_110, ", ", 2);
STRING_LITERAL(T839829468_111, " ", 1);
STRING_LITERAL(T839829468_112, ", NI $1Len$2", 12);
STRING_LITERAL(T839829468_113, " Result", 7);
STRING_LITERAL(T839829468_114, "void* ClEnv", 11);
STRING_LITERAL(T839829468_115, "...", 3);
STRING_LITERAL(T839829468_116, "void)", 5);
STRING_LITERAL(T839829468_117, ")", 1);
STRING_LITERAL(T839829468_118, "(", 1);
STRING_LITERAL(T839829468_119, "$1($2, $3)$4", 12);
STRING_LITERAL(T839829468_120, "proc has no result symbol", 25);
STRING_LITERAL(T839829468_121, " register", 9);
STRING_LITERAL(T839829468_122, " volatile", 9);
STRING_LITERAL(T839829468_123, "$1 = $2;$n", 10);
STRING_LITERAL(T839829468_124, "(*$1)", 5);
STRING_LITERAL(T839829468_125, ";", 1);
STRING_LITERAL(T839829468_126, "FR.s[$1].address = (void*)$3; FR.s[$1].typ = $4; FR.s[$1].name "
"= $2;$n", 70);
STRING_LITERAL(T839829468_127, "NTI$1", 5);
STRING_LITERAL(T839829468_128, "(&", 2);
STRING_LITERAL(T839829468_129, "TNimType", 8);
STRING_LITERAL(T839829468_130, "TNimNode", 8);
STRING_LITERAL(T839829468_131, "extern TNimType $1; /* $2 */$n", 30);
STRING_LITERAL(T839829468_132, "0", 1);
STRING_LITERAL(T839829468_133, "void*", 5);
STRING_LITERAL(T839829468_134, "$1.size = sizeof($2);$n$1.kind = $3;$n$1.base = $4;$n", 53);
STRING_LITERAL(T839829468_135, "$1.flags = $2;$n", 16);
STRING_LITERAL(T839829468_136, "TNimType $1; /* $2 */$n", 23);
STRING_LITERAL(T839829468_137, "genTypeInfo(", 12);
STRING_LITERAL(T839829468_138, "$1[$2]", 6);
STRING_LITERAL(T839829468_139, "static TNimNode* $1[$2];$n", 26);
STRING_LITERAL(T839829468_140, "$1[$2] = &$3;$n", 15);
STRING_LITERAL(T839829468_141, "$1.kind = 1;$n$1.offset = offsetof($2, Field$3);$n$1.typ = $4;$"
"n$1.name = \"Field$3\";$n", 86);
STRING_LITERAL(T839829468_142, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n", 45);
STRING_LITERAL(T839829468_143, "$1.len = $2; $1.kind = 2;$n", 27);
STRING_LITERAL(T839829468_144, "$1.node = &$2;$n", 16);
STRING_LITERAL(T839829468_145, "#nimGCvisit((void*)$1, op);$n", 29);
STRING_LITERAL(T839829468_146, "N_NIMCALL(void, $1)(void* p, NI op)", 35);
STRING_LITERAL(T839829468_147, "$1 a;$n", 7);
STRING_LITERAL(T839829468_148, "a = ($1)p;$n", 12);
STRING_LITERAL(T839829468_149, "LOC", 3);
STRING_LITERAL(T839829468_150, "$1 = ($2)0;$n", 13);
STRING_LITERAL(T839829468_151, "<string.h>", 10);
STRING_LITERAL(T839829468_152, "memset((void*)$1, 0, sizeof($2));$n", 35);
STRING_LITERAL(T839829468_153, ".Sup", 4);
STRING_LITERAL(T839829468_154, "$1.m_type = $2;$n", 17);
STRING_LITERAL(T839829468_155, "#objectInit($1, $2);$n", 22);
STRING_LITERAL(T839829468_156, "for ($1 = 0; $1 < $2->$3; $1++) {$n", 35);
STRING_LITERAL(T839829468_157, "len", 3);
STRING_LITERAL(T839829468_158, "Sup.len", 7);
STRING_LITERAL(T839829468_159, "for ($1 = 0; $1 < $2; $1++) {$n", 31);
STRING_LITERAL(T839829468_160, "}$n", 3);
STRING_LITERAL(T839829468_161, "$1.Sup", 6);
STRING_LITERAL(T839829468_162, "genTraverseProc", 15);
STRING_LITERAL(T839829468_163, "switch ($1.$2) {$n", 18);
STRING_LITERAL(T839829468_164, "case $1 ... $2:$n", 17);
STRING_LITERAL(T839829468_165, "genLiteral: ty is nil", 21);
STRING_LITERAL(T839829468_166, "(-2147483647 -1)", 16);
STRING_LITERAL(T839829468_167, "IL64($1)", 8);
STRING_LITERAL(T839829468_168, "(IL64(-9223372036854775807) - IL64(1))", 38);
STRING_LITERAL(T839829468_169, "NIM_TRUE", 8);
STRING_LITERAL(T839829468_170, "NIM_FALSE", 9);
STRING_LITERAL(T839829468_171, "ULL", 3);
STRING_LITERAL(T839829468_172, "(($1) $2)", 9);
STRING_LITERAL(T839829468_173, "static NIM_CONST $1 $2 = {NIM_NIL,NIM_NIL};$n", 45);
STRING_LITERAL(T839829468_174, "NIM_NIL", 7);
STRING_LITERAL(T839829468_175, "((#NimStringDesc*) NIM_NIL)", 27);
STRING_LITERAL(T839829468_176, "((#NimStringDesc*) &$1)", 23);
STRING_LITERAL(T839829468_177, "STRING_LITERAL($1, $2, $3);$n", 29);
STRING_LITERAL(T839829468_178, "((#NimStringDesc*) &$1$2)", 25);
STRING_LITERAL(T839829468_179, "genLiteral(", 11);
STRING_LITERAL(T839829468_180, "case $1:$n", 10);
STRING_LITERAL(T839829468_181, "default:$n", 10);
STRING_LITERAL(T839829468_182, "break;$n", 8);
STRING_LITERAL(T839829468_183, "} $n", 4);
STRING_LITERAL(T839829468_184, "genTraverseProc()", 17);
STRING_LITERAL(T839829468_185, "$1.Field$2", 10);
STRING_LITERAL(T839829468_186, "$1.ClEnv", 8);
STRING_LITERAL(T839829468_187, "$1->data[$2]", 12);
STRING_LITERAL(T839829468_188, "a", 1);
STRING_LITERAL(T839829468_189, "(*a)", 4);
STRING_LITERAL(T839829468_190, "$1 {$n$2$3$4}$n", 15);
STRING_LITERAL(T839829468_191, "$1;$n", 5);
STRING_LITERAL(T839829468_192, "$1.marker = $2;$n", 17);
STRING_LITERAL(T839829468_193, "$1.len = $2; $1.kind = 0;$n$3.node = &$1;$n", 43);
STRING_LITERAL(T839829468_194, "$1.offset = $2;$n", 17);
STRING_LITERAL(T839829468_195, "NI $1;$n", 8);
STRING_LITERAL(T839829468_196, "static char* NIM_CONST $1[$2] = {$n$3};$n", 41);
STRING_LITERAL(T839829468_197, "for ($1 = 0; $1 < $2; $1++) {$n$3[$1+$4].kind = 1;$n$3[$1+$4].o"
"ffset = $1;$n$3[$1+$4].name = $5[$1];$n$6[$1] = &$3[$1+$4];$n}$n", 127);
STRING_LITERAL(T839829468_198, "$1.len = $2; $1.kind = 2; $1.sons = &$3[0];$n$4.node = &$1;$n", 61);
STRING_LITERAL(T839829468_199, "$1.flags = 1<<2;$n", 18);
STRING_LITERAL(T839829468_200, "anonymous obj with discriminator", 32);
STRING_LITERAL(T839829468_201, "NimDT_$1_$2", 11);
STRING_LITERAL(T839829468_202, "$1.kind = 3;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n$1.sons = &$6[0];$n$1.len = $7;$n", 107);
STRING_LITERAL(T839829468_203, "TNimNode* $1[$2];$n", 19);
STRING_LITERAL(T839829468_204, "genObjectFields; nkOfBranch broken", 34);
STRING_LITERAL(T839829468_205, "genObjectFields(nkRecCase)", 26);
STRING_LITERAL(T839829468_206, "$1.kind = 1;$n$1.offset = offsetof($2, $3);$n$1.typ = $4;$n$1.n"
"ame = $5;$n", 74);
STRING_LITERAL(T839829468_207, "genObjectFields", 15);
STRING_LITERAL(T839829468_208, "$1.deepcopy =(void* (N_RAW_NIMCALL*)(void*))$2;$n", 49);
STRING_LITERAL(T839829468_209, "\011return $1;$n", 13);
STRING_LITERAL(T839829468_210, "Result", 6);
STRING_LITERAL(T839829468_211, "closure generation failed", 25);
STRING_LITERAL(T839829468_212, "$1 = ($2) ClEnv;$n", 18);
STRING_LITERAL(T839829468_213, "__declspec(noreturn) ", 21);
STRING_LITERAL(T839829468_214, "__declspec(naked) ", 18);
STRING_LITERAL(T839829468_215, "$N$1 {$n$2$3$4}$N$N", 19);
STRING_LITERAL(T839829468_216, "$N$1 {$N", 8);
STRING_LITERAL(T839829468_217, "struct {$1} GCFRAME;$n", 22);
STRING_LITERAL(T839829468_218, "nimFrame", 8);
STRING_LITERAL(T839829468_219, "VarSlot", 7);
STRING_LITERAL(T839829468_220, "\011nimfrs($1, $2, $3, $4)$N", 25);
STRING_LITERAL(T839829468_221, "\011nimfr($1, $2)$N", 16);
STRING_LITERAL(T839829468_222, "\011#nimProfile();$n", 17);
STRING_LITERAL(T839829468_223, "{", 1);
STRING_LITERAL(T839829468_224, "\011}BeforeRet: ;$n", 16);
STRING_LITERAL(T839829468_225, "if (((NU)&GCFRAME) < 4096) #nimGCFrame(&GCFRAME);$n", 51);
STRING_LITERAL(T839829468_226, "\011#popFrame();$n", 15);
STRING_LITERAL(T839829468_227, "}$N", 3);
STRING_LITERAL(T839829468_228, "static void* $1;$n", 18);
STRING_LITERAL(T839829468_229, "||", 2);
STRING_LITERAL(T839829468_230, "($1 = #nimLoadLibrary((#NimStringDesc*) &$2))$n", 47);
STRING_LITERAL(T839829468_231, "if (!($1)) #nimLoadLibraryError((#NimStringDesc*) &$2);$n", 57);
STRING_LITERAL(T839829468_232, "if (!($1 = #nimLoadLibrary($2))) #nimLoadLibraryError($2);$n", 60);
STRING_LITERAL(T839829468_233, "loadDynamicLib", 14);
STRING_LITERAL(T839829468_234, "Dl_$1", 5);
STRING_LITERAL(T839829468_235, "\011$1 = ($2) ($3$4));$n", 21);
NIM_CONST TY203018 T839829468_236 = {((NimStringDesc*) &T839829468_10),
((NI) 535)}
;
STRING_LITERAL(T839829468_237, "wrong index: ", 13);
STRING_LITERAL(T839829468_238, "\011$1 = ($2) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_239, "$2 $1;$n", 8);
STRING_LITERAL(T839829468_240, "extern ", 7);
STRING_LITERAL(T839829468_241, "NIM_THREADVAR ", 14);
STRING_LITERAL(T839829468_242, " $1;$n", 6);
STRING_LITERAL(T839829468_243, "cgsym: ", 7);
STRING_LITERAL(T839829468_244, ": ", 2);
STRING_LITERAL(T839829468_245, "extern $1 $2;$n", 15);
STRING_LITERAL(T839829468_246, "extern \"C\" ", 11);
STRING_LITERAL(T839829468_247, " __attribute__((naked))", 23);
STRING_LITERAL(T839829468_248, " __attribute__((noreturn))", 26);
STRING_LITERAL(T839829468_249, "#asgnRef((void**) $1, $2);$n", 28);
STRING_LITERAL(T839829468_250, "#asgnRefNoCycle((void**) $1, $2);$n", 35);
STRING_LITERAL(T839829468_251, "#unsureAsgnRef((void**) $1, $2);$n", 34);
STRING_LITERAL(T839829468_252, "#genericSeqAssign($1, $2, $3);$n", 32);
STRING_LITERAL(T839829468_253, "$1 = #copyString($2);$n", 23);
STRING_LITERAL(T839829468_254, "$3 = $1; $1 = #copyStringRC1($2);$n", 35);
STRING_LITERAL(T839829468_255, "if ($1) #nimGCunrefNoCycle($1);$n", 33);
STRING_LITERAL(T839829468_256, "#unsureAsgnRef((void**) $1, #copyString($2));$n", 47);
STRING_LITERAL(T839829468_257, ".", 1);
STRING_LITERAL(T839829468_258, "ClEnv", 5);
STRING_LITERAL(T839829468_259, "$1.ClPrc = $2.ClPrc;$n", 22);
STRING_LITERAL(T839829468_260, "Field$1", 7);
STRING_LITERAL(T839829468_261, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($3));$n", 53);
STRING_LITERAL(T839829468_262, "#genericShallowAssign((void*)$1, (void*)$2, $3);$n", 50);
STRING_LITERAL(T839829468_263, "#genericAssign((void*)$1, (void*)$2, $3);$n", 43);
STRING_LITERAL(T839829468_265, "compiler/ccgexprs.nim", 21);
NIM_CONST TY203018 T839829468_264 = {((NimStringDesc*) &T839829468_265),
((NI) 320)}
;
STRING_LITERAL(T839829468_266, "#genericAssignOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 60);
STRING_LITERAL(T839829468_267, "memcpy((void*)$1, (NIM_CONST void*)$2, sizeof($1[0])*$1Len0);$n", 63);
STRING_LITERAL(T839829468_268, "memcpy((void*)$1, (NIM_CONST void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_269, "genAssignment: ", 15);
STRING_LITERAL(T839829468_270, "request to generate code for .compileTime proc: ", 48);
STRING_LITERAL(T839829468_271, "expr: proc not init ", 20);
STRING_LITERAL(T839829468_272, "NIM_CONST $1 $2 = $3;$n", 23);
STRING_LITERAL(T839829468_273, "{$n", 3);
STRING_LITERAL(T839829468_274, "0x$1,$n", 7);
STRING_LITERAL(T839829468_275, "0x$1, ", 6);
STRING_LITERAL(T839829468_276, "0x$1}$n", 7);
STRING_LITERAL(T839829468_277, "{{$1, $1}", 9);
STRING_LITERAL(T839829468_278, ", {", 3);
STRING_LITERAL(T839829468_279, ",$n", 3);
STRING_LITERAL(T839829468_280, "}", 1);
STRING_LITERAL(T839829468_281, "NIM_CONST struct {$n #TGenericSeq Sup;$n $1 data[$2];$n} $3 ="
" $4;$n", 69);
STRING_LITERAL(T839829468_282, "(($1)&$2)", 9);
STRING_LITERAL(T839829468_283, "$1,$n", 5);
STRING_LITERAL(T839829468_284, "extern NIM_CONST $1 $2;$n", 25);
STRING_LITERAL(T839829468_285, "expr: var not init ", 19);
STRING_LITERAL(T839829468_286, "\011NimThreadVars* NimTV;$n", 24);
STRING_LITERAL(T839829468_287, "\011NimTV = (NimThreadVars*) #GetThreadLocalVars();$n", 50);
STRING_LITERAL(T839829468_288, "NimTV->", 7);
STRING_LITERAL(T839829468_289, "expr: temp not init ", 20);
STRING_LITERAL(T839829468_290, "expr: param not init ", 21);
STRING_LITERAL(T839829468_291, "expr(", 5);
STRING_LITERAL(T839829468_292, "); unknown symbol", 17);
STRING_LITERAL(T839829468_293, "//", 2);
STRING_LITERAL(T839829468_294, "#endb($1, $2);$n", 16);
STRING_LITERAL(T839829468_295, "nimln($1, $2);$n", 16);
STRING_LITERAL(T839829468_296, "LA", 2);
STRING_LITERAL(T839829468_297, "if ($1) goto $2;$n", 18);
STRING_LITERAL(T839829468_298, "if (!($1)) goto $2;$n", 21);
STRING_LITERAL(T839829468_299, "$1: ;$n", 7);
STRING_LITERAL(T839829468_300, "!($1)", 5);
STRING_LITERAL(T839829468_301, "$1", 2);
STRING_LITERAL(T839829468_302, "($3)((NU$2) ~($1))", 18);
STRING_LITERAL(T839829468_303, "-($1)", 5);
STRING_LITERAL(T839829468_304, "($1 > 0? ($1) : -($1))", 22);
STRING_LITERAL(T839829468_305, "(($3)(NU)(NU8)($1))", 19);
STRING_LITERAL(T839829468_306, "(($3)(NU64)(NU8)($1))", 21);
STRING_LITERAL(T839829468_307, "(($3)(NU)(NU16)($1))", 20);
STRING_LITERAL(T839829468_308, "(($3)(NU64)(NU16)($1))", 22);
STRING_LITERAL(T839829468_309, "(($3)(NU64)(NU32)($1))", 22);
STRING_LITERAL(T839829468_310, "(($3)(NU64)(NU)($1))", 20);
STRING_LITERAL(T839829468_311, "(($3)(NU8)(NU)($1))", 19);
STRING_LITERAL(T839829468_312, "(($3)(NU16)(NU)($1))", 20);
STRING_LITERAL(T839829468_313, "(($3)(NU32)(NU64)($1))", 22);
STRING_LITERAL(T839829468_314, "((double) ($1))", 15);
STRING_LITERAL(T839829468_315, "float64ToInt32($1)", 18);
STRING_LITERAL(T839829468_316, "float64ToInt64($1)", 18);
NIM_CONST TY552655 unarithtab_552653_839829468 = {((NimStringDesc*) &T839829468_300),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_302),
((NimStringDesc*) &T839829468_301),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304),
((NimStringDesc*) &T839829468_305),
((NimStringDesc*) &T839829468_306),
((NimStringDesc*) &T839829468_307),
((NimStringDesc*) &T839829468_308),
((NimStringDesc*) &T839829468_309),
((NimStringDesc*) &T839829468_310),
((NimStringDesc*) &T839829468_311),
((NimStringDesc*) &T839829468_312),
((NimStringDesc*) &T839829468_313),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_314),
((NimStringDesc*) &T839829468_315),
((NimStringDesc*) &T839829468_316)}
;
STRING_LITERAL(T839829468_317, "if ($1 == $2) #raiseOverflow();$n", 33);
STRING_LITERAL(T839829468_318, "((NI$2)-($1))", 13);
NIM_CONST TY551642 opr_551640_839829468 = {((NimStringDesc*) &T839829468_318),
((NimStringDesc*) &T839829468_303),
((NimStringDesc*) &T839829468_304)}
;
STRING_LITERAL(T839829468_319, "(($4)($2) $1 ($4)($3))", 22);
STRING_LITERAL(T839829468_320, "+", 1);
STRING_LITERAL(T839829468_321, "-", 1);
STRING_LITERAL(T839829468_322, "/", 1);
NIM_CONST TY556765 opr_556763_839829468 = {((NimStringDesc*) &T839829468_320),
((NimStringDesc*) &T839829468_321),
((NimStringDesc*) &T839829468_53),
((NimStringDesc*) &T839829468_322)}
;
STRING_LITERAL(T839829468_323, "#nanCheck($1);$n", 16);
STRING_LITERAL(T839829468_324, "#infCheck($1);$n", 16);
STRING_LITERAL(T839829468_325, "(($4)($1) + ($4)($2))", 21);
STRING_LITERAL(T839829468_326, "(($4)($1) - ($4)($2))", 21);
STRING_LITERAL(T839829468_327, "(($4)($1) * ($4)($2))", 21);
STRING_LITERAL(T839829468_328, "(($4)($1) / ($4)($2))", 21);
STRING_LITERAL(T839829468_329, "($4)((NU$3)($1) >> (NU$3)($2))", 30);
STRING_LITERAL(T839829468_330, "($4)((NU$3)($1) << (NU$3)($2))", 30);
STRING_LITERAL(T839829468_331, "($4)($1 & $2)", 13);
STRING_LITERAL(T839829468_332, "($4)($1 | $2)", 13);
STRING_LITERAL(T839829468_333, "($4)($1 ^ $2)", 13);
STRING_LITERAL(T839829468_334, "(($1 <= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_335, "(($1 >= $2) ? $1 : $2)", 22);
STRING_LITERAL(T839829468_336, "($4)((NU$3)($1) + (NU$3)($2))", 29);
STRING_LITERAL(T839829468_337, "($4)((NU$3)($1) - (NU$3)($2))", 29);
STRING_LITERAL(T839829468_338, "($4)((NU$3)($1) * (NU$3)($2))", 29);
STRING_LITERAL(T839829468_339, "($4)((NU$3)($1) / (NU$3)($2))", 29);
STRING_LITERAL(T839829468_340, "($4)((NU$3)($1) % (NU$3)($2))", 29);
STRING_LITERAL(T839829468_341, "($1 == $2)", 10);
STRING_LITERAL(T839829468_342, "($1 <= $2)", 10);
STRING_LITERAL(T839829468_343, "($1 < $2)", 9);
STRING_LITERAL(T839829468_344, "((NU$3)($1) <= (NU$3)($2))", 26);
STRING_LITERAL(T839829468_345, "((NU$3)($1) < (NU$3)($2))", 25);
STRING_LITERAL(T839829468_346, "((NU64)($1) <= (NU64)($2))", 26);
STRING_LITERAL(T839829468_347, "((NU64)($1) < (NU64)($2))", 25);
STRING_LITERAL(T839829468_348, "((NU8)($1) == (NU8)($2))", 24);
STRING_LITERAL(T839829468_349, "((NU8)($1) <= (NU8)($2))", 24);
STRING_LITERAL(T839829468_350, "((NU8)($1) < (NU8)($2))", 23);
STRING_LITERAL(T839829468_351, "($1 != $2)", 10);
NIM_CONST TY551828 binarithtab_551826_839829468 = {((NimStringDesc*) &T839829468_325),
((NimStringDesc*) &T839829468_326),
((NimStringDesc*) &T839829468_327),
((NimStringDesc*) &T839829468_328),
((NimStringDesc*) &T839829468_329),
((NimStringDesc*) &T839829468_330),
((NimStringDesc*) &T839829468_331),
((NimStringDesc*) &T839829468_332),
((NimStringDesc*) &T839829468_333),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_334),
((NimStringDesc*) &T839829468_335),
((NimStringDesc*) &T839829468_336),
((NimStringDesc*) &T839829468_337),
((NimStringDesc*) &T839829468_338),
((NimStringDesc*) &T839829468_339),
((NimStringDesc*) &T839829468_340),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_344),
((NimStringDesc*) &T839829468_345),
((NimStringDesc*) &T839829468_346),
((NimStringDesc*) &T839829468_347),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_348),
((NimStringDesc*) &T839829468_349),
((NimStringDesc*) &T839829468_350),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_341),
((NimStringDesc*) &T839829468_342),
((NimStringDesc*) &T839829468_343),
((NimStringDesc*) &T839829468_351)}
;
STRING_LITERAL(T839829468_352, "($1.ClPrc == $2.ClPrc && $1.ClEnv == $2.ClEnv)", 46);
STRING_LITERAL(T839829468_353, "($#)($# + $#)", 13);
STRING_LITERAL(T839829468_354, "($#)($# - $#)", 13);
STRING_LITERAL(T839829468_355, "($#)($# * $#)", 13);
STRING_LITERAL(T839829468_356, "($#)($# / $#)", 13);
STRING_LITERAL(T839829468_357, "($#)($# % $#)", 13);
NIM_CONST TY551281 opr_551279_839829468 = {((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354),
((NimStringDesc*) &T839829468_355),
((NimStringDesc*) &T839829468_356),
((NimStringDesc*) &T839829468_357),
((NimStringDesc*) &T839829468_353),
((NimStringDesc*) &T839829468_354)}
;
STRING_LITERAL(T839829468_358, "((NU8)($1))", 11);
STRING_LITERAL(T839829468_359, "if ($1 < $2 || $1 > $3) #raiseOverflow();$n", 43);
STRING_LITERAL(T839829468_360, "$# = #addInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_361, "$# = #subInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_362, "$# = #mulInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_363, "$# = #divInt64($#, $#);$n", 25);
STRING_LITERAL(T839829468_364, "$# = #modInt64($#, $#);$n", 25);
NIM_CONST TY551281 prc64_551274_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361),
((NimStringDesc*) &T839829468_362),
((NimStringDesc*) &T839829468_363),
((NimStringDesc*) &T839829468_364),
((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
STRING_LITERAL(T839829468_365, "$# = #addInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_366, "$# = #subInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_367, "$# = #mulInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_368, "$# = #divInt($#, $#);$n", 23);
STRING_LITERAL(T839829468_369, "$# = #modInt($#, $#);$n", 23);
NIM_CONST TY551281 prc_551269_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366),
((NimStringDesc*) &T839829468_367),
((NimStringDesc*) &T839829468_368),
((NimStringDesc*) &T839829468_369),
((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_370, "($#)($#)", 8);
STRING_LITERAL(T839829468_371, "#reprInt((NI64)$1)", 18);
STRING_LITERAL(T839829468_372, "#reprFloat($1)", 14);
STRING_LITERAL(T839829468_373, "#reprBool($1)", 13);
STRING_LITERAL(T839829468_374, "#reprChar($1)", 13);
STRING_LITERAL(T839829468_375, "#reprEnum((NI)$1, $2)", 21);
STRING_LITERAL(T839829468_376, "#reprStr($1)", 12);
STRING_LITERAL(T839829468_377, "#reprSet($1, $2)", 16);
STRING_LITERAL(T839829468_378, "$1, $1Len0", 10);
STRING_LITERAL(T839829468_379, "$1->data, $1->$2", 16);
STRING_LITERAL(T839829468_380, "$1, $2", 6);
STRING_LITERAL(T839829468_381, "genRepr()", 9);
STRING_LITERAL(T839829468_382, "#reprOpenArray($1, $2)", 22);
STRING_LITERAL(T839829468_383, "#reprAny($1, $2)", 16);
STRING_LITERAL(T839829468_384, "\'repr\' doesn\'t support \'void\' type", 34);
STRING_LITERAL(T839829468_385, "($1 - 1)", 8);
STRING_LITERAL(T839829468_386, "#subInt($1, 1)", 14);
STRING_LITERAL(T839829468_387, "binaryStmt", 10);
STRING_LITERAL(T839829468_388, "$1 += $2;$n", 11);
STRING_LITERAL(T839829468_389, "$1 -= $2;$n", 11);
NIM_CONST TY557052 opr_557050_839829468 = {((NimStringDesc*) &T839829468_388),
((NimStringDesc*) &T839829468_389)}
;
NIM_CONST TY557052 fun64_557055_839829468 = {((NimStringDesc*) &T839829468_360),
((NimStringDesc*) &T839829468_361)}
;
NIM_CONST TY557052 fun_557060_839829468 = {((NimStringDesc*) &T839829468_365),
((NimStringDesc*) &T839829468_366)}
;
STRING_LITERAL(T839829468_390, "#appendChar($1, $2);$n", 22);
STRING_LITERAL(T839829468_391, "$1->$2 + ", 9);
STRING_LITERAL(T839829468_392, "#appendString($1, $2);$n", 24);
STRING_LITERAL(T839829468_393, "$1 = #rawNewString($2$3);$n", 27);
STRING_LITERAL(T839829468_394, "$1 = #addChar($1, $2);$n", 24);
STRING_LITERAL(T839829468_395, "$1 = #resizeString($1, $2$3);$n", 31);
STRING_LITERAL(T839829468_396, "$1 = ($2) #incrSeqV2(&($1)->Sup, sizeof($3));$n", 47);
STRING_LITERAL(T839829468_397, "$1 = ($2) #incrSeqV2($1, sizeof($3));$n", 39);
STRING_LITERAL(T839829468_398, "$1->data[$1->$2]", 16);
STRING_LITERAL(T839829468_399, "++$1->$2;$n", 11);
STRING_LITERAL(T839829468_400, "(($1) && ($1)->$2 == 0)", 23);
STRING_LITERAL(T839829468_401, "#eqStrings($1, $2)", 18);
STRING_LITERAL(T839829468_402, "(#cmpStrings($1, $2) <= 0)", 26);
STRING_LITERAL(T839829468_403, "(#cmpStrings($1, $2) < 0)", 25);
STRING_LITERAL(T839829468_404, "$1.ClPrc == 0", 13);
STRING_LITERAL(T839829468_405, "$1 == 0", 7);
STRING_LITERAL(T839829468_406, "#nimIntToStr($1)", 16);
STRING_LITERAL(T839829468_407, "#nimInt64ToStr($1)", 18);
STRING_LITERAL(T839829468_408, "#nimBoolToStr($1)", 17);
STRING_LITERAL(T839829468_409, "#nimCharToStr($1)", 17);
STRING_LITERAL(T839829468_410, "#nimFloatToStr($1)", 18);
STRING_LITERAL(T839829468_411, "#cstrToNimstr($1)", 17);
STRING_LITERAL(T839829468_412, "no \'of\' operator available for pure objects", 43);
STRING_LITERAL(T839829468_413, "(($1) && ($2))", 14);
STRING_LITERAL(T839829468_414, "$1.m_type == $2", 15);
STRING_LITERAL(T839829468_415, "Nim_OfCheck_CACHE", 17);
STRING_LITERAL(T839829468_416, "static TNimType* $#[2];$n", 25);
STRING_LITERAL(T839829468_417, "#isObjWithCache($#.m_type, $#, $#)", 34);
STRING_LITERAL(T839829468_418, "($1)", 4);
STRING_LITERAL(T839829468_419, "sizeof($1)", 10);
STRING_LITERAL(T839829468_420, "if ($1) #nimGCunref($1);$n", 26);
STRING_LITERAL(T839829468_421, "($1) #newObjRC1($2, $3)", 23);
STRING_LITERAL(T839829468_422, "($1) #newObj($2, $3)", 20);
STRING_LITERAL(T839829468_423, "$1->finalizer = (void*)$2;$n", 28);
STRING_LITERAL(T839829468_424, "($1) #newObj($2, sizeof($3))", 28);
STRING_LITERAL(T839829468_425, "($1) #newSeqRC1($2, $3)", 23);
STRING_LITERAL(T839829468_426, "($1) #newSeq($2, $3)", 20);
STRING_LITERAL(T839829468_427, "($1)#nimNewSeqOfCap($2, $3)", 27);
STRING_LITERAL(T839829468_428, "((NI)sizeof($1))", 16);
STRING_LITERAL(T839829468_429, "(*($1*) ($2))", 13);
STRING_LITERAL(T839829468_430, "(($1) ($2))", 11);
STRING_LITERAL(T839829468_431, "($1Len0-1)", 10);
STRING_LITERAL(T839829468_432, "$1Len0", 6);
STRING_LITERAL(T839829468_433, "($1 ? (strlen($1)-1) : -1)", 26);
STRING_LITERAL(T839829468_434, "($1 ? strlen($1) : 0)", 21);
STRING_LITERAL(T839829468_435, "($1 ? ($1->Sup.len-1) : -1)", 27);
STRING_LITERAL(T839829468_436, "($1 ? $1->Sup.len : 0)", 22);
STRING_LITERAL(T839829468_437, "($1 ? ($1->len-1) : -1)", 23);
STRING_LITERAL(T839829468_438, "($1 ? $1->len : 0)", 18);
STRING_LITERAL(T839829468_439, "genArrayLen()", 13);
STRING_LITERAL(T839829468_440, "($1->Sup.len)", 13);
STRING_LITERAL(T839829468_441, "$1->len", 7);
STRING_LITERAL(T839829468_442, "unaryStmt", 9);
STRING_LITERAL(T839829468_443, "#nimGCref($1);$n", 16);
STRING_LITERAL(T839829468_444, "#nimGCunref($1);$n", 18);
STRING_LITERAL(T839829468_445, "$1 = #setLengthStr($1, $2);$n", 29);
STRING_LITERAL(T839829468_446, "$1 = ($3) #setLengthSeq(&($1)->Sup, sizeof($4), $2);$n", 54);
STRING_LITERAL(T839829468_447, "$1 = ($3) #setLengthSeq($1, sizeof($4), $2);$n", 46);
STRING_LITERAL(T839829468_448, "($1- $2)", 8);
STRING_LITERAL(T839829468_449, "$1 |= ((", 8);
STRING_LITERAL(T839829468_450, ")1)<<(($2)%(sizeof(", 19);
STRING_LITERAL(T839829468_451, ")*8));$n", 8);
STRING_LITERAL(T839829468_452, "$1 &= ~(((", 10);
STRING_LITERAL(T839829468_453, ")1) << (($2) % (sizeof(", 23);
STRING_LITERAL(T839829468_454, ")*8)));$n", 9);
STRING_LITERAL(T839829468_455, "#countBits32($1)", 16);
STRING_LITERAL(T839829468_456, "#countBits64($1)", 16);
STRING_LITERAL(T839829468_457, "(($1 & ~ $2 ==0)&&($1 != $2))", 29);
STRING_LITERAL(T839829468_458, "(($1 & ~ $2)==0)", 16);
STRING_LITERAL(T839829468_459, "($1 & $2)", 9);
STRING_LITERAL(T839829468_460, "($1 | $2)", 9);
STRING_LITERAL(T839829468_461, "($1 & ~ $2)", 11);
STRING_LITERAL(T839829468_462, "($1 ^ $2)", 9);
STRING_LITERAL(T839829468_463, "fewCmps", 7);
STRING_LITERAL(T839829468_464, "$1 >= $2 && $1 <= $3", 20);
STRING_LITERAL(T839829468_465, "$1 == $2", 8);
STRING_LITERAL(T839829468_466, " || ", 4);
STRING_LITERAL(T839829468_467, "(($1 &(1U<<((NU)($2)&7U)))!=0)", 30);
STRING_LITERAL(T839829468_468, "(($1 &(1U<<((NU)($2)&15U)))!=0)", 31);
STRING_LITERAL(T839829468_469, "(($1 &(1U<<((NU)($2)&31U)))!=0)", 31);
STRING_LITERAL(T839829468_470, "(($1 &((NU64)1<<((NU)($2)&63U)))!=0)", 36);
STRING_LITERAL(T839829468_471, "(($1[(NU)($2)>>3] &(1U<<((NU)($2)&7U)))!=0)", 43);
STRING_LITERAL(T839829468_472, "genSetOp()", 10);
STRING_LITERAL(T839829468_473, "$1[(NU)($2)>>3] |=(1U<<($2&7U));$n", 34);
STRING_LITERAL(T839829468_474, "$1[(NU)($2)>>3] &= ~(1U<<($2&7U));$n", 36);
STRING_LITERAL(T839829468_475, "#cardSet($1, ", 13);
STRING_LITERAL(T839829468_476, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$n", 88);
STRING_LITERAL(T839829468_477, "for ($1 = 0; $1 < $2; $1++) { $n $3 = (($4[$1] & ~ $5[$1]) == "
"0);$n if (!$3) break;}$nif ($3) $3 = (memcmp($4, $5, $2) != 0);"
"$n", 129);
STRING_LITERAL(T839829468_478, "|", 1);
STRING_LITERAL(T839829468_479, "& ~", 3);
STRING_LITERAL(T839829468_480, "^", 1);
NIM_CONST TY556428 lookupopr_556426_839829468 = {((NimStringDesc*) &T839829468_476),
((NimStringDesc*) &T839829468_477),
((NimStringDesc*) &T839829468_52),
((NimStringDesc*) &T839829468_478),
((NimStringDesc*) &T839829468_479),
((NimStringDesc*) &T839829468_480)}
;
STRING_LITERAL(T839829468_481, "(memcmp($1, $2, ", 16);
STRING_LITERAL(T839829468_482, ")==0)", 5);
STRING_LITERAL(T839829468_483, "for ($1 = 0; $1 < $2; $1++) $n $3[$1] = $4[$1] $6 $5[$1];$n", 60);
STRING_LITERAL(T839829468_484, "genSetOp", 8);
STRING_LITERAL(T839829468_485, "$1->data", 8);
STRING_LITERAL(T839829468_486, "($1)+($2), ($3)-($2)+1", 22);
STRING_LITERAL(T839829468_487, "(*$1)->data+($2), ($3)-($2)+1", 29);
STRING_LITERAL(T839829468_488, "$1->data+($2), ($3)-($2)+1", 26);
STRING_LITERAL(T839829468_489, "openArrayLoc: ", 14);
STRING_LITERAL(T839829468_490, "", 0);
STRING_LITERAL(T839829468_491, "(*$1)->data, (*$1)->$2", 22);
STRING_LITERAL(T839829468_492, "$1.ClPrc($3$1.ClEnv)", 20);
STRING_LITERAL(T839829468_493, "$1.ClEnv? $1.ClPrc($3$1.ClEnv):(($4)($1.ClPrc))($2)", 51);
STRING_LITERAL(T839829468_494, "$1 = 0;$n", 9);
STRING_LITERAL(T839829468_495, "#chckNil((void*)$1);$n", 22);
STRING_LITERAL(T839829468_496, "#genericReset((void*)$1, $2);$n", 31);
STRING_LITERAL(T839829468_497, ";$n", 3);
STRING_LITERAL(T839829468_499, "compiler/ccgcalls.nim", 21);
NIM_CONST TY203018 T839829468_498 = {((NimStringDesc*) &T839829468_499),
((NI) 423)}
;
static NIM_CONST char136Set T839829468_500 = {
0x00, 0x00, 0x00, 0x00, 0x88, 0x01, 0x00, 0x00,
0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}
;
STRING_LITERAL(T839829468_501, "wrong argument count", 20);
STRING_LITERAL(T839829468_502, "call expression expected for C++ pattern", 40);
NIM_CONST TY203018 T839829468_503 = {((NimStringDesc*) &T839829468_499),
((NI) 328)}
;
STRING_LITERAL(T839829468_504, "->", 2);
STRING_LITERAL(T839829468_505, ");$n", 4);
STRING_LITERAL(T839829468_506, "[", 1);
NIM_CONST TY203018 T839829468_507 = {((NimStringDesc*) &T839829468_499),
((NI) 472)}
;
STRING_LITERAL(T839829468_508, "varargs for objective C method?", 31);
STRING_LITERAL(T839829468_509, "Result: ", 8);
STRING_LITERAL(T839829468_510, "];$n", 4);
STRING_LITERAL(T839829468_511, "]", 1);
NIM_CONST TY203018 T839829468_512 = {((NimStringDesc*) &T839829468_265),
((NI) 925)}
;
STRING_LITERAL(T839829468_513, "<stdio.h>", 9);
STRING_LITERAL(T839829468_514, ", \"nil\"", 7);
STRING_LITERAL(T839829468_515, ", $1? ($1)->data:\"nil\"", 22);
STRING_LITERAL(T839829468_516, "printf($1$2);$n", 15);
STRING_LITERAL(T839829468_517, "%s", 2);
STRING_LITERAL(T839829468_518, "fflush(stdout);$n", 17);
STRING_LITERAL(T839829468_519, "#genericDeepCopy((void*)$1, (void*)$2, $3);$n", 45);
STRING_LITERAL(T839829468_520, "#genericSeqDeepCopy($1, $2, $3);$n", 34);
STRING_LITERAL(T839829468_521, "#genericDeepCopyOpenArray((void*)$1, (void*)$2, $1Len0, $3);$n", 62);
STRING_LITERAL(T839829468_522, "genDeepCopy: ", 13);
STRING_LITERAL(T839829468_523, "genMagicExpr: ", 14);
STRING_LITERAL(T839829468_524, "static NIM_CONST $1 $2 = $3;$n", 30);
STRING_LITERAL(T839829468_525, "memset($1, 0, sizeof($1));$n", 28);
STRING_LITERAL(T839829468_526, "for ($1 = $3; $1 <= $4; $1++) $n$2[(NU)($1)>>3] |=(1U<<((NU)($1"
")&7U));$n", 72);
STRING_LITERAL(T839829468_527, "$1[(NU)($2)>>3] |=(1U<<((NU)($2)&7U));$n", 40);
STRING_LITERAL(T839829468_528, "for ($1 = $3; $1 <= $4; $1++) $n$2 |=((", 39);
STRING_LITERAL(T839829468_529, ")(1)<<(($1)%(sizeof(", 20);
STRING_LITERAL(T839829468_530, "$1 |=((", 7);
STRING_LITERAL(T839829468_531, ")(1)<<(($2)%(sizeof(", 20);
STRING_LITERAL(T839829468_532, "genCheckedRecordField", 21);
STRING_LITERAL(T839829468_533, "genObjConstr", 12);
STRING_LITERAL(T839829468_534, "if ($1) #raiseFieldError(((#NimStringDesc*) &$2));$n", 52);
STRING_LITERAL(T839829468_535, "if (!($1)) #raiseFieldError(((#NimStringDesc*) &$2));$n", 55);
STRING_LITERAL(T839829468_536, "LOC$1.source", 12);
STRING_LITERAL(T839829468_537, "union { $1 source; $2 dest; } LOC$3;$n", 38);
STRING_LITERAL(T839829468_538, "LOC$#.dest", 10);
STRING_LITERAL(T839829468_539, "if ((NU)($1) > (NU)($2)) #raiseIndexError();$n", 46);
STRING_LITERAL(T839829468_540, "if ($1 < $2 || $1 > $3) #raiseIndexError();$n", 45);
STRING_LITERAL(T839829468_541, "$1[($2)- $3]", 12);
STRING_LITERAL(T839829468_542, "if ((NU)($1) >= (NU)($2Len0)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_543, "if ((NU)($1) > (NU)($2->$3)) #raiseIndexError();$n", 50);
STRING_LITERAL(T839829468_544, "if ((NU)($1) >= (NU)($2->$3)) #raiseIndexError();$n", 51);
STRING_LITERAL(T839829468_545, "genTupleElem", 12);
STRING_LITERAL(T839829468_546, ".Field$1", 8);
STRING_LITERAL(T839829468_547, "expr(nkBracketExpr, ", 20);
STRING_LITERAL(T839829468_548, "genDeref ", 9);
STRING_LITERAL(T839829468_549, "genRecordFieldAux", 17);
STRING_LITERAL(T839829468_550, "genRecordField 3", 16);
STRING_LITERAL(T839829468_551, ".$1", 3);
STRING_LITERAL(T839829468_552, "} $1: ;$n", 9);
STRING_LITERAL(T839829468_553, "FR.len-=$1;$n", 13);
STRING_LITERAL(T839829468_554, "FR.len+=$1;$n", 13);
STRING_LITERAL(T839829468_555, "if (!$1) goto $2;$n", 19);
STRING_LITERAL(T839829468_556, "goto $1;$n", 10);
STRING_LITERAL(T839829468_557, "genIf()", 7);
STRING_LITERAL(T839829468_558, "->Sup", 5);
STRING_LITERAL(T839829468_559, "$1 = &$2;$n", 11);
STRING_LITERAL(T839829468_560, "if ($1) #chckObj($2.m_type, $3);$n", 34);
STRING_LITERAL(T839829468_561, "#chckObj($1.m_type, $2);$n", 26);
STRING_LITERAL(T839829468_562, "(($1)#$5($2, $3, $4))", 21);
STRING_LITERAL(T839829468_563, "chckRangeF", 10);
STRING_LITERAL(T839829468_564, "chckRange64", 11);
STRING_LITERAL(T839829468_565, "chckRange", 9);
STRING_LITERAL(T839829468_566, "CNSTCLOSURE", 11);
STRING_LITERAL(T839829468_567, "closure to closure created", 26);
STRING_LITERAL(T839829468_568, "$1.ClPrc = $2; $1.ClEnv = $3;$n", 31);
STRING_LITERAL(T839829468_569, "while (1) {$n", 13);
STRING_LITERAL(T839829468_570, "case statement must be exhaustive for computed goto", 51);
STRING_LITERAL(T839829468_571, "case statement has too many cases for computed goto", 51);
STRING_LITERAL(T839829468_572, "case statement has to start at 0 for computed goto", 50);
STRING_LITERAL(T839829468_573, "no case statement found for computed goto", 41);
STRING_LITERAL(T839829468_574, "TMP$1", 5);
STRING_LITERAL(T839829468_575, "static void* $#[$#] = {", 23);
STRING_LITERAL(T839829468_576, "&&TMP$#, ", 9);
STRING_LITERAL(T839829468_577, "&&TMP$#};$n", 11);
STRING_LITERAL(T839829468_578, "goto *$#[$#];$n", 15);
STRING_LITERAL(T839829468_579, "range notation not available for computed goto", 46);
STRING_LITERAL(T839829468_580, "TMP$#:$n", 8);
STRING_LITERAL(T839829468_581, "#nimProfile();$n", 16);
STRING_LITERAL(T839829468_582, "\'goto\' target must be a literal value", 37);
STRING_LITERAL(T839829468_583, "goto NIMSTATE_$#;$n", 19);
STRING_LITERAL(T839829468_584, "$1 = ($2*) #nimGetProcAddr($3, $4);$n", 37);
STRING_LITERAL(T839829468_585, "$2* $1;$n", 9);
STRING_LITERAL(T839829468_586, "#dbgRegisterGlobal($1, &$2, $3);$n", 34);
STRING_LITERAL(T839829468_587, "#nimGCvisit((void*)$1, 0);$n", 28);
STRING_LITERAL(T839829468_588, "N_NIMCALL(void, $1)(void)", 25);
STRING_LITERAL(T839829468_589, "#nimRegisterGlobalMarker($1);$n", 31);
STRING_LITERAL(T839829468_590, "$#($#);$n", 9);
STRING_LITERAL(T839829468_591, "$# = $#;$n", 10);
STRING_LITERAL(T839829468_592, "genVarTuple", 11);
STRING_LITERAL(T839829468_593, "genConstStmt", 12);
STRING_LITERAL(T839829468_594, "for statement not eliminated", 28);
STRING_LITERAL(T839829468_595, "if (#eqStrings($1, $2)) goto $3;$n", 34);
STRING_LITERAL(T839829468_596, "switch (#hashString($1) & $2) {$n", 33);
STRING_LITERAL(T839829468_597, "case $1: $n$2break;$n", 21);
STRING_LITERAL(T839829468_598, "goto LA$1;$n", 12);
STRING_LITERAL(T839829468_599, "LA$1: ;$n", 9);
STRING_LITERAL(T839829468_600, "if ($1 >= $2 && $1 <= $3) goto $4;$n", 36);
STRING_LITERAL(T839829468_601, "if ($1 == $2) goto $3;$n", 24);
STRING_LITERAL(T839829468_602, "NIMSTATE_$#:$n", 14);
STRING_LITERAL(T839829468_603, "switch ($1) {$n", 15);
STRING_LITERAL(T839829468_604, "default: __assume(0);$n", 23);
STRING_LITERAL(T839829468_605, "#popSafePoint();$n", 18);
STRING_LITERAL(T839829468_606, "#popCurrentException();$n", 25);
STRING_LITERAL(T839829468_607, "if ($1.status != 0) #popCurrentException();$n", 45);
STRING_LITERAL(T839829468_608, "goto BeforeRet;$n", 17);
STRING_LITERAL(T839829468_609, "no loop to break", 16);
STRING_LITERAL(T839829468_610, "extern $1", 9);
STRING_LITERAL(T839829468_611, "#FieldDiscriminantCheck((NI)(NU)($1), (NI)(NU)($2), $3, $4);$n", 62);
STRING_LITERAL(T839829468_612, "genAsmOrEmitStmt()", 18);
STRING_LITERAL(T839829468_613, "\"", 1);
STRING_LITERAL(T839829468_614, "\\n\"\012", 4);
STRING_LITERAL(T839829468_615, "Exception", 9);
STRING_LITERAL(T839829468_616, "E_Base", 6);
STRING_LITERAL(T839829468_617, "try {$n", 7);
STRING_LITERAL(T839829468_618, "} catch (NimException& $1) {$n", 30);
STRING_LITERAL(T839829468_619, "#setFrame((TFrame*)&FR);$n", 26);
STRING_LITERAL(T839829468_620, "else ", 5);
STRING_LITERAL(T839829468_621, "#isObj($1.exp->m_type, $2)", 26);
STRING_LITERAL(T839829468_622, "if ($1) ", 8);
STRING_LITERAL(T839829468_623, "throw;$n", 8);
STRING_LITERAL(T839829468_624, "<setjmp.h>", 10);
STRING_LITERAL(T839829468_625, "#TSafePoint $1;$n", 17);
STRING_LITERAL(T839829468_626, "#pushSafePoint(&$1);$n", 22);
STRING_LITERAL(T839829468_627, "nimStdSetjmp", 12);
STRING_LITERAL(T839829468_628, "$1.status = setjmp($1.context);$n", 33);
STRING_LITERAL(T839829468_629, "nimSigSetjmp", 12);
STRING_LITERAL(T839829468_630, "$1.status = sigsetjmp($1.context, 0);$n", 39);
STRING_LITERAL(T839829468_631, "nimRawSetjmp", 12);
STRING_LITERAL(T839829468_632, "$1.status = _setjmp($1.context);$n", 34);
STRING_LITERAL(T839829468_633, "if ($1.status == 0) {$n", 23);
STRING_LITERAL(T839829468_634, "else {$n", 8);
STRING_LITERAL(T839829468_635, "else", 4);
STRING_LITERAL(T839829468_636, "$1.status = 0;$n", 16);
STRING_LITERAL(T839829468_637, "#isObj(#getCurrentException()->Sup.m_type, $1)", 46);
STRING_LITERAL(T839829468_638, "#isObj(#getCurrentException()->m_type, $1)", 42);
STRING_LITERAL(T839829468_639, "if ($1) {$n", 11);
STRING_LITERAL(T839829468_640, "if ($1.status != 0) #reraiseException();$n", 42);
STRING_LITERAL(T839829468_641, "#raiseException((#Exception*)$1, $2);$n", 39);
STRING_LITERAL(T839829468_642, "#reraiseException();$n", 22);
STRING_LITERAL(T839829468_643, "/*TYPESECTION*/", 15);
STRING_LITERAL(T839829468_644, "/*VARSECTION*/", 14);
STRING_LITERAL(T839829468_645, "/*INCLUDESECTION*/", 18);
STRING_LITERAL(T839829468_646, "bp", 2);
STRING_LITERAL(T839829468_647, "#dbgRegisterBreakpoint($1, (NCSTRING)$2, (NCSTRING)$3);$n", 57);
STRING_LITERAL(T839829468_648, "#dbgRegisterWatchpoint($1, (NCSTRING)$2, $3);$n", 47);
STRING_LITERAL(T839829468_649, "#pragma omp parallel for $4$nfor ($1 = $2; $1 <= $3; ++$1)", 58);
STRING_LITERAL(T839829468_651, "compiler/ccgstmts.nim", 21);
NIM_CONST TY203018 T839829468_650 = {((NimStringDesc*) &T839829468_651),
((NI) 145)}
;
STRING_LITERAL(T839829468_652, "STATE$1: ;$n", 12);
STRING_LITERAL(T839829468_653, "case -1: goto BeforeRet;$n", 26);
STRING_LITERAL(T839829468_654, "case $1: goto STATE$1;$n", 24);
STRING_LITERAL(T839829468_655, "if (((NI*) $1)[0] < 0) break;$n", 31);
STRING_LITERAL(T839829468_656, "if ((((NI*) $1.ClEnv)[0]) < 0) break;$n", 39);
STRING_LITERAL(T839829468_657, "); unknown node kind", 20);
NIM_CONST TY203018 T839829468_658 = {((NimStringDesc*) &T839829468_651),
((NI) 1122)}
;
STRING_LITERAL(T839829468_659, "Init000", 7);
STRING_LITERAL(T839829468_660, "DatInit000", 10);
STRING_LITERAL(T839829468_661, "NIM_EXTERNC N_NOINLINE(void, $1)(void);$N", 41);
STRING_LITERAL(T839829468_662, "\011$1();$N", 8);
STRING_LITERAL(T839829468_663, "N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDECL(void, NimMa"
"in)(void) {$N\011void (*volatile inner)();$N\011PreMain();$N\011inner = N"
"imMainInner;$N$2\011(*inner)();$N}$N$N", 162);
STRING_LITERAL(T839829468_664, "N_STDCALL(int, WinMain)(HINSTANCE hCurInstance, $N "
" HINSTANCE hPrevInstance, $N LP"
"STR lpCmdLine, int nCmdShow) {$N\011NimMain();$N\011return nim_program"
"_result;$N}$N$N", 206);
STRING_LITERAL(T839829468_665, "N_LIB_EXPORT N_CDECL(void, NimMainInner)(void) {$N$1}$N$NN_CDEC"
"L(void, NimMain)(void) {$N\011void (*volatile inner)();$N\011PreMain()"
";$N\011inner = NimMainInner;$N$2\011(*inner)();$N}$N$N", 175);
STRING_LITERAL(T839829468_666, "BOOL WINAPI DllMain(HINSTANCE hinstDLL, DWORD fwdreason, $N "
" LPVOID lpvReserved) {$N\011if(fwdreason == DLL_PROC"
"ESS_ATTACH) {$N\011NimMain();$N}$N\011return 1;$N}$N$N", 175);
STRING_LITERAL(T839829468_667, "<windows.h>", 11);
STRING_LITERAL(T839829468_668, "void NIM_POSIX_INIT NimMainInit(void) {$N\011NimMain();$N}$N$N", 59);
STRING_LITERAL(T839829468_669, "int cmdCount;$Nchar** cmdLine;$Nchar** gEnv;$NN_CDECL(void, Nim"
"MainInner)(void) {$N$1}$N$NN_CDECL(void, NimMain)(void) {$N\011void"
" (*volatile inner)();$N\011PreMain();$N\011inner = NimMainInner;$N$2\011("
"*inner)();$N}$N$N", 208);
STRING_LITERAL(T839829468_670, "int main(void) {$N\011NimMain();$N\011return 0;$N}$N$N", 48);
STRING_LITERAL(T839829468_671, "int main(int argc, char** args, char** env) {$N\011cmdLine = args;"
"$N\011cmdCount = argc;$N\011gEnv = env;$N\011NimMain();$N\011return nim_prog"
"ram_result;$N}$N$N", 145);
STRING_LITERAL(T839829468_672, "dbgRegisterBreakpoint", 21);
STRING_LITERAL(T839829468_673, "dbgRegisterFilename", 19);
STRING_LITERAL(T839829468_674, "dbgRegisterFilename($1);$N", 26);
STRING_LITERAL(T839829468_675, "\011#initStackBottomWith((void *)&inner);$N", 40);
STRING_LITERAL(T839829468_676, "void PreMainInner() {$N\011systemInit000();$N$1$2$3}$N$Nvoid PreMa"
"in() {$N\011void (*volatile inner)();$N\011systemDatInit000();$N\011inner"
" = PreMainInner;$N$4$5\011(*inner)();$N}$N$N", 168);
STRING_LITERAL(T839829468_677, "\011#initThreadVarsEmulation();$N", 30);
STRING_LITERAL(T839829468_678, "still forwarded: ", 17);
STRING_LITERAL(T839829468_679, "NIM_EXTERNC N_NOINLINE(void, $1)(void) {$N", 42);
STRING_LITERAL(T839829468_680, "static #TNimNode $1[$2];$n", 26);
STRING_LITERAL(T839829468_681, "static #TNimType $1[$2];$n", 26);
STRING_LITERAL(T839829468_682, "\011TFrame FR; FR.len = 0;$N", 25);
STRING_LITERAL(T839829468_683, "}$N$N", 5);
STRING_LITERAL(T839829468_684, "N_NIMCALL(void, nimLoadProcs$1)(void) {$2}$N$N", 46);
STRING_LITERAL(T839829468_685, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N", 131);
STRING_LITERAL(T839829468_686, "0.15.0", 6);
STRING_LITERAL(T839829468_687, "/* Generated by Nim Compiler v$1 */$N/* (c) 2016 Andreas Rump"
"f */$N/* The generated code is subject to the original license. "
"*/$N/* Compiled for: $2, $3, $4 */$N/* Command for C compiler:$n"
" $5 */$N", 201);
extern NIM_CONST TY176082 Os_176068_4151366050;
extern NIM_CONST TY176510 Cpu_176496_4151366050;
STRING_LITERAL(T839829468_688, "#define NIM_INTBITS $1", 22);
STRING_LITERAL(T839829468_689, "typedef struct {$1} NimThreadVars;$n", 36);
STRING_LITERAL(T839829468_690, "#include \"nimbase.h\"", 20);
STRING_LITERAL(T839829468_691, "#include \"$1\"$N", 15);
STRING_LITERAL(T839829468_692, "#include $1$N", 13);
STRING_LITERAL(T839829468_693, "extern \"C\"", 10);
STRING_LITERAL(T839829468_694, "$#NI NimThreadVarsSize(){return (NI)sizeof(NimThreadVars);}$n", 61);
STRING_LITERAL(T839829468_695, "__$1__", 6);
STRING_LITERAL(T839829468_696, "#ifndef $1$n#define $1$n", 24);
STRING_LITERAL(T839829468_697, "N_CDECL(void, NimMain)(void);$n", 31);
STRING_LITERAL(T839829468_698, "#endif /* $1 */$n", 17);
Tcgen529027* generatedheader_532201_839829468;
extern TNimType NTI529015; /* BModule */
Ropeobj178006* indent_532655_839829468;
extern TNimType NTI178004; /* Rope */
extern Gcheap49818 gch_49858_1689653243;
Ropeobj178006* nimtv_538656_839829468;
Ttypeseq292836* nimtvdeps_538674_839829468;
extern TNimType NTI292836; /* TTypeSeq */
Intset268030 nimtvdeclared_538675_839829468;
extern TNimType NTI268030; /* IntSet */
NI breakpointid_548860_839829468;
Ropeobj178006* gbreakpoints_548861_839829468;
extern TY529153* gmodules_529170_3723162438;
extern TNimType NTI529027; /* TCGen */
extern Debuginfo203009 gdebuginfo_203470_1926258066;
extern Toption169009Set goptions_169128_2607990831;
extern TNimType NTI292804; /* TSymSeq */
extern Tglobaloption169013Set gglobaloptions_169130_2607990831;
extern NimStringDesc* headerfile_169138_2607990831;
extern NimStringDesc* gprojectfull_169211_2607990831;
extern Tcommands169076 gcmd_169132_2607990831;
extern NI gerrorcounter_192069_155036129;
extern Ropeobj178006* rnl_178903_2381377266;
extern NI gforwardedprocscounter_529171_3723162438;
extern TNimType NTI292244; /* TTypeKind */
extern TNimType NTI203017; /* seq[(string, int)] */
extern Tsystemcc273002 ccompiler_273431_2528170400;
extern NimStringDesc* tnl_176644_4151366050;
extern NI floatsize_176642_4151366050;
extern Tgcmode169080 gselectedgc_169133_2607990831;
extern TNimType NTI292020; /* TNodeKind */
extern TNimType NTI135002; /* seq[string] */
extern TNimType NTI292435; /* TSymKind */
extern TNimType NTI292816; /* TLoc */
extern NI intsize_176641_4151366050;
extern TNimType NTI292524; /* TMagic */
extern TNimType NTI191350; /* seq[Rope] */
extern TNimType NTI292796; /* TNodeSeq */
extern Ropeobj178006* mainmodprocs_529148_3723162438;
extern Ropeobj178006* maindatinit_529151_3723162438;
extern Ropeobj178006* mainmodinit_529149_3723162438;
extern Ropeobj178006* othermodsinit_529150_3723162438;
extern Tsystemos176004 targetos_176629_4151366050;
extern TY191612* fileinfos_191629_155036129;
extern Tsystemcpu176452 targetcpu_176627_4151366050;
extern Ropeobj178006* gmapping_529152_3723162438;
/* GC marker thunk: visits the global `generatedheader` root so the
   collector can trace it. */
N_NIMCALL(void, T839829468_2)(void) {
	nimGCvisit((void*)generatedheader_532201_839829468, 0);
}
/* GC marker thunk: visits the global `indent` rope root. */
N_NIMCALL(void, T839829468_3)(void) {
	nimGCvisit((void*)indent_532655_839829468, 0);
}
/* Map a user-visible object pointer back to its GC cell header: the
   user data sits immediately after the Cell47305 header, so step back
   by the header size. */
static N_INLINE(Cell47305*, usrtocell_51440_1689653243)(void* usr0) {
	return (Cell47305*) ((NI)((NU64)(((NI) (usr0))) - (NU64)(((NI)sizeof(Cell47305)))));
}
/* Out-of-line helper: pushes cell c0 onto the GC heap's zero-count
   table (ZCT) so it becomes a collection candidate. */
static N_INLINE(void, rtladdzct_52601_1689653243)(Cell47305* c0) {
	addzct_51417_1689653243((&gch_49858_1689653243.zct), c0);
}
/* Write barrier for ref assignment (the "no cycle" variant, per its
   name): increments the new target's refcount, decrements the old
   target's (queuing it on the ZCT when it drops below one RC unit),
   then performs the store. Refcounts are stepped in units of 8 — the
   low bits appear reserved for GC flags (see the `< 8` tests) — TODO
   confirm against the GC's flag layout. */
static N_INLINE(void, asgnRefNoCycle)(void** dest0, void* src0) {
	{
		Cell47305* c0;
		if (!!((src0 == NIM_NIL))) goto LA3;
		/* non-nil source: bump its count by one RC unit. */
		c0 = usrtocell_51440_1689653243(src0);
		(*c0).refcount += ((NI) 8);
	}
	LA3: ;
	{
		Cell47305* c0;
		if (!!(((*dest0) == NIM_NIL))) goto LA7;
		c0 = usrtocell_51440_1689653243((*dest0));
		{
			(*c0).refcount -= ((NI) 8);
			/* count fell below one RC unit: candidate for collection. */
			if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA11;
			rtladdzct_52601_1689653243(c0);
		}
		LA11: ;
	}
	LA7: ;
	(*dest0) = src0;
}
/* GC marker thunk: visits the global `nimtv` rope root. */
N_NIMCALL(void, T839829468_5)(void) {
	nimGCvisit((void*)nimtv_538656_839829468, 0);
}
/* GC marker thunk: visits the global `nimtvdeps` type-seq root. */
N_NIMCALL(void, T839829468_6)(void) {
	nimGCvisit((void*)nimtvdeps_538674_839829468, 0);
}
/* Drop one reference (one RC unit of 8) from the object at p0; when
   the count falls below one unit the cell is queued on the ZCT. */
static N_INLINE(void, nimGCunrefNoCycle)(void* p0) {
	Cell47305* c0;
	c0 = usrtocell_51440_1689653243(p0);
	{
		(*c0).refcount -= ((NI) 8);
		if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA3;
		rtladdzct_52601_1689653243(c0);
	}
	LA3: ;
}
/* GC marker thunk: visits both heap-allocated parts (head and data)
   of the global `nimtvdeclared` IntSet. */
N_NIMCALL(void, T839829468_7)(void) {
	nimGCvisit((void*)nimtvdeclared_538675_839829468.head, 0);
	nimGCvisit((void*)nimtvdeclared_538675_839829468.data, 0);
}
/* GC marker thunk: visits the global `gbreakpoints` rope root. */
N_NIMCALL(void, T839829468_8)(void) {
	nimGCvisit((void*)gbreakpoints_548861_839829468, 0);
}
/* Fetch the cached codegen module (BModule) for symbol s0, indexed by
   its `position` into the global gmodules seq; returns nil when the
   index is out of range (a nil seq counts as length 0). */
N_NIMCALL(Tcgen529027*, getcgenmodule_532226_839829468)(Tsym292834* s0) {
	Tcgen529027* result0;
	NI pos0;
	NI len0;
	result0 = (Tcgen529027*)0;
	pos0 = (*s0).position;
	len0 = (gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0);
	if ((((NI) 0) <= pos0) && (pos0 < len0)) {
		result0 = gmodules_529170_3723162438->data[pos0];
	}
	else {
		result0 = NIM_NIL;
	}
	return result0;
}
/* Thin wrapper over memcpy with a signed Nim length; the return value
   of memcpy is deliberately discarded. */
static N_INLINE(void, copymem_7485_1689653243)(void* dest0, void* source0, NI size0) {
	(void) memcpy(dest0, source0, ((size_t) (size0)));
}
/* Append src0's bytes (including the terminating NUL, hence len+1)
   onto the end of dest0 and extend dest0's length. Assumes dest0 has
   enough capacity — callers here size it via rawNewString. */
static N_INLINE(void, appendString)(NimStringDesc* dest0, NimStringDesc* src0) {
	copymem_7485_1689653243(((void*) ((&(*dest0).data[((*dest0).Sup.len)- 0]))), ((void*) ((*src0).data)), ((NI) ((NI)((*src0).Sup.len + ((NI) 1)))));
	(*dest0).Sup.len += (*src0).Sup.len;
}
/* Hash identifying a symbol's owning module: walks the owner chain up
   to the enclosing module symbol (kind == 6 — presumably skModule),
   then registers the (owner-of-module name, module name) pair with the
   global debug-info table and returns its 32-bit hash/index. */
N_NIMCALL(NU32, hashowner_532977_839829468)(Tsym292834* s0) {
	NU32 result0;
	Tsym292834* m0;
	Tsym292834* p0;
	result0 = (NU32)0;
	m0 = s0;
	{
		/* climb owners until the module symbol is reached. */
		while (1) {
			if (!!(((*m0).kind == ((Tsymkind292435) 6)))) goto LA2;
			m0 = (*m0).owner;
		} LA2: ;
	}
	/* p0: the module's own owner (presumably its package symbol). */
	p0 = (*m0).owner;
	result0 = register_203121_1926258066((&gdebuginfo_203470_1926258066), (*(*p0).name).s, (*(*m0).name).s);
	return result0;
}
/* Add one RC unit (8) to the cell's refcount; counts are kept in
   multiples of 8 — the low bits appear reserved (see decref's `< 8`
   test) — TODO confirm against the GC's flag layout. */
static N_INLINE(void, incref_53419_1689653243)(Cell47305* c0) {
	NI rc0;
	rc0 = (*c0).refcount;
	(*c0).refcount = (NI)((NU64)(rc0) + (NU64)(((NI) 8)));
}
/* Remove one RC unit (8) from the cell's refcount; when the count
   falls below one unit the cell is queued on the ZCT. */
static N_INLINE(void, decref_53001_1689653243)(Cell47305* c0) {
	{
		(*c0).refcount -= ((NI) 8);
		if (!((NU64)((*c0).refcount) < (NU64)(((NI) 8)))) goto LA3;
		rtladdzct_52601_1689653243(c0);
	}
	LA3: ;
}
/* General write barrier for ref assignment: incref the non-nil source,
   decref the non-nil previous target, then store. Same net effect as
   asgnRefNoCycle above, expressed via the incref/decref helpers. */
static N_INLINE(void, asgnRef)(void** dest0, void* src0) {
	{
		Cell47305* LOC5;
		if (!!((src0 == NIM_NIL))) goto LA3;
		LOC5 = (Cell47305*)0;
		LOC5 = usrtocell_51440_1689653243(src0);
		incref_53419_1689653243(LOC5);
	}
	LA3: ;
	{
		Cell47305* LOC10;
		if (!!(((*dest0) == NIM_NIL))) goto LA8;
		LOC10 = (Cell47305*)0;
		LOC10 = usrtocell_51440_1689653243((*dest0));
		decref_53001_1689653243(LOC10);
	}
	LA8: ;
	(*dest0) = src0;
}
/* Compute the option set for a module's init proc: modules carrying
   symbol flag 13 get the global options with bit 15 (0x8000) masked
   out — presumably the system module with stack-frame tracking
   disabled, TODO confirm flag/option names — all others inherit the
   global options unchanged. */
N_NIMCALL(Toption169009Set, initprocoptions_562635_839829468)(Tcgen529027* m0) {
	Toption169009Set result0;
	memset((void*)(&result0), 0, sizeof(result0));
	{
		if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3;
		result0 = (goptions_169128_2607990831 & ~ 32768);
	}
	goto LA1;
	LA3: ;
	{
		result0 = goptions_169128_2607990831;
	}
	LA1: ;
	return result0;
}
/* Allocate a fresh proc context for the module's pre-init code; its
   label counter starts at 100000 — presumably so generated labels do
   not collide with those of the regular init proc. */
N_NIMCALL(Tcproc529021*, newpreinitproc_562625_839829468)(Tcgen529027* m0) {
	Tcproc529021* result0 = newproc_529206_3723162438(NIM_NIL, m0);
	(*result0).labels = ((NI) 100000);
	return result0;
}
/* Allocate a fresh proc context for the module's post-init code; label
   counter starts at 200000, a distinct base from the pre-init proc's
   100000 (see newpreinitproc above). */
N_NIMCALL(Tcproc529021*, newpostinitproc_562630_839829468)(Tcgen529027* m0) {
	Tcproc529021* result0 = newproc_529206_3723162438(NIM_NIL, m0);
	(*result0).labels = ((NI) 200000);
	return result0;
}
/* Produce a fresh unique name rope: the module's tmpbase concatenated
   with the current label counter, which is then advanced. */
N_NIMCALL(Ropeobj178006*, gettempname_533598_839829468)(Tcgen529027* m0) {
	Ropeobj178006* suffix0;
	Ropeobj178006* result0;
	suffix0 = rope_178401_2381377266(((NI64) ((*m0).labels)));
	result0 = HEX26_178418_2381377266((*m0).tmpbase, suffix0);
	(*m0).labels += ((NI) 1);
	return result0;
}
/* Construct and fully initialize a fresh codegen module (TCGen) for
   `module0` backed by C file `filename0`: builds the temp-name base
   from the owner hash, initializes all caches/sets, creates the
   init/pre-init/post-init proc contexts, and applies special handling
   for modules carrying symbol flag 13 (presumably the system module —
   TODO confirm). */
N_NIMCALL(Tcgen529027*, rawnewmodule_562663_839829468)(Tsym292834* module0, NimStringDesc* filename0) {
	Tcgen529027* result0;
	NimStringDesc* LOC1;
	NU32 LOC2;
	NimStringDesc* LOC3;
	NimStringDesc* LOC4;
	NimStringDesc* LOC5;
	result0 = (Tcgen529027*)0;
	result0 = (Tcgen529027*) newObj((&NTI529015), sizeof(Tcgen529027));
	(*result0).Sup.Sup.m_type = (&NTI529027);
	/* tmpbase = prefix literal & hex(owner hash) & suffix literal. */
	LOC1 = (NimStringDesc*)0;
	LOC2 = (NU32)0;
	LOC2 = hashowner_532977_839829468(module0);
	LOC3 = (NimStringDesc*)0;
	LOC3 = HEX24_8401_1689653243(((NU64) (LOC2)));
	LOC1 = rawNewString(LOC3->Sup.len + 2);
	appendString(LOC1, ((NimStringDesc*) &T839829468_11));
	appendString(LOC1, LOC3);
	appendString(LOC1, ((NimStringDesc*) &T839829468_12));
	asgnRefNoCycle((void**) (&(*result0).tmpbase), rope_178277_2381377266(LOC1));
	/* empty caches and sets for declarations, prototypes and types. */
	initlinkedlist_147031_3771138726((&(*result0).headerfiles));
	initintset_268885_2627731572((&(*result0).declaredthings));
	initintset_268885_2627731572((&(*result0).declaredprotos));
	/* both filename fields hold a private copy of filename0. */
	LOC4 = (NimStringDesc*)0;
	LOC4 = (*result0).cfilename; (*result0).cfilename = copyStringRC1(filename0);
	if (LOC4) nimGCunrefNoCycle(LOC4);
	LOC5 = (NimStringDesc*)0;
	LOC5 = (*result0).filename; (*result0).filename = copyStringRC1(filename0);
	if (LOC5) nimGCunrefNoCycle(LOC5);
	initidtable_296019_850551059((&(*result0).typecache));
	initidtable_296019_850551059((&(*result0).forwtypecache));
	asgnRefNoCycle((void**) (&(*result0).module), module0);
	initintset_268885_2627731572((&(*result0).typeinfomarker));
	/* the three per-module proc contexts. */
	asgnRef((void**) (&(*result0).initproc), newproc_529206_3723162438(NIM_NIL, result0));
	(*(*result0).initproc).options = initprocoptions_562635_839829468(result0);
	asgnRef((void**) (&(*result0).preinitproc), newpreinitproc_562625_839829468(result0));
	asgnRef((void**) (&(*result0).postinitproc), newpostinitproc_562630_839829468(result0));
	initnodetable_296085_850551059((&(*result0).datacache));
	(*result0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
	(*result0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
	asgnRefNoCycle((void**) (&(*result0).typenodesname), gettempname_533598_839829468(result0));
	asgnRefNoCycle((void**) (&(*result0).nimtypesname), gettempname_533598_839829468(result0));
	{
		/* flag-13 modules: set codegen flag 0 and clear option bit 15 on
		   the pre/post init procs. */
		if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA8;
		(*result0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
		(*(*result0).preinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
		(*(*result0).postinitproc).options &= ~(((NU32)1) << ((((Toption169009) 15)) % (sizeof(NU32)*8)));
	}
	LA8: ;
	return result0;
}
/* Convenience overload: derive the module's full file path from its
   position index and delegate to the filename-taking constructor. */
N_NIMCALL(Tcgen529027*, rawnewmodule_563038_839829468)(Tsym292834* module0) {
	NimStringDesc* path0;
	path0 = tofullpath_192261_155036129(((NI32) ((*module0).position)));
	return rawnewmodule_562663_839829468(module0, path0);
}
/* Create the codegen module for `module0` and register it in the
   global gmodules seq (growing the seq if needed). Raises an internal
   error if a module already exists at this position, and — under
   global option bit 2 with symbol flag 25 set — reports the module as
   erroneously cached (messages built from the generated literals). */
N_NIMCALL(Tcgen529027*, newmodule_563044_839829468)(Tsym292834* module0) {
	Tcgen529027* result0;
	result0 = (Tcgen529027*)0;
	{
		/* sanity check: no BModule may already be registered here. */
		Tcgen529027* LOC3;
		NimStringDesc* LOC6;
		LOC3 = (Tcgen529027*)0;
		LOC3 = getcgenmodule_532226_839829468(module0);
		if (!!((LOC3 == NIM_NIL))) goto LA4;
		LOC6 = (NimStringDesc*)0;
		LOC6 = HEX24_196185_1689653243(T839829468_9);
		internalerror_196113_155036129(LOC6);
	}
	LA4: ;
	result0 = rawnewmodule_563038_839829468(module0);
	{
		/* grow gmodules so index `position` is addressable. */
		if (!((gmodules_529170_3723162438 ? gmodules_529170_3723162438->Sup.len : 0) <= (*module0).position)) goto LA9;
		gmodules_529170_3723162438 = (TY529153*) setLengthSeq(&(gmodules_529170_3723162438)->Sup, sizeof(Tcgen529027*), ((NI) ((NI)((*module0).position + ((NI) 1)))));
	}
	LA9: ;
	asgnRef((void**) (&gmodules_529170_3723162438->data[(*module0).position]), result0);
	{
		if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0)) goto LA13;
		{
			NimStringDesc* LOC19;
			NimStringDesc* LOC20;
			if (!(((*module0).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0)) goto LA17;
			LOC19 = (NimStringDesc*)0;
			LOC20 = (NimStringDesc*)0;
			LOC20 = tofilename_192257_155036129(((NI32) ((*module0).position)));
			LOC19 = rawNewString(LOC20->Sup.len + 28);
			appendString(LOC19, ((NimStringDesc*) &T839829468_13));
			appendString(LOC19, LOC20);
			internalerror_196113_155036129(LOC19);
		}
		LA17: ;
	}
	LA13: ;
	return result0;
}
/* Codegen pass entry point ("open"): creates the module's BModule and
   returns it as the pass context. When global option bit 27 is set
   (presumably header generation) and no header module exists yet, it
   also builds the `generatedheader` module from the configured header
   file (or the project file as fallback) with codegen flag 3 set. */
N_NIMCALL(Tpasscontext341002*, myopen_563112_839829468)(Tsym292834* module0) {
	Tpasscontext341002* result0;
	Tcgen529027* LOC1;
	result0 = (Tpasscontext341002*)0;
	LOC1 = (Tcgen529027*)0;
	LOC1 = newmodule_563044_839829468(module0);
	result0 = &LOC1->Sup;
	{
		NIM_BOOL LOC4;
		NimStringDesc* f0;
		NimStringDesc* LOC13;
		NimStringDesc* LOC14;
		LOC4 = (NIM_BOOL)0;
		LOC4 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 27))&63U)))!=0);
		if (!(LOC4)) goto LA5;
		LOC4 = (generatedheader_532201_839829468 == NIM_NIL);
		LA5: ;
		if (!LOC4) goto LA6;
		{
			/* f0: explicit --header file when non-empty, else project file. */
			if (!(((NI) 0) < (headerfile_169138_2607990831 ? headerfile_169138_2607990831->Sup.len : 0))) goto LA10;
			f0 = headerfile_169138_2607990831;
		}
		goto LA8;
		LA10: ;
		{
			f0 = gprojectfull_169211_2607990831;
		}
		LA8: ;
		LOC13 = (NimStringDesc*)0;
		LOC13 = completecfilepath_273854_2528170400(f0, NIM_TRUE);
		LOC14 = (NimStringDesc*)0;
		LOC14 = noschangeFileExt(LOC13, ((NimStringDesc*) &T839829468_14));
		asgnRef((void**) (&generatedheader_532201_839829468), rawnewmodule_562663_839829468(module0, LOC14));
		(*generatedheader_532201_839829468).flags |= ((NU8)1)<<((((Codegenflag529025) 3))%(sizeof(NU8)*8));
	}
	LA6: ;
	return result0;
}
/* Compute the output C file path for module m0: pick an extension by
   backend — literal T..._15 when compiling as command 2 or symbol
   flag 27 is set, T..._16 for command 3 / flag 28, T..._17 otherwise
   (presumably .cpp / .m / .c — TODO confirm the literals) — then
   package-qualify the cached cfilename and map it into the C-file
   output directory. */
N_NIMCALL(NimStringDesc*, getcfile_563201_839829468)(Tcgen529027* m0) {
	NimStringDesc* result0;
	NimStringDesc* ext0;
	NimStringDesc* LOC13;
	NimStringDesc* LOC14;
	result0 = (NimStringDesc*)0;
	{
		NIM_BOOL LOC3;
		LOC3 = (NIM_BOOL)0;
		LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
		if (LOC3) goto LA4;
		LOC3 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
		LA4: ;
		if (!LOC3) goto LA5;
		ext0 = copyString(((NimStringDesc*) &T839829468_15));
	}
	goto LA1;
	LA5: ;
	{
		NIM_BOOL LOC8;
		LOC8 = (NIM_BOOL)0;
		LOC8 = (gcmd_169132_2607990831 == ((Tcommands169076) 3));
		if (LOC8) goto LA9;
		LOC8 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0);
		LA9: ;
		if (!LOC8) goto LA10;
		ext0 = copyString(((NimStringDesc*) &T839829468_16));
	}
	goto LA1;
	LA10: ;
	{
		ext0 = copyString(((NimStringDesc*) &T839829468_17));
	}
	LA1: ;
	LOC13 = (NimStringDesc*)0;
	LOC13 = withpackagename_170073_2607990831((*m0).cfilename);
	LOC14 = (NimStringDesc*)0;
	LOC14 = completecfilepath_273854_2528170400(LOC13, NIM_TRUE);
	result0 = noschangeFileExt(LOC14, ext0);
	return result0;
}
/* Codegen pass entry point for a cached module: create its BModule,
   then merge the sections of the previously generated C file back in
   (readmergeinfo). The rod-reader argument rd0 is not consulted here. */
N_NIMCALL(Tpasscontext341002*, myopencached_563246_839829468)(Tsym292834* module0, Trodreader332021* rd0) {
	Tcgen529027* m0 = newmodule_563044_839829468(module0);
	NimStringDesc* cfile0 = getcfile_563201_839829468(m0);
	readmergeinfo_530613_2760143328(cfile0, m0);
	return &m0->Sup;
}
/* Code generation is suppressed as soon as any error has been
   reported; the node argument itself is not consulted. */
static N_INLINE(NIM_BOOL, skipcodegen_341085_2355241294)(Tnode292802* n0) {
	return (((NI) 0) < gerrorcounter_192069_155036129);
}
/* Initialize a TLoc only if it is still unset (kind == 0): stores the
   kind, type and storage class; the rope r0 is likewise stored only
   when no rope is present yet. A loc that already has a kind is left
   completely untouched. */
N_NIMCALL(void, fillloc_532282_839829468)(Tloc292816* a0, Tlockind292808 k0, Ttype292840* typ0, Ropeobj178006* r0, Tstorageloc292812 s0) {
	{
		if (!((*a0).k == ((Tlockind292808) 0))) goto LA3;
		(*a0).k = k0;
		unsureAsgnRef((void**) (&(*a0).t), typ0);
		(*a0).s = s0;
		{
			if (!((*a0).r == NIM_NIL)) goto LA7;
			unsureAsgnRef((void**) (&(*a0).r), r0);
		}
		LA7: ;
	}
	LA3: ;
}
/* True when the identifier's interned id falls into one of the
   reserved-word id ranges (200..262, 4..70) or is id 138 — the same
   ranges the original expressed with GNU `case lo ... hi` labels. */
N_NIMCALL(NIM_BOOL, iskeyword_532960_839829468)(Tident199010* w0) {
	NI id0;
	id0 = (*w0).Sup.id;
	if ((((NI) 200) <= id0 && id0 <= ((NI) 262)) ||
	    (((NI) 4) <= id0 && id0 <= ((NI) 70)) ||
	    (id0 == ((NI) 138))) {
		return NIM_TRUE;
	}
	return NIM_FALSE;
}
/* Compute (and cache in s.loc.r) the mangled C name for symbol s0.
   If a name is cached it is returned as-is. Otherwise the base name is
   mangled; symbols in a specific kind set whose flags permit it and
   that are not C keywords keep that name with a short suffix literal
   appended, all others get "_<symbol id>_<owner hash>" appended to
   guarantee uniqueness. */
N_NIMCALL(Ropeobj178006*, manglename_533205_839829468)(Tsym292834* s0) {
	Ropeobj178006* result0;
	result0 = (Ropeobj178006*)0;
	result0 = (*s0).loc.r;
	{
		NIM_BOOL keeporigname0;
		NIM_BOOL LOC5;
		NIM_BOOL LOC6;
		NIM_BOOL LOC9;
		NimStringDesc* LOC10;
		if (!(result0 == NIM_NIL)) goto LA3;
		/* keeporigname0: kind in bitmask 2824, no forbidden flags
		   (mask 2149580812), and not a C/C++ keyword. */
		LOC5 = (NIM_BOOL)0;
		LOC6 = (NIM_BOOL)0;
		LOC6 = ((2824 &(1U<<((NU)((*s0).kind)&31U)))!=0);
		if (!(LOC6)) goto LA7;
		LOC6 = ((IL64(2149580812) & (*s0).flags) == 0);
		LA7: ;
		LOC5 = LOC6;
		if (!(LOC5)) goto LA8;
		LOC9 = (NIM_BOOL)0;
		LOC9 = iskeyword_532960_839829468((*s0).name);
		LOC5 = !(LOC9);
		LA8: ;
		keeporigname0 = LOC5;
		LOC10 = (NimStringDesc*)0;
		LOC10 = mangle_528847_2036603609((*(*s0).name).s);
		result0 = rope_178277_2381377266(LOC10);
		{
			if (!keeporigname0) goto LA13;
			/* readable name: just append the suffix literal. */
			add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_18));
		}
		goto LA11;
		LA13: ;
		{
			/* unique name: append separator, symbol id, separator,
			   owner hash. */
			TY533289 LOC16;
			Ropeobj178006* LOC17;
			Ropeobj178006* LOC18;
			TY533289 LOC19;
			Ropeobj178006* LOC20;
			NU32 LOC21;
			Ropeobj178006* LOC22;
			memset((void*)LOC16, 0, sizeof(LOC16));
			LOC17 = (Ropeobj178006*)0;
			LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC16, 0);
			add_178482_2381377266(&result0, LOC17);
			LOC18 = (Ropeobj178006*)0;
			LOC18 = rope_178401_2381377266(((NI64) ((*s0).Sup.id)));
			add_178482_2381377266(&result0, LOC18);
			memset((void*)LOC19, 0, sizeof(LOC19));
			LOC20 = (Ropeobj178006*)0;
			LOC20 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_12), LOC19, 0);
			add_178482_2381377266(&result0, LOC20);
			LOC21 = (NU32)0;
			LOC21 = hashowner_532977_839829468(s0);
			LOC22 = (Ropeobj178006*)0;
			LOC22 = rope_178401_2381377266(((NI64) (LOC21)));
			add_178482_2381377266(&result0, LOC22);
		}
		LA11: ;
		/* cache the computed name on the symbol's loc. */
		asgnRefNoCycle((void**) (&(*s0).loc.r), result0);
	}
	LA3: ;
	return result0;
}
/* Ensure a proc symbol has a loc: when still unset (kind == 0), fill
   it with loc kind 7, the symbol's type, its mangled name, and
   storage class 2. Already-filled locs are left untouched. */
N_NIMCALL(void, fillprocloc_539201_839829468)(Tsym292834* sym0) {
	{
		Ropeobj178006* LOC5;
		if (!((*sym0).loc.k == ((Tlockind292808) 0))) goto LA3;
		LOC5 = (Ropeobj178006*)0;
		LOC5 = manglename_533205_839829468(sym0);
		fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 7), (*sym0).typ, LOC5, ((Tstorageloc292812) 2));
	}
	LA3: ;
}
/* If sym0 carries loc flag 6 (presumably the header pragma), register
   its annex path string in the module's headerfiles list; includestr
   deduplicates, and its 'was added' boolean is deliberately ignored. */
N_NIMCALL(void, useheader_532369_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
	{
		NimStringDesc* LOC5;
		NIM_BOOL LOC6;
		if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 6))&15U)))!=0)) goto LA3;
		LOC5 = (NimStringDesc*)0;
		LOC5 = getstr_297230_850551059((*(*sym0).annex).path);
		LOC6 = (NIM_BOOL)0;
		LOC6 = includestr_147249_3771138726((&(*m0).headerfiles), LOC5);
	}
	LA3: ;
}
/* Append one character to a Nim string in place: write the char at the current
   length, re-terminate with NUL one slot further, then bump the length.
   No bounds check is performed here — the caller must have reserved capacity
   (e.g. via rawNewString with a precomputed total length, as the callers in
   this file do). */
static N_INLINE(void, appendChar)(NimStringDesc* dest0, NIM_CHAR c0) {
(*dest0).data[((*dest0).Sup.len)- 0] = c0;
(*dest0).data[((NI)((*dest0).Sup.len + ((NI) 1)))- 0] = 0;
(*dest0).Sup.len += ((NI) 1);
}
N_NIMCALL(NIM_BOOL, isactivated_561431_839829468)(Tsym292834* prc0) {
  /* A proc symbol counts as "activated" once it has a type attached. */
  return !((*prc0).typ == NIM_NIL);
}
N_NIMCALL(void, addforwardedproc_532203_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
  /* Grow the module's seq of forwarded procs by one slot, store prc0 there
     via the GC write barrier, and bump the global forwarded-proc counter. */
  Tsymseq292804* seq0 = (Tsymseq292804*) incrSeqV2(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*));
  (*m0).forwardedprocs = seq0;
  asgnRefNoCycle((void**) (&seq0->data[seq0->Sup.len]), prc0);
  ++seq0->Sup.len;
  gforwardedprocscounter_529171_3723162438 += ((NI) 1);
}
/* Append a line directive (format string T839829468_21, taking a filename and
   a line number) to rope *r0 — but only when global option bit 10 (the
   line-directive option) is enabled. */
N_NIMCALL(void, genclinedir_532725_839829468)(Ropeobj178006** r0, NimStringDesc* filename0, NI line0) {
{
TY532811 LOC5;
NimStringDesc* LOC6;
if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NimStringDesc*)0;
/* escape the path into a single-line C string literal form first */
LOC6 = makesinglelinecstring_528835_2036603609(filename0);
LOC5[0] = rope_178277_2381377266(LOC6);
LOC5[1] = rope_178401_2381377266(((NI64) (line0)));
addf_179205_2381377266(r0, ((NimStringDesc*) &T839829468_21), LOC5, 2);
}
LA3: ;
}
static N_INLINE(NI, tolinenumber_192415_155036129)(Tlineinfo191336 info0) {
  /* Widen the stored line field to the native integer type NI. */
  return ((NI) (info0.line));
}
N_NIMCALL(NI, safelinenm_532721_839829468)(Tlineinfo191336 info0) {
  /* Line number from the line info, clamped so it is never negative. */
  NI line0 = tolinenumber_192415_155036129(info0);
  if (line0 < ((NI) 0)) {
    line0 = ((NI) 0);
  }
  return line0;
}
N_NIMCALL(void, genclinedir_532813_839829468)(Ropeobj178006** r0, Tlineinfo191336 info0) {
  /* Convenience overload: resolve the full file path and the clamped line
     number from info0, then delegate to the (rope, filename, line) variant. */
  NimStringDesc* path0 = tofullpath_192261_155036129(info0.fileindex);
  genclinedir_532725_839829468(r0, path0, safelinenm_532721_839829468(info0));
}
N_NIMCALL(Tctypekind529007, mapsettype_533389_839829468)(Ttype292840* typ0) {
  /* Pick the C type category used to represent a Nim set of the given byte
     size: 1/2/4/8 bytes map to categories 4/5/6/7; any other size falls back
     to category 17. */
  switch (((NI) (getsize_320135_3876443242(typ0)))) {
  case ((NI) 1):
    return ((Tctypekind529007) 4);
  case ((NI) 2):
    return ((Tctypekind529007) 5);
  case ((NI) 4):
    return ((Tctypekind529007) 6);
  case ((NI) 8):
    return ((Tctypekind529007) 7);
  default:
    return ((Tctypekind529007) 17);
  }
}
/* Map a Nim type (by its kind ordinal) to the backend's C type category
   (Tctypekind). Machine-generated (Nim compiler backend) control flow; the
   enum ordinals refer to the compiler's TTypeKind/TCTypeKind declarations,
   which are not visible in this file — NOTE(review): confirm the individual
   ordinal meanings against those declarations. */
N_NIMCALL(Tctypekind529007, maptype_533394_839829468)(Ttype292840* typ0) {
Tctypekind529007 result0;
result0 = (Tctypekind529007)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 0):
case ((Ttypekind292244) 7):
{
result0 = ((Tctypekind529007) 0);
}
break;
case ((Ttypekind292244) 1):
{
result0 = ((Tctypekind529007) 2);
}
break;
case ((Ttypekind292244) 2):
{
result0 = ((Tctypekind529007) 1);
}
break;
/* set type: category depends on the set's computed byte size */
case ((Ttypekind292244) 19):
{
result0 = mapsettype_533389_839829468(typ0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 17);
}
break;
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
result0 = ((Tctypekind529007) 19);
}
break;
/* transparent wrapper kinds: categorize by the last son (the base type) */
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 12):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = lastson_295377_850551059(typ0);
result0 = maptype_533394_839829468(LOC8);
}
break;
/* kind 14: category 6 when the first ordinal value is negative (needs a
   signed representation); otherwise pick a category by byte size, with odd
   sizes reported as an internal error */
case ((Ttypekind292244) 14):
{
{
NI64 LOC12;
LOC12 = (NI64)0;
LOC12 = firstord_320001_3876443242(typ0);
if (!(LOC12 < IL64(0))) goto LA13;
result0 = ((Tctypekind529007) 6);
}
goto LA10;
LA13: ;
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = getsize_320135_3876443242(typ0);
switch (((NI) (LOC16))) {
case ((NI) 1):
{
result0 = ((Tctypekind529007) 13);
}
break;
case ((NI) 2):
{
result0 = ((Tctypekind529007) 14);
}
break;
case ((NI) 4):
{
result0 = ((Tctypekind529007) 6);
}
break;
case ((NI) 8):
{
result0 = ((Tctypekind529007) 7);
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
}
LA10: ;
}
break;
case ((Ttypekind292244) 20):
{
result0 = maptype_533394_839829468((*typ0).sons->data[((NI) 0)]);
}
break;
/* pointer-like kinds 21/22/23: a base of one of the "array-ish" kinds
   (27/4/16/48, the same group mapped to 17 above) gets category 18,
   anything else category 20 */
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
case ((Ttypekind292244) 22):
{
Ttype292840* base0;
Ttype292840* LOC24;
LOC24 = (Ttype292840*)0;
LOC24 = lastson_295377_850551059(typ0);
base0 = skiptypes_296099_850551059(LOC24, IL64(211106232576256));
switch ((*base0).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 48):
{
result0 = ((Tctypekind529007) 18);
}
break;
default:
{
result0 = ((Tctypekind529007) 20);
}
break;
}
}
break;
case ((Ttypekind292244) 26):
{
result0 = ((Tctypekind529007) 20);
}
break;
case ((Ttypekind292244) 24):
{
result0 = ((Tctypekind529007) 22);
}
break;
/* proc type (25): calling convention != 8 maps to category 23, convention 8
   (presumably the closure convention — confirm) to 19 (struct-like) */
case ((Ttypekind292244) 25):
{
{
if (!!(((*typ0).callconv == ((Tcallingconvention292002) 8)))) goto LA32;
result0 = ((Tctypekind529007) 23);
}
goto LA30;
LA32: ;
{
result0 = ((Tctypekind529007) 19);
}
LA30: ;
}
break;
case ((Ttypekind292244) 28):
{
result0 = ((Tctypekind529007) 21);
}
break;
case ((Ttypekind292244) 29):
{
result0 = ((Tctypekind529007) 24);
}
break;
/* numeric kinds 31..44 map linearly onto categories 3..16 */
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
result0 = ((Tctypekind529007) ((NI)(((NI) ((NI)(((NI) ((*typ0).kind)) - ((NI) 31)))) + ((NI) 3))));
}
break;
/* kind 59: only valid when it has a resolved node; recurse into last son */
case ((Ttypekind292244) 59):
{
{
Ttype292840* LOC43;
if (!!(((*typ0).n == NIM_NIL))) goto LA41;
LOC43 = (Ttype292840*)0;
LOC43 = lastson_295377_850551059(typ0);
result0 = maptype_533394_839829468(LOC43);
}
goto LA39;
LA41: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
LA39: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_25));
}
break;
}
return result0;
}
N_NIMCALL(NIM_BOOL, isimportedcpptype_533478_839829468)(Ttype292840* t0) {
  /* True when the type has a symbol carrying symbol-flag bit 27 (imported
     C++ type). Short-circuits when there is no symbol. */
  NIM_BOOL imported0 = !(((*t0).sym == NIM_NIL));
  if (imported0) {
    imported0 = (((*(*t0).sym).flags & (1U << ((NU)(((Tsymflag292184) 27)) & 31U))) != 0);
  }
  return imported0;
}
N_NIMCALL(NIM_BOOL, needscomplexassignment_533511_839829468)(Ttype292840* typ0) {
  /* Assignment needs the complex (GC-aware) path exactly when the type
     contains a garbage-collected reference somewhere inside it. */
  return containsgarbagecollectedref_320117_3876443242(typ0);
}
static N_INLINE(NIM_BOOL, isobjlackingtypefield_533515_839829468)(Ttype292840* typ0) {
  /* An object type lacks the hidden run-time type field when it is an object
     (kind 17) and either carries type-flag bit 2 with no base type (first son
     is nil), or ispureobject reports it pure. C's && / || short-circuit in
     exactly the same order as the generated goto chain this replaces. */
  NIM_BOOL lacking0 = ((*typ0).kind == ((Ttypekind292244) 17));
  if (lacking0) {
    NIM_BOOL flaggednobase0 = (((*typ0).flags & (1U << ((NU)(((Ttypeflag292431) 2)) & 31U))) != 0);
    if (flaggednobase0) {
      flaggednobase0 = ((*typ0).sons->data[((NI) 0)] == NIM_NIL);
    }
    lacking0 = (flaggednobase0 || ispureobject_320138_3876443242(typ0));
  }
  return lacking0;
}
/* Decide whether a proc's return type cannot be returned directly by value
   from the generated C function. nil return type counts as invalid.
   NOTE(review): presumably the callers use this to switch to a hidden result
   parameter — confirm at the call sites (outside this chunk). */
N_NIMCALL(NIM_BOOL, isinvalidreturntype_533550_839829468)(Ttype292840* rettype0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
if (!(rettype0 == NIM_NIL)) goto LA3;
result0 = NIM_TRUE;
}
goto LA1;
LA3: ;
{
Tctypekind529007 LOC6;
LOC6 = (Tctypekind529007)0;
LOC6 = maptype_533394_839829468(rettype0);
switch (LOC6) {
/* category 17: invalid unless the skipped type's kind is one of the three
   kinds in bit mask 14680064 (0xE00000 = kinds 21..23, the pointer-like
   group) */
case ((Tctypekind529007) 17):
{
Ttype292840* LOC8;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
result0 = !(((14680064 &((NU64)1<<((NU)((*LOC8).kind)&63U)))!=0));
}
break;
/* category 19 (struct-like): imported C++ types are always fine; otherwise
   invalid when the type needs GC-aware assignment, or is an object (kind 17)
   that still carries the hidden type field */
case ((Tctypekind529007) 19):
{
Ttype292840* t0;
NIM_BOOL LOC16;
NIM_BOOL LOC18;
NIM_BOOL LOC20;
t0 = skiptypes_296099_850551059(rettype0, IL64(211106232576256));
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedcpptype_533478_839829468(rettype0);
if (LOC12) goto LA13;
LOC12 = isimportedcpptype_533478_839829468(t0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
goto BeforeRet;
}
LA14: ;
LOC16 = (NIM_BOOL)0;
LOC16 = needscomplexassignment_533511_839829468(t0);
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC18)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = isobjlackingtypefield_533515_839829468(t0);
LOC18 = !(LOC20);
LA19: ;
LOC16 = LOC18;
LA17: ;
result0 = LOC16;
}
break;
default:
{
result0 = NIM_FALSE;
}
break;
}
}
LA1: ;
}BeforeRet: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, typename_533292_839829468)(Ttype292840* typ0) {
  /* Name rope for a type: the mangled symbol name when the type has a symbol,
     otherwise a fixed fallback produced by formatting T839829468_28 with an
     empty argument list. */
  Ropeobj178006* result0;
  if (!(((*typ0).sym == NIM_NIL))) {
    NimStringDesc* mangled0 = mangle_528847_2036603609((*(*(*typ0).sym).name).s);
    result0 = rope_178277_2381377266(mangled0);
  } else {
    TY533289 noargs0;
    memset((void*)noargs0, 0, sizeof(noargs0));
    result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_28), noargs0, 0);
  }
  return result0;
}
/* C identifier rope for a type. If the type's symbol has any flag in mask 96,
   reuse the symbol's pre-assigned loc.r; otherwise lazily build and cache
   "typename + unique id" in typ.loc.r. A nil result at the end is an internal
   error, reported with the stringified type kind. */
N_NIMCALL(Ropeobj178006*, gettypename_533313_839829468)(Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*typ0).sym == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = !(((96 & (*(*typ0).sym).flags) == 0));
LA4: ;
if (!LOC3) goto LA5;
result0 = (*(*typ0).sym).loc.r;
}
goto LA1;
LA5: ;
{
{
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
/* fill the cache only on first use */
if (!((*typ0).loc.r == NIM_NIL)) goto LA10;
LOC12 = (Ropeobj178006*)0;
LOC12 = typename_533292_839829468(typ0);
LOC13 = (Ropeobj178006*)0;
LOC13 = rope_178401_2381377266(((NI64) ((*typ0).Sup.id)));
asgnRefNoCycle((void**) (&(*typ0).loc.r), HEX26_178418_2381377266(LOC12, LOC13));
}
LA10: ;
result0 = (*typ0).loc.r;
}
LA1: ;
{
NimStringDesc* LOC18;
if (!(result0 == NIM_NIL)) goto LA16;
/* "+ 13" presumably matches the length of the literal T839829468_29 —
   confirm against the literal's definition */
LOC18 = (NimStringDesc*)0;
LOC18 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC18, ((NimStringDesc*) &T839829468_29));
appendString(LOC18, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC18);
}
LA16: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, typenameorliteral_533898_839829468)(Ttype292840* t0, NimStringDesc* literal0) {
  /* Use the type's own name when its symbol carries flag bit 5 (the same bit
     isimportedtype tests) and has no magic; otherwise fall back to the
     supplied literal text. Each test short-circuits like the original chain. */
  NIM_BOOL named0 = !(((*t0).sym == NIM_NIL));
  if (named0) {
    named0 = (((*(*t0).sym).flags & (1U << ((NU)(((Tsymflag292184) 5)) & 31U))) != 0);
  }
  if (named0) {
    named0 = ((*(*t0).sym).magic == ((Tmagic292524) 0));
  }
  if (named0) {
    return gettypename_533313_839829468(t0);
  }
  return rope_178277_2381377266(literal0);
}
/* Return the C type rope for "simple" type kinds that need no emitted
   declaration (names or fixed literals), recursing through wrapper kinds.
   Returns NIM_NIL for anything that needs a full declaration — callers fall
   back to the type cache / full generation (see gettypepre). */
N_NIMCALL(Ropeobj178006*, getsimpletypedesc_533936_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
switch ((*typ0).kind) {
case ((Ttypekind292244) 26):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_30));
}
break;
/* kind 28: also pulls in a compiler-support symbol (T839829468_31) so the
   generated module declares it */
case ((Ttypekind292244) 28):
{
Ropeobj178006* LOC3;
LOC3 = (Ropeobj178006*)0;
LOC3 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_31));
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_32));
}
break;
case ((Ttypekind292244) 29):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_33));
}
break;
case ((Ttypekind292244) 1):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_34));
}
break;
case ((Ttypekind292244) 2):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_35));
}
break;
case ((Ttypekind292244) 5):
{
result0 = typenameorliteral_533898_839829468(typ0, ((NimStringDesc*) &T839829468_18));
}
break;
/* numeric kinds 31..44: literal looked up from a parallel table */
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
{
result0 = typenameorliteral_533898_839829468(typ0, Numericaltypetostr_533941_839829468[((*typ0).kind)- 31]);
}
break;
/* wrapper kinds: recurse into the first son */
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 15):
{
result0 = getsimpletypedesc_533936_839829468(m0, (*typ0).sons->data[((NI) 0)]);
}
break;
/* kind 59: only valid with a resolved node; recurse into the last son */
case ((Ttypekind292244) 59):
{
{
Ttype292840* LOC15;
if (!!(((*typ0).n == NIM_NIL))) goto LA13;
LOC15 = (Ttype292840*)0;
LOC15 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC15);
}
goto LA11;
LA13: ;
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_50));
}
LA11: ;
}
break;
case ((Ttypekind292244) 11):
{
Ttype292840* LOC18;
LOC18 = (Ttype292840*)0;
LOC18 = lastson_295377_850551059(typ0);
result0 = getsimpletypedesc_533936_839829468(m0, LOC18);
}
break;
default:
{
result0 = NIM_NIL;
}
break;
}
return result0;
}
N_NIMCALL(Ropeobj178006*, cachegettype_533593_839829468)(Tidtable292850 tab0, Ttype292840* key0) {
  /* Look up the cached C declaration rope for a type in an id table; returns
     null (NIM_NIL) when the type has not been cached yet. */
  TNimObject* cached0 = idtableget_299086_2984716966(tab0, &key0->Sup);
  return ((Ropeobj178006*) (cached0));
}
N_NIMCALL(Ropeobj178006*, gettypepre_533972_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
  /* First stage of type-desc lookup: a nil type yields the fixed literal
     T839829468_26; otherwise try the simple type desc, then the per-module
     type cache. May return nil when neither succeeds. */
  Ropeobj178006* result0;
  if (typ0 == NIM_NIL) {
    result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_26));
  } else {
    result0 = getsimpletypedesc_533936_839829468(m0, typ0);
    if (result0 == NIM_NIL) {
      result0 = cachegettype_533593_839829468((*m0).typecache, typ0);
    }
  }
  return result0;
}
N_NIMCALL(NIM_BOOL, isimportedtype_533451_839829468)(Ttype292840* t0) {
  /* True when the type has a symbol carrying symbol-flag bit 5 (imported).
     Short-circuits when there is no symbol. */
  NIM_BOOL imported0 = !(((*t0).sym == NIM_NIL));
  if (imported0) {
    imported0 = (((*(*t0).sym).flags & (1U << ((NU)(((Tsymflag292184) 5)) & 31U))) != 0);
  }
  return imported0;
}
N_NIMCALL(NimStringDesc*, getforwardstructformat_534015_839829468)(Tcgen529027* m0) {
  /* Choose the forward-declaration format string: T839829468_54 when the
     global command equals 2 (presumably the compile-to-C++ command — confirm)
     or the module symbol carries flag bit 27 (the imported-C++ bit used by
     isimportedcpptype); otherwise T839829468_55. Returns a fresh copy. */
  NIM_BOOL cpplike0 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
  if (!cpplike0) {
    cpplike0 = (((*(*m0).module).flags & (1U << ((NU)(((Tsymflag292184) 27)) & 31U))) != 0);
  }
  if (cpplike0) {
    return copyString(((NimStringDesc*) &T839829468_54));
  }
  return copyString(((NimStringDesc*) &T839829468_55));
}
N_NIMCALL(Ropeobj178006*, structorunion_534001_839829468)(Ttype292840* t0) {
  /* Record keyword rope: T839829468_56 when type-flag bit 1 is set on the
     type, otherwise T839829468_57. */
  if (((*t0).flags & (1U << ((NU)(((Ttypeflag292431) 1)) & 31U))) != 0) {
    return rope_178277_2381377266(((NimStringDesc*) &T839829468_56));
  }
  return rope_178277_2381377266(((NimStringDesc*) &T839829468_57));
}
/* Return the rope naming a type when only a forward declaration is needed.
   Order: forward-type cache, then the "pre" lookup (simple/typecache); on a
   miss, for record-like kinds 24/18/17 emit a forward struct/union
   declaration into module section 2 (skipped for imported types) and cache
   the name. Any other kind reaching this point is an internal error. */
N_NIMCALL(Ropeobj178006*, gettypeforward_534039_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Ropeobj178006* result0;
{ result0 = (Ropeobj178006*)0;
result0 = cachegettype_533593_839829468((*m0).forwtypecache, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA3;
goto BeforeRet;
}
LA3: ;
result0 = gettypepre_533972_839829468(m0, typ0);
{
if (!!((result0 == NIM_NIL))) goto LA7;
goto BeforeRet;
}
LA7: ;
switch ((*typ0).kind) {
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
{
Tidobj199004* LOC17;
TNimObject* LOC18;
result0 = gettypename_533313_839829468(typ0);
{
NIM_BOOL LOC12;
NimStringDesc* LOC15;
TY532811 LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = isimportedtype_533451_839829468(typ0);
/* only emit the forward declaration for non-imported types */
if (!!(LOC12)) goto LA13;
LOC15 = (NimStringDesc*)0;
LOC15 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = structorunion_534001_839829468(typ0);
LOC16[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC15, LOC16, 2);
}
LA13: ;
/* remember the forward declaration so it is emitted only once */
LOC17 = (Tidobj199004*)0;
LOC17 = &typ0->Sup;
LOC18 = (TNimObject*)0;
LOC18 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC17, LOC18);
}
break;
default:
{
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC20, ((NimStringDesc*) &T839829468_58));
appendString(LOC20, reprEnum((NI)(*typ0).kind, (&NTI292244)));
appendChar(LOC20, 41);
internalerror_196113_155036129(LOC20);
}
break;
}
}BeforeRet: ;
return result0;
}
N_NIMCALL(void, pushtype_533958_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
  /* Append typ0 to the module's pending type stack, growing the seq by one
     slot and storing through the GC write barrier. */
  Ttypeseq292836* stack0 = (Ttypeseq292836*) incrSeqV2(&((*m0).typestack)->Sup, sizeof(Ttype292840*));
  (*m0).typestack = stack0;
  asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), typ0);
  ++stack0->Sup.len;
}
/* "Weak" type descriptor: for record-like kinds (17/18) and kind 24 only a
   forward declaration is requested (and the unique type is pushed onto the
   module's type stack for later full generation); everything else falls
   through to the full gettypedescaux. Imported C++ records wrapped in
   kind 11 still get the full descriptor. */
N_NIMCALL(Ropeobj178006*, gettypedescweak_534079_839829468)(Tcgen529027* m0, Ttype292840* t0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* etb0;
result0 = (Ropeobj178006*)0;
etb0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
switch ((*etb0).kind) {
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = isimportedcpptype_533478_839829468(etb0);
if (!(LOC4)) goto LA5;
LOC4 = ((*t0).kind == ((Ttypekind292244) 11));
LA5: ;
if (!LOC4) goto LA6;
result0 = gettypedescaux_533505_839829468(m0, t0, check0);
}
goto LA2;
LA6: ;
{
Ttype292840* x0;
x0 = getuniquetype_528640_2036603609(etb0);
result0 = gettypeforward_534039_839829468(m0, x0);
pushtype_533958_839829468(m0, x0);
}
LA2: ;
}
break;
/* kind 24: forward name with the suffix T839829468_53 appended */
case ((Ttypekind292244) 24):
{
Ttype292840* x0;
Ropeobj178006* LOC10;
x0 = getuniquetype_528640_2036603609(etb0);
LOC10 = (Ropeobj178006*)0;
LOC10 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_53));
pushtype_533958_839829468(m0, x0);
}
break;
default:
{
result0 = gettypedescaux_533505_839829468(m0, t0, check0);
}
break;
}
return result0;
}
static N_INLINE(NI, len_293081_850551059)(Tnode292802* n0) {
  /* Number of sons of an AST node; a missing sons seq counts as zero. The
     ternary repeats the null guard, mirroring the original generated code. */
  if ((*n0).kindU.S6.sons == 0) {
    return ((NI) 0);
  }
  return ((*n0).kindU.S6.sons ? (*n0).kindU.S6.sons->Sup.len : 0);
}
N_NIMCALL(void, appcg_532632_839829468)(Tcgen529027* m0, Ropeobj178006** c0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
  /* Format the arguments with the code-generator rope formatter and append
     the resulting rope to the destination *c0. */
  add_178482_2381377266(c0, ropecg_532407_839829468(m0, frmt0, args0, args0Len0));
}
N_NIMCALL(NIM_BOOL, scancppgenericslot_534827_839829468)(NimStringDesc* pat0, NI* cursor0, NI* outidx0, NI* outstars0) {
  /* Scan a generic-slot marker in a C++ pattern string: after advancing past
     the introducer character, count consecutive '*' (42) into *outstars0 and
     read one decimal digit into *outidx0. On success the cursor ends just
     past the digit and NIM_TRUE is returned; a non-digit yields NIM_FALSE. */
  NI start0;
  ++(*cursor0);
  start0 = (*cursor0);
  while ((NU8)(pat0->data[(*cursor0)]) == (NU8)(42)) {
    ++(*cursor0);
  }
  {
    NU8 ch0 = (NU8)(pat0->data[(*cursor0)]);
    if (ch0 >= ((NU8)(48)) && ch0 <= ((NU8)(57))) {
      (*outidx0) = ((NI) ((NI)(((NI) (ch0)) - ((NI) 48))));
      (*outstars0) = (NI)((*cursor0) - start0);
      ++(*cursor0);
      return NIM_TRUE;
    }
  }
  return NIM_FALSE;
}
N_NIMCALL(Ttype292840*, resolvestarsincpptype_534891_839829468)(Ttype292840* typ0, NI idx0, NI stars0) {
  /* Resolve a generic slot of a C++ pattern: take son idx0 of typ0 (internal
     error if out of range), then dereference it stars0 times — kind 11 types
     step into son 1, everything else into their element type. A nil or
     son-less type stops the stepping for that round, as in the original. */
  Ttype292840* result0;
  NI step0;
  if (len_295339_850551059(typ0) <= idx0) {
    internalerror_196113_155036129(((NimStringDesc*) &T839829468_81));
  }
  result0 = (*typ0).sons->data[idx0];
  for (step0 = ((NI) 1); step0 <= stars0; ++step0) {
    if (!(result0 == NIM_NIL) && (((NI) 0) < len_295339_850551059(result0))) {
      if ((*result0).kind == ((Ttypekind292244) 11)) {
        result0 = (*result0).sons->data[((NI) 1)];
      } else {
        result0 = elemtype_320394_3876443242(result0);
      }
    }
  }
  return result0;
}
N_NIMCALL(NimStringDesc*, manglefield_532973_839829468)(Tident199010* name0) {
  /* Mangle a field identifier for use in generated C; if the identifier is a
     keyword, uppercase its first character to avoid the clash. */
  NimStringDesc* mangled0 = mangle_528847_2036603609((*name0).s);
  if (iskeyword_532960_839829468(name0)) {
    mangled0->data[((NI) 0)] = nsuToUpperAsciiChar(mangled0->data[((NI) 0)]);
  }
  return mangled0;
}
N_NIMCALL(Ropeobj178006*, manglerecfieldname_534361_839829468)(Tsym292834* field0, Ttype292840* rectype0) {
  /* Field name used inside a record's C declaration: when the record type's
     symbol carries a flag in mask 96 (the same test gettypename uses), reuse
     the field's pre-assigned loc.r; otherwise mangle the field identifier.
     A nil result is reported as an internal error at the field's location. */
  Ropeobj178006* result0;
  NIM_BOOL keeporig0 = !(((*rectype0).sym == NIM_NIL));
  if (keeporig0) {
    keeporig0 = !(((96 & (*(*rectype0).sym).flags) == 0));
  }
  if (keeporig0) {
    result0 = (*field0).loc.r;
  } else {
    NimStringDesc* mangled0 = manglefield_532973_839829468((*field0).name);
    result0 = rope_178277_2381377266(mangled0);
  }
  if (result0 == NIM_NIL) {
    internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_96));
  }
  return result0;
}
/* Recursively generate the C field declarations for a record's node tree.
   Node kinds handled: 138 (record list — concatenate all sons), 139 (record
   case — emit the discriminator, then an anonymous union of per-branch
   structs), 3 (a single field symbol). accessexpr0 is the C access-path
   prefix built up for nested unions; rectype0 is the enclosing record type.
   Side effect: fills each field's loc with its access expression. */
N_NIMCALL(Ropeobj178006*, genrecordfieldsaux_534421_839829468)(Tcgen529027* m0, Tnode292802* n0, Ropeobj178006* accessexpr0, Ttype292840* rectype0, Intset268030* check0) {
Ropeobj178006* result0;
Ropeobj178006* ae0;
Ropeobj178006* uname0;
Ropeobj178006* sname0;
Ropeobj178006* a0;
Tnode292802* k0;
Tsym292834* field0;
{ result0 = (Ropeobj178006*)0;
ae0 = (Ropeobj178006*)0;
uname0 = (Ropeobj178006*)0;
sname0 = (Ropeobj178006*)0;
a0 = (Ropeobj178006*)0;
k0 = (Tnode292802*)0;
field0 = (Tsym292834*)0;
result0 = NIM_NIL;
switch ((*n0).kind) {
/* record list: concatenate declarations of all sons */
case ((Tnodekind292020) 138):
{
{
NI i_534447_839829468;
NI HEX3Atmp_534620_839829468;
NI LOC3;
NI res_534623_839829468;
i_534447_839829468 = (NI)0;
HEX3Atmp_534620_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_295351_850551059(n0);
HEX3Atmp_534620_839829468 = (NI)(LOC3 - ((NI) 1));
res_534623_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC6;
if (!(res_534623_839829468 <= HEX3Atmp_534620_839829468)) goto LA5;
i_534447_839829468 = res_534623_839829468;
LOC6 = (Ropeobj178006*)0;
LOC6 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[i_534447_839829468], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC6);
res_534623_839829468 += ((NI) 1);
} LA5: ;
}
}
}
break;
/* record case: son 0 must be the discriminator field symbol (kind 3) */
case ((Tnodekind292020) 139):
{
Ropeobj178006* LOC12;
NimStringDesc* LOC13;
NimStringDesc* LOC14;
Ropeobj178006* unionbody0;
{
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA10;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_89));
}
LA10: ;
LOC12 = (Ropeobj178006*)0;
LOC12 = genrecordfieldsaux_534421_839829468(m0, (*n0).kindU.S6.sons->data[((NI) 0)], accessexpr0, rectype0, check0);
add_178482_2381377266(&result0, LOC12);
/* union name = mangled discriminator name + 'U' (char 85) */
LOC13 = (NimStringDesc*)0;
LOC14 = (NimStringDesc*)0;
LOC14 = mangle_528847_2036603609((*(*(*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
LOC13 = rawNewString(LOC14->Sup.len + 1);
appendString(LOC13, LOC14);
appendChar(LOC13, 85);
uname0 = rope_178277_2381377266(LOC13);
{
TY532811 LOC19;
if (!!((accessexpr0 == NIM_NIL))) goto LA17;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = accessexpr0;
LOC19[1] = uname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC19, 2);
}
goto LA15;
LA17: ;
{
ae0 = uname0;
}
LA15: ;
unionbody0 = NIM_NIL;
/* walk the branches (sons 1..); each must be kind 85 or 88 */
{
NI i_534491_839829468;
NI HEX3Atmp_534629_839829468;
NI LOC22;
NI res_534632_839829468;
i_534491_839829468 = (NI)0;
HEX3Atmp_534629_839829468 = (NI)0;
LOC22 = (NI)0;
LOC22 = sonslen_295351_850551059(n0);
HEX3Atmp_534629_839829468 = (NI)(LOC22 - ((NI) 1));
res_534632_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534632_839829468 <= HEX3Atmp_534629_839829468)) goto LA24;
i_534491_839829468 = res_534632_839829468;
switch ((*(*n0).kindU.S6.sons->data[i_534491_839829468]).kind) {
case ((Tnodekind292020) 85):
case ((Tnodekind292020) 88):
{
k0 = lastson_295364_850551059((*n0).kindU.S6.sons->data[i_534491_839829468]);
{
Ropeobj178006* LOC30;
TY532811 LOC31;
Ropeobj178006* LOC32;
/* non-field branch content gets its own named sub-struct "S<i>" */
if (!!(((*k0).kind == ((Tnodekind292020) 3)))) goto LA28;
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) (i_534491_839829468)));
sname0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_91), LOC30);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = ae0;
LOC31[1] = sname0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC31, 2);
a0 = genrecordfieldsaux_534421_839829468(m0, k0, LOC32, rectype0, check0);
{
TY178507 LOC37;
if (!!((a0 == NIM_NIL))) goto LA35;
add_178487_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_92));
add_178482_2381377266(&unionbody0, a0);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = sname0;
addf_179205_2381377266(&unionbody0, ((NimStringDesc*) &T839829468_93), LOC37, 1);
}
LA35: ;
}
goto LA26;
LA28: ;
{
Ropeobj178006* LOC39;
LOC39 = (Ropeobj178006*)0;
LOC39 = genrecordfieldsaux_534421_839829468(m0, k0, ae0, rectype0, check0);
add_178482_2381377266(&unionbody0, LOC39);
}
LA26: ;
}
break;
default:
{
internalerror_196113_155036129(((NimStringDesc*) &T839829468_94));
}
break;
}
res_534632_839829468 += ((NI) 1);
} LA24: ;
}
}
{
TY532811 LOC45;
if (!!((unionbody0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = unionbody0;
LOC45[1] = uname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_95), LOC45, 2);
}
LA43: ;
}
break;
/* single field symbol */
case ((Tnodekind292020) 3):
{
field0 = (*n0).kindU.S4.sym;
{
/* fields of type kind 62 are skipped entirely */
if (!((*(*field0).typ).kind == ((Ttypekind292244) 62))) goto LA49;
goto BeforeRet;
}
LA49: ;
sname0 = manglerecfieldname_534361_839829468(field0, rectype0);
{
TY532811 LOC55;
if (!!((accessexpr0 == NIM_NIL))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = accessexpr0;
LOC55[1] = sname0;
ae0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC55, 2);
}
goto LA51;
LA53: ;
{
ae0 = sname0;
}
LA51: ;
/* record the field's access expression in its loc (kind 5, storage 0) */
fillloc_532282_839829468((&(*field0).loc), ((Tlockind292808) 5), (*field0).typ, ae0, ((Tstorageloc292812) 0));
{
NIM_BOOL LOC59;
Ttype292840* fieldtype0;
LOC59 = (NIM_BOOL)0;
LOC59 = isimportedcpptype_533478_839829468(rectype0);
/* imported C++ records get no generated field declarations at all */
if (!!(LOC59)) goto LA60;
fieldtype0 = skiptypes_296099_850551059((*field0).loc.t, IL64(211106232576256));
{
NIM_BOOL LOC64;
TY532811 LOC68;
Ttype292840* LOC69;
/* kind 16 with type-flag bit 0: declared via element type (format _97) */
LOC64 = (NIM_BOOL)0;
LOC64 = ((*fieldtype0).kind == ((Ttypekind292244) 16));
if (!(LOC64)) goto LA65;
LOC64 = (((*fieldtype0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0);
LA65: ;
if (!LOC64) goto LA66;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC69 = (Ttype292840*)0;
LOC69 = elemtype_320394_3876443242(fieldtype0);
LOC68[0] = gettypedescaux_533505_839829468(m0, LOC69, check0);
LOC68[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_97), LOC68, 2);
}
goto LA62;
LA66: ;
{
TY532811 LOC73;
/* kind 24: weak (forward) type descriptor is sufficient */
if (!((*fieldtype0).kind == ((Ttypekind292244) 24))) goto LA71;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = gettypedescweak_534079_839829468(m0, (*field0).loc.t, check0);
LOC73[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC73, 2);
}
goto LA62;
LA71: ;
{
TY535238 LOC77;
NimStringDesc* LOC78;
/* non-zero bitsize: emit a C bit-field (format _98) */
if (!!(((*field0).kindU.S4.bitsize == ((NI) 0)))) goto LA75;
memset((void*)LOC77, 0, sizeof(LOC77));
LOC77[0] = gettypedescaux_533505_839829468(m0, (*field0).loc.t, check0);
LOC77[1] = sname0;
LOC78 = (NimStringDesc*)0;
LOC78 = nimIntToStr((*field0).kindU.S4.bitsize);
LOC77[2] = rope_178277_2381377266(LOC78);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_98), LOC77, 3);
}
goto LA62;
LA75: ;
{
TY532811 LOC80;
/* plain field declaration */
memset((void*)LOC80, 0, sizeof(LOC80));
LOC80[0] = gettypedescaux_533505_839829468(m0, (*field0).loc.t, check0);
LOC80[1] = sname0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_54), LOC80, 2);
}
LA62: ;
}
LA60: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_99));
}
break;
}
}BeforeRet: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, getrecordfields_534636_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
  /* Entry point: generate the field declarations for the record's node tree,
     starting with no access-path prefix. */
  return genrecordfieldsaux_534421_839829468(m0, (*typ0).n, NIM_NIL, typ0, check0);
}
/* Build the full C struct/union declaration for a record type: header (with
   an optional packing/attribute string when type-flag bit 21 is set), then
   for objects (kind 17) either the hidden type-field header or the inherited
   base struct, then the fields, then the closing text. hasfield0 tracks
   whether the header already contributed a member, so an otherwise empty
   struct gets the dummy filler (format T839829468_100). */
N_NIMCALL(Ropeobj178006*, getrecorddesc_534643_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
Ropeobj178006* result0;
NIM_BOOL hasfield0;
Ropeobj178006* attribute0;
TY535238 LOC6;
Ropeobj178006* desc0;
NimStringDesc* LOC46;
result0 = (Ropeobj178006*)0;
hasfield0 = NIM_FALSE;
{
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 21))&31U)))!=0)) goto LA3;
/* compiler-specific attribute string from the C-compiler table */
attribute0 = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field19);
}
goto LA1;
LA3: ;
{
attribute0 = NIM_NIL;
}
LA1: ;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = structorunion_534001_839829468(typ0);
LOC6[1] = name0;
LOC6[2] = attribute0;
result0 = ropecg_532407_839829468(m0, Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field18, LOC6, 3);
{
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA9;
{
/* object with no base type (first son nil) */
if (!((*typ0).sons->data[((NI) 0)] == NIM_NIL)) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
TY533289 LOC23;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = !(((*typ0).sym == NIM_NIL));
if (!(LOC18)) goto LA19;
LOC18 = (((*(*typ0).sym).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (LOC17) goto LA20;
LOC17 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
/* no hidden type field needed: plain opener (format _85, no args) */
memset((void*)LOC23, 0, sizeof(LOC23));
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_85), LOC23, 0);
}
goto LA15;
LA21: ;
{
TY532811 LOC25;
/* opener that includes the hidden type field (format _86) */
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = name0;
LOC25[1] = attribute0;
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_86), LOC25, 2);
hasfield0 = NIM_TRUE;
}
LA15: ;
}
goto LA11;
LA13: ;
{
NIM_BOOL LOC27;
TY178507 LOC31;
Ttype292840* LOC32;
/* inherited object, C++-style codegen path (command 2 or module flag 27):
   base embedded via format _87 */
LOC27 = (NIM_BOOL)0;
LOC27 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC27) goto LA28;
LOC27 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ttype292840*)0;
LOC32 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC31[0] = gettypedescaux_533505_839829468(m0, LOC32, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_87), LOC31, 1);
hasfield0 = NIM_TRUE;
}
goto LA11;
LA29: ;
{
TY178507 LOC34;
Ttype292840* LOC35;
/* inherited object, C path: base embedded as a named member (format _88) */
memset((void*)LOC34, 0, sizeof(LOC34));
LOC35 = (Ttype292840*)0;
LOC35 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106247215360));
LOC34[0] = gettypedescaux_533505_839829468(m0, LOC35, check0);
appcg_532632_839829468(m0, &result0, ((NimStringDesc*) &T839829468_88), LOC34, 1);
hasfield0 = NIM_TRUE;
}
LA11: ;
}
goto LA7;
LA9: ;
{
TY178507 LOC37;
/* non-object record (e.g. tuple-like): plain opener */
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = name0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_85), LOC37, 1);
}
LA7: ;
desc0 = getrecordfields_534636_839829468(m0, typ0, check0);
{
NIM_BOOL LOC40;
TY533289 LOC44;
LOC40 = (NIM_BOOL)0;
LOC40 = (desc0 == NIM_NIL);
if (!(LOC40)) goto LA41;
LOC40 = !(hasfield0);
LA41: ;
if (!LOC40) goto LA42;
/* entirely empty struct: emit the dummy member (format _100) so the
   declaration stays legal C */
memset((void*)LOC44, 0, sizeof(LOC44));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_100), LOC44, 0);
}
goto LA38;
LA42: ;
{
add_178482_2381377266(&result0, desc0);
}
LA38: ;
/* closing text + platform newline */
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC46, ((NimStringDesc*) &T839829468_101));
appendString(LOC46, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC46);
return result0;
}
/* Build the C declaration for a tuple type: header (format T839829468_102
   with struct/union keyword and name), then one member per son formatted as
   "type FieldN" (format _103), or the empty-tuple filler (_104) when there
   are no sons, then the closing text plus platform newline. */
N_NIMCALL(Ropeobj178006*, gettupledesc_534777_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0, Intset268030* check0) {
Ropeobj178006* result0;
TY532811 LOC1;
Ropeobj178006* desc0;
NimStringDesc* LOC13;
result0 = (Ropeobj178006*)0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = structorunion_534001_839829468(typ0);
LOC1[1] = name0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_102), LOC1, 2);
desc0 = NIM_NIL;
{
NI i_534799_839829468;
NI HEX3Atmp_534820_839829468;
NI LOC3;
NI res_534823_839829468;
i_534799_839829468 = (NI)0;
HEX3Atmp_534820_839829468 = (NI)0;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
HEX3Atmp_534820_839829468 = (NI)(LOC3 - ((NI) 1));
res_534823_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC6;
if (!(res_534823_839829468 <= HEX3Atmp_534820_839829468)) goto LA5;
i_534799_839829468 = res_534823_839829468;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = gettypedescaux_533505_839829468(m0, (*typ0).sons->data[i_534799_839829468], check0);
LOC6[1] = rope_178401_2381377266(((NI64) (i_534799_839829468)));
addf_179205_2381377266(&desc0, ((NimStringDesc*) &T839829468_103), LOC6, 2);
res_534823_839829468 += ((NI) 1);
} LA5: ;
}
}
{
NimStringDesc* LOC11;
/* no members at all: dummy filler so the struct is legal C */
if (!(desc0 == NIM_NIL)) goto LA9;
LOC11 = (NimStringDesc*)0;
LOC11 = rawNewString(tnl_176644_4151366050->Sup.len + 11);
appendString(LOC11, ((NimStringDesc*) &T839829468_104));
appendString(LOC11, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC11);
}
goto LA7;
LA9: ;
{
add_178482_2381377266(&result0, desc0);
}
LA7: ;
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC13, ((NimStringDesc*) &T839829468_101));
appendString(LOC13, tnl_176644_4151366050);
add_178487_2381377266(&result0, LOC13);
return result0;
}
/* Core type-descriptor routine of the Nim C code generator (presumably
 * ccgtypes.getTypeDescAux — confirm against the Nim compiler sources).
 * Returns the rope holding the C name of typ0, emitting any required
 * typedef/struct declarations into the module's sections (m0->s[...]) as a
 * side effect. check0 is an id set used as a recursion guard: an id already
 * present while the type is not an imported C++ type triggers an internal
 * error ("recursive type?"). The structure is a big switch on the unique
 * type's kind; the numeric kind/flag constants are TTypeKind/TTypeFlag
 * ordinals whose names are not visible here — hedged guesses are marked. */
N_NIMCALL(Ropeobj178006*, gettypedescaux_533505_839829468)(Tcgen529027* m0, Ttype292840* typ0, Intset268030* check0) {
Ropeobj178006* result0;
Ttype292840* t_534942_839829468;
{ result0 = (Ropeobj178006*)0;
/* canonicalize: all caching below is keyed on the unique type, not typ0 */
t_534942_839829468 = getuniquetype_528640_2036603609(typ0);
{
if (!(t_534942_839829468 == NIM_NIL)) goto LA3;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_27));
}
LA3: ;
{
/* pull in the header that declares an imported symbol, if any */
if (!!(((*t_534942_839829468).sym == NIM_NIL))) goto LA7;
useheader_532369_839829468(m0, (*t_534942_839829468).sym);
}
LA7: ;
/* fast path: predefined/cached name available without emitting anything */
result0 = gettypepre_533972_839829468(m0, t_534942_839829468);
{
if (!!((result0 == NIM_NIL))) goto LA11;
goto BeforeRet;
}
LA11: ;
{
NIM_BOOL LOC15;
/* containsOrIncl: true means the id was already being processed */
LOC15 = containsorincl_268862_2627731572(check0, (*t_534942_839829468).Sup.id);
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572(check0, (*t_534942_839829468).Sup.id);
if (!LOC15) goto LA16;
{
NIM_BOOL LOC20;
NimStringDesc* LOC24;
NimStringDesc* LOC25;
LOC20 = (NIM_BOOL)0;
LOC20 = isimportedcpptype_533478_839829468(typ0);
if (LOC20) goto LA21;
LOC20 = isimportedcpptype_533478_839829468(t_534942_839829468);
LA21: ;
if (!!(LOC20)) goto LA22;
/* recursion on a non-imported type: report "illegal recursion in type X" */
LOC24 = (NimStringDesc*)0;
LOC25 = (NimStringDesc*)0;
LOC25 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC24 = rawNewString(LOC25->Sup.len + 28);
appendString(LOC24, ((NimStringDesc*) &T839829468_51));
appendString(LOC24, LOC25);
internalerror_196113_155036129(LOC24);
}
LA22: ;
}
LA16: ;
switch ((*t_534942_839829468).kind) {
/* kinds 21/22/23: pointer-like types (presumably ref/ptr/var — confirm
 * against TTypeKind); emit "elem*" with const-ness depending on command
 * mode / module flags */
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 23):
{
NimStringDesc* star0;
Ttype292840* et0;
Ttype292840* LOC38;
Ttype292840* etb0;
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC33;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*t_534942_839829468).kind == ((Ttypekind292244) 23));
if (!(LOC30)) goto LA31;
LOC30 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC33) goto LA34;
LOC33 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA34: ;
LOC29 = LOC33;
LA32: ;
if (!LOC29) goto LA35;
star0 = copyString(((NimStringDesc*) &T839829468_52));
}
goto LA27;
LA35: ;
{
star0 = copyString(((NimStringDesc*) &T839829468_53));
}
LA27: ;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059(typ0, IL64(211106232576256));
et0 = lastson_295377_850551059(LOC38);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
{
/* double indirection: element is itself in a pointer-kind mask, so
 * deref one more level and start star0 with '*' (42 == ASCII '*') */
if (!((IL64(281475110993936) &((NU64)1<<((NU)((*etb0).kind)&63U)))!=0)) goto LA41;
et0 = elemtype_320394_3876443242(etb0);
etb0 = skiptypes_296099_850551059(et0, IL64(211106232576256));
star0->data[((NI) 0)] = 42;
}
LA41: ;
switch ((*etb0).kind) {
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
NIM_BOOL LOC46;
Ropeobj178006* LOC50;
LOC46 = (NIM_BOOL)0;
LOC46 = isimportedcpptype_533478_839829468(etb0);
if (!(LOC46)) goto LA47;
LOC46 = ((*et0).kind == ((Ttypekind292244) 11));
LA47: ;
if (!LOC46) goto LA48;
/* imported C++ object: full descriptor, no forward declaration */
LOC50 = (Ropeobj178006*)0;
LOC50 = gettypedescaux_533505_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC50, star0);
}
goto LA44;
LA48: ;
{
/* plain object/tuple target: forward-declare, cache, and push for
 * later full emission */
Ttype292840* x0;
Ropeobj178006* name0;
Tidobj199004* LOC52;
TNimObject* LOC53;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
result0 = HEX26_178447_2381377266(name0, star0);
LOC52 = (Tidobj199004*)0;
LOC52 = &t_534942_839829468->Sup;
LOC53 = (TNimObject*)0;
LOC53 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC52, LOC53);
pushtype_533958_839829468(m0, x0);
}
LA44: ;
}
break;
case ((Ttypekind292244) 24):
{
/* pointer to kind 24 (presumably seq): forward name + extra '*' */
Ttype292840* x0;
Ropeobj178006* name0;
Ropeobj178006* LOC55;
Tidobj199004* LOC56;
TNimObject* LOC57;
x0 = getuniquetype_528640_2036603609(etb0);
name0 = gettypeforward_534039_839829468(m0, x0);
LOC55 = (Ropeobj178006*)0;
LOC55 = HEX26_178447_2381377266(name0, ((NimStringDesc*) &T839829468_53));
result0 = HEX26_178447_2381377266(LOC55, star0);
LOC56 = (Tidobj199004*)0;
LOC56 = &t_534942_839829468->Sup;
LOC57 = (TNimObject*)0;
LOC57 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC56, LOC57);
pushtype_533958_839829468(m0, x0);
}
break;
default:
{
/* any other element type: recurse for its descriptor and append stars */
Ropeobj178006* LOC59;
Tidobj199004* LOC60;
TNimObject* LOC61;
LOC59 = (Ropeobj178006*)0;
LOC59 = gettypedescaux_533505_839829468(m0, et0, check0);
result0 = HEX26_178447_2381377266(LOC59, star0);
LOC60 = (Tidobj199004*)0;
LOC60 = &t_534942_839829468->Sup;
LOC61 = (TNimObject*)0;
LOC61 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC60, LOC61);
}
break;
}
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
/* kinds 27/48: weak-dependency pointer — element described via
 * gettypedescweak, suffix T839829468_53 (presumably "*") */
Ropeobj178006* LOC63;
Tidobj199004* LOC64;
TNimObject* LOC65;
LOC63 = (Ropeobj178006*)0;
LOC63 = gettypedescweak_534079_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
result0 = HEX26_178447_2381377266(LOC63, ((NimStringDesc*) &T839829468_53));
LOC64 = (Tidobj199004*)0;
LOC64 = &t_534942_839829468->Sup;
LOC65 = (TNimObject*)0;
LOC65 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC64, LOC65);
}
break;
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 14):
{
/* enum handling (registerenum/Enumdesc below make this concrete):
 * kind 20 presumably wraps the real enum in its last son */
Ttype292840* t0;
{
if (!((*t_534942_839829468).kind == ((Ttypekind292244) 20))) goto LA69;
t0 = lastson_295377_850551059(t_534942_839829468);
}
goto LA67;
LA69: ;
{
t0 = t_534942_839829468;
}
LA67: ;
result0 = cachegettype_533593_839829468((*m0).typecache, t0);
{
if (!(result0 == NIM_NIL)) goto LA74;
result0 = gettypename_533313_839829468(t0);
{
NIM_BOOL LOC78;
NIM_BOOL LOC80;
Tidobj199004* LOC84;
TNimObject* LOC85;
NI size0;
NU32 owner0;
LOC78 = (NIM_BOOL)0;
LOC78 = isimportedcpptype_533478_839829468(t0);
if (LOC78) goto LA79;
LOC80 = (NIM_BOOL)0;
LOC80 = (((*(*t0).sym).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0);
if (!(LOC80)) goto LA81;
LOC80 = ((*(*t0).sym).magic == ((Tmagic292524) 0));
LA81: ;
LOC78 = LOC80;
LA79: ;
if (!!(LOC78)) goto LA82;
LOC84 = (Tidobj199004*)0;
LOC84 = &t0->Sup;
LOC85 = (TNimObject*)0;
LOC85 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC84, LOC85);
size0 = (NI)0;
{
/* negative first ordinal: emit the signed 4-byte typedef variant */
NI64 LOC88;
TY178507 LOC91;
LOC88 = (NI64)0;
LOC88 = firstord_320001_3876443242(t0);
if (!(LOC88 < IL64(0))) goto LA89;
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC91, 1);
size0 = ((NI) 4);
}
goto LA86;
LA89: ;
{
/* otherwise pick the typedef format by the enum's byte size */
NI64 LOC93;
LOC93 = (NI64)0;
LOC93 = getsize_320135_3876443242(t0);
size0 = ((NI) (LOC93));
switch (size0) {
case ((NI) 1):
{
TY178507 LOC95;
memset((void*)LOC95, 0, sizeof(LOC95));
LOC95[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_60), LOC95, 1);
}
break;
case ((NI) 2):
{
TY178507 LOC97;
memset((void*)LOC97, 0, sizeof(LOC97));
LOC97[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_61), LOC97, 1);
}
break;
case ((NI) 4):
{
TY178507 LOC99;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC99[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_59), LOC99, 1);
}
break;
case ((NI) 8):
{
TY178507 LOC101;
memset((void*)LOC101, 0, sizeof(LOC101));
LOC101[0] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_62), LOC101, 1);
}
break;
default:
{
internalerror_196100_155036129((*(*t0).sym).info, ((NimStringDesc*) &T839829468_63));
}
break;
}
}
LA86: ;
/* register the enum (name/size/owner/values) in the debug-info table
 * exactly once per (name, line, owner) triple */
owner0 = hashowner_532977_839829468((*t0).sym);
{
NIM_BOOL LOC105;
TY203017* vals0;
Enumdesc203007 LOC114;
LOC105 = (NIM_BOOL)0;
LOC105 = hasenum_203230_1926258066((&gdebuginfo_203470_1926258066), (*(*(*t0).sym).name).s, ((NI) ((*(*t0).sym).info.line)), owner0);
if (!!(LOC105)) goto LA106;
vals0 = (TY203017*) newSeq((&NTI203017), 0);
{
/* collect (field name, position) for every enum member node */
NI i_535144_839829468;
NI HEX3Atmp_535649_839829468;
NI LOC109;
NI res_535652_839829468;
i_535144_839829468 = (NI)0;
HEX3Atmp_535649_839829468 = (NI)0;
LOC109 = (NI)0;
LOC109 = len_293081_850551059((*t0).n);
HEX3Atmp_535649_839829468 = (NI)(LOC109 - ((NI) 1));
res_535652_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
TY203018 LOC112;
NimStringDesc* LOC113;
if (!(res_535652_839829468 <= HEX3Atmp_535649_839829468)) goto LA111;
i_535144_839829468 = res_535652_839829468;
field0 = (*(*(*t0).n).kindU.S6.sons->data[i_535144_839829468]).kindU.S4.sym;
memset((void*)(&LOC112), 0, sizeof(LOC112));
LOC112.Field0 = copyString((*(*field0).name).s);
LOC112.Field1 = (*field0).position;
/* manual seq append with refcount bookkeeping for the string field */
vals0 = (TY203017*) incrSeqV2(&(vals0)->Sup, sizeof(TY203018));
LOC113 = (NimStringDesc*)0;
LOC113 = vals0->data[vals0->Sup.len].Field0; vals0->data[vals0->Sup.len].Field0 = copyStringRC1(LOC112.Field0);
if (LOC113) nimGCunrefNoCycle(LOC113);
vals0->data[vals0->Sup.len].Field1 = LOC112.Field1;
++vals0->Sup.len;
res_535652_839829468 += ((NI) 1);
} LA111: ;
}
}
/* NOTE(review): the duplicated zero-init below is generated-code
 * redundancy — harmless but pointless */
memset((void*)(&LOC114), 0, sizeof(LOC114));
memset((void*)(&LOC114), 0, sizeof(LOC114));
LOC114.size = size0;
LOC114.owner = owner0;
LOC114.id = (*(*t0).sym).Sup.id;
LOC114.name = copyString((*(*(*t0).sym).name).s);
genericSeqAssign((&LOC114.values), vals0, (&NTI203017));
registerenum_203419_1926258066((&gdebuginfo_203470_1926258066), (&LOC114));
}
LA106: ;
}
LA82: ;
}
LA74: ;
}
break;
case ((Ttypekind292244) 25):
{
/* kind 25: proc type — emit a function-pointer typedef, or the
 * T839829468_75 variant for calling convention 8 (presumably closure) */
Tidobj199004* LOC116;
TNimObject* LOC117;
Ropeobj178006* rettype0;
Ropeobj178006* desc0;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC116 = (Tidobj199004*)0;
LOC116 = &t_534942_839829468->Sup;
LOC117 = (TNimObject*)0;
LOC117 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC116, LOC117);
rettype0 = (Ropeobj178006*)0;
desc0 = (Ropeobj178006*)0;
genprocparams_534115_839829468(m0, t_534942_839829468, &rettype0, &desc0, check0, NIM_TRUE, NIM_TRUE);
{
NIM_BOOL LOC120;
LOC120 = (NIM_BOOL)0;
LOC120 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC120)) goto LA121;
{
TY535235 LOC127;
if (!!(((*t_534942_839829468).callconv == ((Tcallingconvention292002) 8)))) goto LA125;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*t_534942_839829468).callconv)- 0]);
LOC127[1] = rettype0;
LOC127[2] = result0;
LOC127[3] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC127, 4);
}
goto LA123;
LA125: ;
{
TY535238 LOC129;
memset((void*)LOC129, 0, sizeof(LOC129));
LOC129[0] = result0;
LOC129[1] = rettype0;
LOC129[2] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC129, 3);
}
LA123: ;
}
LA121: ;
}
break;
case ((Ttypekind292244) 24):
{
/* kind 24 (presumably seq): forward-declare the struct once, cache
 * "name*" as the descriptor, then emit the payload declaration unless
 * the element kind is 3 */
Tidobj199004* LOC144;
Ropeobj178006* LOC145;
TNimObject* LOC146;
result0 = cachegettype_533593_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC142;
TNimObject* LOC143;
if (!(result0 == NIM_NIL)) goto LA133;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC137;
NimStringDesc* LOC140;
TY532811 LOC141;
LOC137 = (NIM_BOOL)0;
LOC137 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC137)) goto LA138;
LOC140 = (NimStringDesc*)0;
LOC140 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC141, 0, sizeof(LOC141));
LOC141[0] = structorunion_534001_839829468(t_534942_839829468);
LOC141[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC140, LOC141, 2);
}
LA138: ;
LOC142 = (Tidobj199004*)0;
LOC142 = &t_534942_839829468->Sup;
LOC143 = (TNimObject*)0;
LOC143 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC142, LOC143);
}
LA133: ;
LOC144 = (Tidobj199004*)0;
LOC144 = &t_534942_839829468->Sup;
LOC145 = (Ropeobj178006*)0;
LOC145 = HEX26_178447_2381377266(result0, ((NimStringDesc*) &T839829468_53));
LOC146 = (TNimObject*)0;
LOC146 = &LOC145->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC144, LOC146);
{
NIM_BOOL LOC149;
LOC149 = (NIM_BOOL)0;
LOC149 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC149)) goto LA150;
{
Ttype292840* LOC154;
NimStringDesc* LOC157;
NimStringDesc* LOC158;
TY532811 LOC166;
LOC154 = (Ttype292840*)0;
LOC154 = skiptypes_296099_850551059((*t_534942_839829468).sons->data[((NI) 0)], IL64(211106232576256));
if (!!(((*LOC154).kind == ((Ttypekind292244) 3)))) goto LA155;
LOC157 = (NimStringDesc*)0;
LOC158 = (NimStringDesc*)0;
{
/* prefix choice mirrors the const-ness decision used for star0 above */
NIM_BOOL LOC161;
LOC161 = (NIM_BOOL)0;
LOC161 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC161) goto LA162;
LOC161 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA162: ;
if (!LOC161) goto LA163;
LOC158 = copyString(((NimStringDesc*) &T839829468_76));
}
goto LA159;
LA163: ;
{
LOC158 = copyString(((NimStringDesc*) &T839829468_77));
}
LA159: ;
LOC157 = rawNewString(LOC158->Sup.len + 31);
appendString(LOC157, LOC158);
appendString(LOC157, ((NimStringDesc*) &T839829468_78));
memset((void*)LOC166, 0, sizeof(LOC166));
LOC166[0] = gettypedescaux_533505_839829468(m0, (*t_534942_839829468).sons->data[((NI) 0)], check0);
LOC166[1] = result0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 4))- 0], LOC157, LOC166, 2);
}
goto LA152;
LA155: ;
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_79));
}
LA152: ;
}
LA150: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_53));
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
/* kinds 4/16: fixed-size array-like — length clamped to at least 1 so
 * the emitted C array is never zero-length */
NI64 n0;
Tidobj199004* LOC173;
TNimObject* LOC174;
n0 = lengthord_320007_3876443242(t_534942_839829468);
{
if (!(n0 <= IL64(0))) goto LA171;
n0 = IL64(1);
}
LA171: ;
result0 = gettypename_533313_839829468(t_534942_839829468);
LOC173 = (Tidobj199004*)0;
LOC173 = &t_534942_839829468->Sup;
LOC174 = (TNimObject*)0;
LOC174 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC173, LOC174);
{
NIM_BOOL LOC177;
Ropeobj178006* foo0;
TY535238 LOC180;
LOC177 = (NIM_BOOL)0;
LOC177 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC177)) goto LA178;
foo0 = gettypedescaux_533505_839829468(m0, (*t_534942_839829468).sons->data[((NI) 1)], check0);
memset((void*)LOC180, 0, sizeof(LOC180));
LOC180[0] = foo0;
LOC180[1] = result0;
LOC180[2] = rope_178401_2381377266(n0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_80), LOC180, 3);
}
LA178: ;
}
break;
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 18):
{
{
/* imported C++ generic object instantiated via kind 11: expand the
 * pattern name, substituting generic slots marked with '\'' */
NIM_BOOL LOC184;
Ropeobj178006* cppname0;
NI i0;
NI chunkstart0;
Ropeobj178006* LOC226;
LOC184 = (NIM_BOOL)0;
LOC184 = isimportedcpptype_533478_839829468(t_534942_839829468);
if (!(LOC184)) goto LA185;
LOC184 = ((*typ0).kind == ((Ttypekind292244) 11));
LA185: ;
if (!LOC184) goto LA186;
cppname0 = gettypename_533313_839829468(t_534942_839829468);
i0 = ((NI) 0);
chunkstart0 = ((NI) 0);
{
while (1) {
if (!(i0 < ((*cppname0).data ? (*cppname0).data->Sup.len : 0))) goto LA189;
{
NI chunkend0;
NI idx0;
NI stars0;
/* 39 == ASCII '\'' — marks the start of a generic slot reference */
if (!((NU8)((*cppname0).data->data[i0]) == (NU8)(39))) goto LA192;
chunkend0 = (i0 - 1);
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC196;
NimStringDesc* LOC199;
Ttype292840* typeinslot0;
LOC196 = (NIM_BOOL)0;
LOC196 = scancppgenericslot_534827_839829468((*cppname0).data, (&i0), (&idx0), (&stars0));
if (!LOC196) goto LA197;
LOC199 = (NimStringDesc*)0;
LOC199 = copyStrLast((*cppname0).data, chunkstart0, chunkend0);
add_178487_2381377266(&result0, LOC199);
chunkstart0 = i0;
typeinslot0 = resolvestarsincpptype_534891_839829468(typ0, (NI)(idx0 + ((NI) 1)), stars0);
{
NIM_BOOL LOC202;
TY533289 LOC206;
Ropeobj178006* LOC207;
LOC202 = (NIM_BOOL)0;
LOC202 = (typeinslot0 == NIM_NIL);
if (LOC202) goto LA203;
LOC202 = ((*typeinslot0).kind == ((Ttypekind292244) 62));
LA203: ;
if (!LOC202) goto LA204;
memset((void*)LOC206, 0, sizeof(LOC206));
LOC207 = (Ropeobj178006*)0;
LOC207 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC206, 0);
add_178482_2381377266(&result0, LOC207);
}
goto LA200;
LA204: ;
{
Ropeobj178006* LOC209;
LOC209 = (Ropeobj178006*)0;
LOC209 = gettypedescaux_533505_839829468(m0, typeinslot0, check0);
add_178482_2381377266(&result0, LOC209);
}
LA200: ;
}
LA197: ;
}
goto LA190;
LA192: ;
{
i0 += ((NI) 1);
}
LA190: ;
} LA189: ;
}
{
NimStringDesc* LOC215;
/* at least one slot was substituted: flush the trailing chunk */
if (!!((chunkstart0 == ((NI) 0)))) goto LA213;
LOC215 = (NimStringDesc*)0;
LOC215 = copyStr((*cppname0).data, chunkstart0);
add_178487_2381377266(&result0, LOC215);
}
goto LA211;
LA213: ;
{
/* no slot markers: fall back to "name<arg1, arg2, ...>" built from
 * the instantiation's sons 1..len-2 */
result0 = HEX26_178447_2381377266(cppname0, ((NimStringDesc*) &T839829468_82));
{
NI i_535516_839829468;
NI HEX3Atmp_535665_839829468;
NI LOC218;
NI res_535668_839829468;
i_535516_839829468 = (NI)0;
HEX3Atmp_535665_839829468 = (NI)0;
LOC218 = (NI)0;
LOC218 = len_295339_850551059(typ0);
HEX3Atmp_535665_839829468 = (NI)(LOC218 - ((NI) 2));
res_535668_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC225;
if (!(res_535668_839829468 <= HEX3Atmp_535665_839829468)) goto LA220;
i_535516_839829468 = res_535668_839829468;
{
if (!(((NI) 1) < i_535516_839829468)) goto LA223;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_83));
}
LA223: ;
LOC225 = (Ropeobj178006*)0;
LOC225 = gettypedescaux_533505_839829468(m0, (*typ0).sons->data[i_535516_839829468], check0);
add_178482_2381377266(&result0, LOC225);
res_535668_839829468 += ((NI) 1);
} LA220: ;
}
}
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_84));
}
LA211: ;
/* NOTE(review): record descriptor computed for its side effects; the
 * returned rope LOC226 is deliberately unused here — confirm */
LOC226 = (Ropeobj178006*)0;
LOC226 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA182;
LA186: ;
{
/* ordinary object (17) / tuple (18): forward-declare once, cache, and
 * emit the full record/tuple body for non-imported types */
Tidobj199004* LOC241;
TNimObject* LOC242;
Ropeobj178006* recdesc0;
result0 = cachegettype_533593_839829468((*m0).forwtypecache, t_534942_839829468);
{
Tidobj199004* LOC239;
TNimObject* LOC240;
if (!(result0 == NIM_NIL)) goto LA230;
result0 = gettypename_533313_839829468(t_534942_839829468);
{
NIM_BOOL LOC234;
NimStringDesc* LOC237;
TY532811 LOC238;
LOC234 = (NIM_BOOL)0;
LOC234 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC234)) goto LA235;
LOC237 = (NimStringDesc*)0;
LOC237 = getforwardstructformat_534015_839829468(m0);
memset((void*)LOC238, 0, sizeof(LOC238));
LOC238[0] = structorunion_534001_839829468(t_534942_839829468);
LOC238[1] = result0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 2))- 0], LOC237, LOC238, 2);
}
LA235: ;
LOC239 = (Tidobj199004*)0;
LOC239 = &t_534942_839829468->Sup;
LOC240 = (TNimObject*)0;
LOC240 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).forwtypecache), LOC239, LOC240);
}
LA230: ;
LOC241 = (Tidobj199004*)0;
LOC241 = &t_534942_839829468->Sup;
LOC242 = (TNimObject*)0;
LOC242 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC241, LOC242);
{
if (!!(((*t_534942_839829468).kind == ((Ttypekind292244) 18)))) goto LA245;
recdesc0 = getrecorddesc_534643_839829468(m0, t_534942_839829468, result0, check0);
}
goto LA243;
LA245: ;
{
recdesc0 = gettupledesc_534777_839829468(m0, t_534942_839829468, result0, check0);
}
LA243: ;
{
NIM_BOOL LOC250;
LOC250 = (NIM_BOOL)0;
LOC250 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC250)) goto LA251;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], recdesc0);
}
LA251: ;
}
LA182: ;
}
break;
case ((Ttypekind292244) 19):
{
/* kind 19 (presumably set): typedef an unsigned integer of the set's
 * bit width for sizes 1/2/4/8, otherwise a byte-array variant */
Ttype292840* LOC254;
Ropeobj178006* LOC255;
Tidobj199004* LOC256;
TNimObject* LOC257;
LOC254 = (Ttype292840*)0;
LOC254 = lastson_295377_850551059(t_534942_839829468);
LOC255 = (Ropeobj178006*)0;
LOC255 = gettypename_533313_839829468(LOC254);
result0 = HEX26_178447_2381377266(LOC255, ((NimStringDesc*) &T839829468_105));
LOC256 = (Tidobj199004*)0;
LOC256 = &t_534942_839829468->Sup;
LOC257 = (TNimObject*)0;
LOC257 = &result0->Sup;
idtableput_299094_2984716966((&(*m0).typecache), LOC256, LOC257);
{
NIM_BOOL LOC260;
NI s0;
NI64 LOC263;
LOC260 = (NIM_BOOL)0;
LOC260 = isimportedtype_533451_839829468(t_534942_839829468);
if (!!(LOC260)) goto LA261;
LOC263 = (NI64)0;
LOC263 = getsize_320135_3876443242(t_534942_839829468);
s0 = ((NI) (LOC263));
switch (s0) {
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
TY532811 LOC265;
memset((void*)LOC265, 0, sizeof(LOC265));
LOC265[0] = result0;
LOC265[1] = rope_178401_2381377266(((NI64) ((NI)(s0 * ((NI) 8)))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_106), LOC265, 2);
}
break;
default:
{
TY532811 LOC267;
NI64 LOC268;
memset((void*)LOC267, 0, sizeof(LOC267));
LOC267[0] = result0;
LOC268 = (NI64)0;
LOC268 = getsize_320135_3876443242(t_534942_839829468);
LOC267[1] = rope_178401_2381377266(LOC268);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_107), LOC267, 2);
}
break;
}
}
LA261: ;
}
break;
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 13):
case ((Ttypekind292244) 15):
case ((Ttypekind292244) 46):
case ((Ttypekind292244) 47):
case ((Ttypekind292244) 49):
case ((Ttypekind292244) 8):
{
/* transparent/alias kinds: the descriptor is that of the last son */
Ttype292840* LOC270;
LOC270 = (Ttype292840*)0;
LOC270 = lastson_295377_850551059(t_534942_839829468);
result0 = gettypedescaux_533505_839829468(m0, LOC270, check0);
}
break;
default:
{
/* unexpected kind: internal error with the enum name, nil result */
NimStringDesc* LOC272;
LOC272 = (NimStringDesc*)0;
LOC272 = rawNewString(reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244))->Sup.len + 16);
appendString(LOC272, ((NimStringDesc*) &T839829468_108));
appendString(LOC272, reprEnum((NI)(*t_534942_839829468).kind, (&NTI292244)));
appendChar(LOC272, 41);
internalerror_196113_155036129(LOC272);
result0 = NIM_NIL;
}
break;
}
/* done emitting this type: remove it from the recursion guard */
excl_268841_2627731572(check0, (*t_534942_839829468).Sup.id);
}BeforeRet: ;
return result0;
}
/* Membership test: true when t0's kind index falls inside a 64-bit kind mask
 * (bit i set <=> kind i counts as compile-time-only). The mask constant is
 * generated from the Nim compiler's kind set — confirm its members there. */
static N_INLINE(NIM_BOOL, iscompiletimeonly_328706_3876443242)(Ttype292840* t0) {
	const NU64 ctOnlyKindMask = (NU64)IL64(576460752303423744);
	NU kindBit = (NU)((*t0).kind) & 63U;
	return (NIM_BOOL)(((ctOnlyKindMask >> kindBit) & 1U) != 0);
}
/* Chooses the storage location for a proc parameter: strip abstract type
 * wrappers (skiptypes with mask 8388864), then kinds inside the 64-bit kind
 * mask keep default storage (0) while every other kind is assigned storage
 * class 2. The mask's member kinds come from the generating Nim compiler —
 * confirm against its sources. */
N_NIMCALL(Tstorageloc292812, paramstorageloc_534098_839829468)(Tsym292834* param0) {
	Ttype292840* strippedType = skiptypes_296099_850551059((*param0).typ, 8388864);
	if ((IL64(281475110993936) & ((NU64)1 << ((NU)((*strippedType).kind) & 63U))) != 0) {
		return (Tstorageloc292812) 0; /* kind in mask: default storage */
	}
	return (Tstorageloc292812) 2;
}
/* Reports whether the C backend passes this parameter through a
 * compiler-introduced pointer. Two type flags force the answer outright
 * (bit 13 -> yes, bit 12 -> no); otherwise only kinds 17 and 18
 * (object/tuple-like — confirm against TTypeKind) may be pointer-passed,
 * and only when they are "big" (larger than two floats) or when option
 * bit 18 is set on the symbol. All other kinds pass by value. */
N_NIMCALL(NIM_BOOL, ccgintroducedptr_533611_839829468)(Tsym292834* s0) {
	Ttype292840* strippedType = skiptypes_296099_850551059((*s0).typ, IL64(211106232576256));
	if (((*strippedType).flags & (1U << ((NU)(((Ttypeflag292431) 13)) & 31U))) != 0) {
		return NIM_TRUE; /* flag 13 forces by-pointer */
	}
	if (((*strippedType).flags & (1U << ((NU)(((Ttypeflag292431) 12)) & 31U))) != 0) {
		return NIM_FALSE; /* flag 12 forces by-value */
	}
	switch ((*strippedType).kind) {
	case ((Ttypekind292244) 17): {
		NIM_BOOL wantsPtr;
		wantsPtr = (((*s0).options & (1U << ((NU)(((Toption169009) 18)) & 31U))) != 0);
		if (!wantsPtr) {
			/* size test only when the option bit did not already decide */
			NI64 objSize = getsize_320135_3876443242(strippedType);
			wantsPtr = (((NI64)((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < objSize);
		}
		if (wantsPtr) {
			return NIM_TRUE;
		}
		/* small object with flag bit 2 set and no base type: by value */
		if ((((*strippedType).flags & (1U << ((NU)(((Ttypeflag292431) 2)) & 31U))) != 0)
			&& (*strippedType).sons->data[((NI) 0)] == NIM_NIL) {
			return NIM_FALSE;
		}
		return NIM_TRUE;
	}
	case ((Ttypekind292244) 18): {
		/* tuple-like: size is computed unconditionally, option is fallback */
		NI64 tupleSize = getsize_320135_3876443242(strippedType);
		if ((((NI64)((NI)(floatsize_176642_4151366050 * ((NI) 2)))) < tupleSize)) {
			return NIM_TRUE;
		}
		return (NIM_BOOL)((((*s0).options & (1U << ((NU)(((Toption169009) 18)) & 31U))) != 0));
	}
	default:
		return NIM_FALSE;
	}
}
/* Maps a Nim return type to its C-level type kind. Currently a pure
 * delegation to maptype — presumably kept as a separate entry point so
 * return-position mapping can diverge later (confirm in the Nim sources). */
N_NIMCALL(Tctypekind529007, mapreturntype_533447_839829468)(Ttype292840* typ0) {
	return maptype_533394_839829468(typ0);
}
/* Generates the C return type (*rettype0) and parameter list (*params0) for
 * a Nim proc type t0 (generated Nim compiler code, presumably
 * ccgtypes.genProcParams — confirm against the Nim sources). Side effects:
 * fills each parameter symbol's .loc with its mangled C name and storage.
 * "Invalid" return types (isinvalidreturntype) become void plus a trailing
 * out-parameter. declareenvironment0 adds the closure environment pointer
 * for calling convention 8; weakdep0 selects gettypedescweak for parameter
 * types. GC-safe pointer writes go through unsureAsgnRef. */
N_NIMCALL(void, genprocparams_534115_839829468)(Tcgen529027* m0, Ttype292840* t0, Ropeobj178006** rettype0, Ropeobj178006** params0, Intset268030* check0, NIM_BOOL declareenvironment0, NIM_BOOL weakdep0) {
unsureAsgnRef((void**) (&(*params0)), NIM_NIL);
{
/* return type: "void" (format T839829468_26) when there is no return
 * value (sons[0] == nil) or the return type cannot be returned directly */
NIM_BOOL LOC3;
TY533289 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).sons->data[((NI) 0)] == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = isinvalidreturntype_533550_839829468((*t0).sons->data[((NI) 0)]);
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
unsureAsgnRef((void**) (&(*rettype0)), HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC7, 0));
}
goto LA1;
LA5: ;
{
unsureAsgnRef((void**) (&(*rettype0)), gettypedescaux_533505_839829468(m0, (*t0).sons->data[((NI) 0)], check0));
}
LA1: ;
{
/* for i in 1 ..< sonslen(t0.n): one formal parameter per son */
NI i_534152_839829468;
NI HEX3Atmp_534353_839829468;
NI LOC10;
NI res_534356_839829468;
i_534152_839829468 = (NI)0;
HEX3Atmp_534353_839829468 = (NI)0;
LOC10 = (NI)0;
LOC10 = sonslen_295351_850551059((*t0).n);
HEX3Atmp_534353_839829468 = (NI)(LOC10 - ((NI) 1));
res_534356_839829468 = ((NI) 1);
{
while (1) {
if (!(res_534356_839829468 <= HEX3Atmp_534353_839829468)) goto LA12;
i_534152_839829468 = res_534356_839829468;
{
Tsym292834* param0;
Ropeobj178006* LOC29;
Tstorageloc292812 LOC30;
TY533289 LOC45;
Ropeobj178006* LOC46;
Ttype292840* arr0;
NI j0;
{
/* each son must be a symbol node (kind 3) */
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kind == ((Tnodekind292020) 3)))) goto LA16;
internalerror_196100_155036129((*(*t0).n).info, ((NimStringDesc*) &T839829468_109));
}
LA16: ;
param0 = (*(*(*t0).n).kindU.S6.sons->data[i_534152_839829468]).kindU.S4.sym;
{
/* compile-time-only parameters produce no C parameter at all */
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC20) goto LA21;
goto LA13;
}
LA21: ;
{
/* separator (T839829468_110, presumably ", ") between parameters */
TY533289 LOC27;
Ropeobj178006* LOC28;
if (!!(((*params0) == NIM_NIL))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC27, 0);
add_178482_2381377266(params0, LOC28);
}
LA25: ;
/* bind the parameter's C identity: mangled name + storage location */
LOC29 = (Ropeobj178006*)0;
LOC29 = manglename_533205_839829468(param0);
LOC30 = (Tstorageloc292812)0;
LOC30 = paramstorageloc_534098_839829468(param0);
fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, LOC29, LOC30);
{
/* big/flagged value params: pass by introduced pointer ("T *") and
 * mark the loc (flag 0 set, storage reset to default) */
NIM_BOOL LOC33;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC33 = (NIM_BOOL)0;
LOC33 = ccgintroducedptr_533611_839829468(param0);
if (!LOC33) goto LA34;
LOC36 = (Ropeobj178006*)0;
LOC36 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_53), LOC37, 0);
add_178482_2381377266(params0, LOC38);
(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
(*param0).loc.s = ((Tstorageloc292812) 0);
}
goto LA31;
LA34: ;
{
/* weak dependency requested by caller: weak descriptor lookup */
Ropeobj178006* LOC42;
if (!weakdep0) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = gettypedescweak_534079_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC42);
}
goto LA31;
LA40: ;
{
Ropeobj178006* LOC44;
LOC44 = (Ropeobj178006*)0;
LOC44 = gettypedescaux_533505_839829468(m0, (*param0).typ, check0);
add_178482_2381377266(params0, LOC44);
}
LA31: ;
/* emit "<space>name" after the type text */
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC45, 0);
add_178482_2381377266(params0, LOC46);
add_178482_2381377266(params0, (*param0).loc.r);
arr0 = (*param0).typ;
{
/* kind 23 (presumably var) wraps the real type in sons[0] */
if (!((*arr0).kind == ((Ttypekind292244) 23))) goto LA49;
arr0 = (*arr0).sons->data[((NI) 0)];
}
LA49: ;
j0 = ((NI) 0);
{
/* open-array-like kinds carry hidden companion parameters
 * ("name_Len<j>" via format T839829468_112), one per nesting level */
while (1) {
TY532811 LOC57;
if (!((IL64(281475110928384) &((NU64)1<<((NU)((*arr0).kind)&63U)))!=0)) goto LA52;
{
if (!((*(*param0).typ).kind == ((Ttypekind292244) 23))) goto LA55;
(*param0).loc.s = ((Tstorageloc292812) 0);
}
LA55: ;
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = (*param0).loc.r;
LOC57[1] = rope_178401_2381377266(((NI64) (j0)));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_112), LOC57, 2);
j0 += ((NI) 1);
arr0 = (*arr0).sons->data[((NI) 0)];
} LA52: ;
}
} LA13: ;
res_534356_839829468 += ((NI) 1);
} LA12: ;
}
}
{
/* invalid-but-present return type: append a trailing "Result" out
 * parameter (format T839829468_113) instead of returning by value */
NIM_BOOL LOC60;
Ttype292840* arr0;
TY533289 LOC76;
LOC60 = (NIM_BOOL)0;
LOC60 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
if (!(LOC60)) goto LA61;
LOC60 = isinvalidreturntype_533550_839829468((*t0).sons->data[((NI) 0)]);
LA61: ;
if (!LOC60) goto LA62;
arr0 = (*t0).sons->data[((NI) 0)];
{
if (!!(((*params0) == NIM_NIL))) goto LA66;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA66: ;
{
/* non-array (ctype kind != 17) results get pointer syntax "T *" */
Tctypekind529007 LOC70;
Ropeobj178006* LOC73;
LOC70 = (Tctypekind529007)0;
LOC70 = mapreturntype_533447_839829468((*t0).sons->data[((NI) 0)]);
if (!!((LOC70 == ((Tctypekind529007) 17)))) goto LA71;
LOC73 = (Ropeobj178006*)0;
LOC73 = gettypedescweak_534079_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC73);
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_53));
}
goto LA68;
LA71: ;
{
Ropeobj178006* LOC75;
LOC75 = (Ropeobj178006*)0;
LOC75 = gettypedescaux_533505_839829468(m0, arr0, check0);
add_178482_2381377266(params0, LOC75);
}
LA68: ;
memset((void*)LOC76, 0, sizeof(LOC76));
addf_179205_2381377266(params0, ((NimStringDesc*) &T839829468_113), LOC76, 0);
}
LA62: ;
{
/* calling convention 8 (presumably closure): append the hidden
 * environment pointer parameter (T839829468_114) when requested */
NIM_BOOL LOC79;
LOC79 = (NIM_BOOL)0;
LOC79 = ((*t0).callconv == ((Tcallingconvention292002) 8));
if (!(LOC79)) goto LA80;
LOC79 = declareenvironment0;
LA80: ;
if (!LOC79) goto LA81;
{
if (!!(((*params0) == NIM_NIL))) goto LA85;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA85: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_114));
}
LA81: ;
{
/* type flag 0 set: append C varargs "..." (T839829468_115) */
if (!(((*t0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA89;
{
if (!!(((*params0) == NIM_NIL))) goto LA93;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_110));
}
LA93: ;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_115));
}
LA89: ;
{
/* no parameters at all: "void"-style placeholder; else closing text */
if (!((*params0) == NIM_NIL)) goto LA97;
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_116));
}
goto LA95;
LA97: ;
{
add_178487_2381377266(params0, ((NimStringDesc*) &T839829468_117));
}
LA95: ;
/* prepend the opening "(" (T839829468_118) */
unsureAsgnRef((void**) (&(*params0)), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_118), (*params0)));
}
/* Generates the C prototype/header rope for proc symbol prc0 (generated Nim
 * compiler code, presumably ccgtypes.genProcHeader — confirm against the Nim
 * sources): line directive, optional import/export or inline qualifier,
 * then either the standard "callconv rettype name(params)" format or the
 * symbol's custom .constraint format string. Also fills prc0's .loc with
 * its mangled name.
 *
 * FIX: the call to genprocparams below had its fourth argument corrupted by
 * HTML-entity mangling: "&para" in "&params0" had been turned into the
 * character '¶' ("¶ms0"), which is not valid C. Restored to "&params0". */
N_NIMCALL(Ropeobj178006*, genprocheader_535867_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Ropeobj178006* result0;
Ropeobj178006* rettype0;
Ropeobj178006* params0;
Intset268030 check0;
Ropeobj178006* LOC13;
result0 = (Ropeobj178006*)0;
rettype0 = (Ropeobj178006*)0;
params0 = (Ropeobj178006*)0;
genclinedir_532813_839829468(&result0, (*prc0).info);
{
/* loc flag 5 set: emit import/export qualifier chosen by module flag 3 */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0)) goto LA3;
{
if (!(((*m0).flags &(1U<<((NU)(((Codegenflag529025) 3))&7U)))!=0)) goto LA7;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
goto LA5;
LA7: ;
{
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_23));
}
LA5: ;
}
goto LA1;
LA3: ;
{
/* calling convention 5: emit qualifier T839829468_24 (presumably inline) */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_24));
}
goto LA1;
LA11: ;
LA1: ;
/* NOTE(review): the memset/chckNil/memset sequence is generated-code
 * redundancy (second zero-init repeats the first) — harmless */
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
LOC13 = (Ropeobj178006*)0;
LOC13 = manglename_533205_839829468(prc0);
fillloc_532282_839829468((&(*prc0).loc), ((Tlockind292808) 7), (*prc0).typ, LOC13, ((Tstorageloc292812) 0));
genprocparams_534115_839829468(m0, (*prc0).typ, &rettype0, &params0, (&check0), NIM_TRUE, NIM_FALSE);
{
TY535235 LOC18;
/* generated "!ptr == 0" means "constraint != NULL"; no constraint ->
 * standard header format T839829468_119 */
if (!(*prc0).constraint == 0) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*(*prc0).typ).callconv)- 0]);
LOC18[1] = rettype0;
LOC18[2] = (*prc0).loc.r;
LOC18[3] = params0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_119), LOC18, 4);
}
goto LA14;
LA16: ;
{
/* custom header: the constraint node's string literal is the format */
TY535238 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rettype0;
LOC20[1] = (*prc0).loc.r;
LOC20[2] = params0;
result0 = HEX25_178905_2381377266((*(*prc0).constraint).kindU.S3.strval, LOC20, 3);
}
LA14: ;
return result0;
}
/* Mangled `[]` operator for AST nodes: returns child `i0` of `n0`
 * (no bounds checking is performed here). */
static N_INLINE(Tnode292802*, HEX5BHEX5D_293238_850551059)(Tnode292802* n0, NI i0) {
return n0->kindU.S6.sons->data[i0];
}
/* Look for an "easy" result assignment inside statement node `n0`:
 * returns the assigned expression node if the statement is (or ends in) a
 * direct assignment to a symbol of kind 11 (presumably the `result`
 * variable -- confirm against Tsymkind), otherwise NIM_NIL.
 * As a side effect it sets node flag bit 14 on the matching node. */
N_NIMCALL(Tnode292802*, easyresultasgn_560191_839829468)(Tnode292802* n0) {
Tnode292802* result0;
{ result0 = (Tnode292802*)0;
switch ((*n0).kind) {
/* statement-list-like nodes (kinds 115/126): skip a trailing run of
 * "trivial" nodes, then recurse into the first non-trivial child. */
case ((Tnodekind292020) 115):
case ((Tnodekind292020) 126):
{
NI i0;
i0 = ((NI) 0);
{
while (1) {
NIM_BOOL LOC4;
NI LOC5;
Tnode292802* LOC7;
LOC4 = (NIM_BOOL)0;
LOC5 = (NI)0;
LOC5 = len_293081_850551059(n0);
LOC4 = (i0 < LOC5);
if (!(LOC4)) goto LA6;
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(n0, i0);
/* advance while the child kind is in the "ignorable" set
 * (1, 79..81, 84, 98, 101, 125 -- comments/pragmas/empties, presumably) */
LOC4 = ((*LOC7).kind == ((Tnodekind292020) 1) || (*LOC7).kind >= ((Tnodekind292020) 79) && (*LOC7).kind <= ((Tnodekind292020) 81) || (*LOC7).kind == ((Tnodekind292020) 84) || (*LOC7).kind == ((Tnodekind292020) 98) || (*LOC7).kind == ((Tnodekind292020) 101) || (*LOC7).kind == ((Tnodekind292020) 125));
LA6: ;
if (!LOC4) goto LA3;
i0 += ((NI) 1);
} LA3: ;
}
{
NI LOC10;
Tnode292802* LOC13;
LOC10 = (NI)0;
LOC10 = len_293081_850551059(n0);
if (!(i0 < LOC10)) goto LA11;
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(n0, i0);
result0 = easyresultasgn_560191_839829468(LOC13);
}
LA11: ;
}
break;
/* assignment-like nodes (kinds 73/74): if the LHS is a symbol (kind 3)
 * of symbol-kind 11, mark this node and return the RHS (child 1). */
case ((Tnodekind292020) 73):
case ((Tnodekind292020) 74):
{
{
NIM_BOOL LOC17;
Tnode292802* LOC18;
Tnode292802* LOC20;
LOC17 = (NIM_BOOL)0;
LOC18 = (Tnode292802*)0;
LOC18 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = ((*LOC18).kind == ((Tnodekind292020) 3));
if (!(LOC17)) goto LA19;
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
LOC17 = (((Tsymkind292435) 11) == (*(*LOC20).kindU.S4.sym).kind);
LA19: ;
if (!LOC17) goto LA21;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
result0 = HEX5BHEX5D_293238_850551059(n0, ((NI) 1));
goto BeforeRet;
}
LA21: ;
}
break;
/* wrapper node (kind 109): recurse into child 0; if that yielded a
 * result, propagate flag bit 14 onto this wrapper too. */
case ((Tnodekind292020) 109):
{
{
NI LOC26;
Tnode292802* LOC29;
LOC26 = (NI)0;
LOC26 = len_293081_850551059(n0);
if (!(((NI) 0) < LOC26)) goto LA27;
LOC29 = (Tnode292802*)0;
LOC29 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
result0 = easyresultasgn_560191_839829468(LOC29);
{
if (!!((result0 == NIM_NIL))) goto LA32;
(*n0).flags |= ((NU16)1)<<((((Tnodeflag292427) 14))%(sizeof(NU16)*8));
}
LA32: ;
}
LA27: ;
}
break;
default:
{
}
break;
}
}BeforeRet: ;
return result0;
}
/* Public entry point for C type-descriptor generation: sets up a fresh
 * scratch int-set (for recursion/cycle tracking in the aux routine) and
 * delegates to gettypedescaux. */
N_NIMCALL(Ropeobj178006*, gettypedesc_535673_839829468)(Tcgen529027* m0, Ttype292840* typ0) {
Intset268030 marked0;
memset((void*)(&marked0), 0, sizeof(marked0));
chckNil((void*)(&marked0));
memset((void*)(&marked0), 0, sizeof(marked0));
initintset_268885_2627731572((&marked0));
return gettypedescaux_533505_839829468(m0, typ0, (&marked0));
}
/* Build the C declaration rope for local variable symbol `s0` in proc `p0`.
 * First fills in the symbol's loc (mangled name, loc kind 2, storage 2) if it
 * is still unset (loc kind 0); symbols of kind 9 additionally get loc flag
 * bit 2.  Without a `constraint` the declaration is
 * "<typedesc> [qualifier flags] <name>"; with one, the user-supplied pattern
 * string is formatted with [typedesc, name]. */
N_NIMCALL(Ropeobj178006*, localvardecl_538532_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
Ropeobj178006* LOC5;
/* lazily initialize the symbol's location */
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 2), (*s0).typ, LOC5, ((Tstorageloc292812) 2));
{
if (!((*s0).kind == ((Tsymkind292435) 9))) goto LA8;
(*s0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA8: ;
}
LA3: ;
result0 = gettypedesc_535673_839829468((*p0).module, (*s0).loc.t);
{
if (!(*s0).constraint == 0) goto LA12;
{
/* symbol flag bit 8 adds qualifier string T..._121 (e.g. register/
 * volatile -- confirm against the string table) */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA16;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_121));
}
LA16: ;
{
/* symbol flag bit 7 adds qualifier string T..._122 */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA20;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_122));
}
LA20: ;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_111));
add_178482_2381377266(&result0, (*s0).loc.r);
}
goto LA10;
LA12: ;
{
TY532811 LOC23;
/* constraint present: format pattern with [typedesc, name] */
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = result0;
LOC23[1] = (*s0).loc.r;
result0 = HEX25_178905_2381377266((*(*s0).constraint).kindU.S3.strval, LOC23, 2);
}
LA10: ;
return result0;
}
/* Initialize a Tloc in place: kind, storage and type from the arguments,
 * the rope name cleared to nil, and all flags zeroed.  The ref-typed fields
 * go through the GC write barrier (unsureAsgnRef). */
N_NIMCALL(void, initloc_532273_839829468)(Tloc292816* result0, Tlockind292808 k0, Ttype292840* typ0, Tstorageloc292812 s0) {
result0->k = k0;
result0->s = s0;
result0->flags = 0;
unsureAsgnRef((void**) (&result0->t), typ0);
unsureAsgnRef((void**) (&result0->r), NIM_NIL);
}
/* Prepare `result0` as a fresh location for expression `e0`, mark loc flag
 * bit 8 (single-use marker, presumably -- confirm against Tlocflag), then
 * generate code for the expression into it. */
N_NIMCALL(void, initlocexprsingleuse_539289_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
initloc_532273_839829468(result0, ((Tlockind292808) 0), e0->typ, ((Tstorageloc292812) 0));
result0->flags |= ((NU16)1)<<((((Tlocflag292810) 8))%(sizeof(NU16)*8));
expr_539248_839829468(p0, e0, result0);
}
/* Return the address of section `s0` in the innermost (last) open block of
 * proc `p0`.  Note: this is a pointer into the blocks seq; it is computed
 * against the current last element. */
static N_INLINE(Ropeobj178006**, s_529179_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
NI last0;
last0 = (NI)((p0->blocks ? p0->blocks->Sup.len : 0) - ((NI) 1));
return &p0->blocks->data[last0].sections[(s0)- 0];
}
/* Prepend one indentation unit per open block (blocks.len repetitions) in
 * front of rope `r0` and return the result. */
N_NIMCALL(Ropeobj178006*, indentline_532656_839829468)(Tcproc529021* p0, Ropeobj178006* r0) {
Ropeobj178006* result0;
NI depth0;
NI j0;
result0 = r0;
depth0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
/* inclusive upper bound: blocks.len iterations total; none when empty */
for (j0 = ((NI) 0); j0 <= depth0; j0++) {
prepend_178893_2381377266(&result0, indent_532655_839829468);
}
return result0;
}
/* Format `frmt0` with `args0` via ropecg (which resolves #... compiler
 * symbols), indent the line to the current block depth, and append it to
 * section `s0` of the innermost block.  The destination address is taken
 * before formatting, matching the original evaluation order. */
N_NIMCALL(void, linefmt_532714_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** dest0;
Ropeobj178006* formatted0;
dest0 = s_529179_3723162438(p0, s0);
formatted0 = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
add_178482_2381377266(dest0, indentline_532656_839829468(p0, formatted0));
}
/* Read a location as a C rvalue: the loc's rope, wrapped through format
 * T..._124 when flag bit 0 is set (indirect location -- presumably a
 * dereference wrapper; confirm against the string table). */
N_NIMCALL(Ropeobj178006*, rdloc_538188_839829468)(Tloc292816* a0) {
Ropeobj178006* result0;
result0 = a0->r;
if ((a0->flags & (1U<<((NU)(((Tlocflag292810) 0))&15U))) != 0) {
TY178507 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), fmtargs0, 1);
}
return result0;
}
/* Append an already-built rope `r0` as one indented line to section `s0`
 * of the innermost block. */
N_NIMCALL(void, line_532690_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, Ropeobj178006* r0) {
Ropeobj178006** dest0;
dest0 = s_529179_3723162438(p0, s0);
add_178482_2381377266(dest0, indentline_532656_839829468(p0, r0));
}
/* Like linefmt, but formats with the plain `%` formatter (no compiler-symbol
 * resolution) before indenting and appending to section `s0`. */
N_NIMCALL(void, linef_532700_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** dest0;
Ropeobj178006* formatted0;
dest0 = s_529179_3723162438(p0, s0);
formatted0 = HEX25_178905_2381377266(frmt0, args0, args0Len0);
add_178482_2381377266(dest0, indentline_532656_839829468(p0, formatted0));
}
/* Emit the common part of a runtime type-info (TNimType) definition for
 * `typ0` into module `m0`: the init statement (name, size, kind, base),
 * an optional flags statement, and a registration line in section 9.
 * `origtype0` is used for sizeof when compiling for a target where the
 * skipped type would be wrong (see the gcmd/flag test below). */
N_NIMCALL(void, gentypeinfoauxbase_535960_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0, Ropeobj178006* base0) {
NI nimtypekind0;
Ropeobj178006* size0;
TY535235 LOC17;
NI flags0;
Ropeobj178006* LOC33;
TY532811 LOC34;
NimStringDesc* LOC35;
nimtypekind0 = (NI)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* objects that lack an embedded type field are reported as kind 18 */
LOC3 = isobjlackingtypefield_533515_839829468(typ0);
if (!LOC3) goto LA4;
nimtypekind0 = ((NI) 18);
}
goto LA1;
LA4: ;
{
nimtypekind0 = ((NI) ((*typ0).kind));
}
LA1: ;
size0 = (Ropeobj178006*)0;
{
/* type flag bit 0 set: use literal size string T..._133 instead of a
 * sizeof(<typedesc>) expression */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0)) goto LA9;
size0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_133));
}
goto LA7;
LA9: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
/* gcmd == 2 or module symbol flag bit 27: size from the original type */
LOC12 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC12) goto LA13;
LOC12 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA13: ;
if (!LOC12) goto LA14;
size0 = gettypedesc_535673_839829468(m0, origtype0);
}
goto LA7;
LA14: ;
{
size0 = gettypedesc_535673_839829468(m0, typ0);
}
LA7: ;
/* emit init line (section 14): [name, size, kind, base] via T..._134 */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = name0;
LOC17[1] = size0;
LOC17[2] = rope_178401_2381377266(((NI64) (nimtypekind0)));
LOC17[3] = base0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_134), LOC17, 4);
flags0 = ((NI) 0);
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
/* flag bit 1: type contains no GC'ed references */
LOC20 = containsgarbagecollectedref_320117_3876443242(typ0);
if (!!(LOC20)) goto LA21;
flags0 = (NI)(flags0 | ((NI) 1));
}
LA21: ;
{
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
/* flag bit 2: type cannot form a reference cycle */
LOC25 = canformacycle_320123_3876443242(typ0);
if (!!(LOC25)) goto LA26;
flags0 = (NI)(flags0 | ((NI) 2));
}
LA26: ;
{
TY532811 LOC32;
if (!!((flags0 == ((NI) 0)))) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
LOC32[1] = rope_178401_2381377266(((NI64) (flags0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_135), LOC32, 2);
}
LA30: ;
/* pull in the runtime symbol T..._129 and emit the declaration/registration
 * line (section 9) with [name, human-readable type string] */
LOC33 = (Ropeobj178006*)0;
LOC33 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = name0;
LOC35 = (NimStringDesc*)0;
LOC35 = typetostring_320017_3876443242(typ0, ((Tprefereddesc320011) 0));
LOC34[1] = rope_178277_2381377266(LOC35);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_136), LOC34, 2);
}
/* Allocate the next slot in the module's static TNimNode array: formats
 * "<typenodesname>[<index>]" (pattern T..._138) and bumps the counter. */
N_NIMCALL(Ropeobj178006*, getnimnode_535945_839829468)(Tcgen529027* m0) {
TY532811 fmtargs0;
Ropeobj178006* node0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = m0->typenodesname;
fmtargs0[1] = rope_178401_2381377266(((NI64) (m0->typenodes)));
node0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), fmtargs0, 2);
m0->typenodes += ((NI) 1);
return node0;
}
/* Emit runtime type-info for a tuple type `typ0` under rope name `name0`:
 * base info via gentypeinfoauxbase (base string T..._18), then one TNimNode
 * per field linked into a temp node array, and finally the node attached to
 * the type-info record. */
N_NIMCALL(void, gentupleinfo_536551_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* LOC1;
Ropeobj178006* expr0;
NI length0;
TY532811 LOC15;
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, LOC1);
expr0 = getnimnode_535945_839829468(m0);
length0 = sonslen_295327_850551059(typ0);
{
Ropeobj178006* tmp0;
TY532811 LOC6;
TY535238 LOC12;
/* non-empty tuple: declare a node-pointer array of `length0` (section 12,
 * pattern T..._139), fill it field by field, then hook it up. */
if (!(((NI) 0) < length0)) goto LA4;
tmp0 = gettempname_533598_839829468(m0);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = tmp0;
LOC6[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC6, 2);
{
NI i_536573_839829468;
NI HEX3Atmp_536592_839829468;
NI res_536595_839829468;
i_536573_839829468 = (NI)0;
HEX3Atmp_536592_839829468 = (NI)0;
HEX3Atmp_536592_839829468 = (NI)(length0 - ((NI) 1));
res_536595_839829468 = ((NI) 0);
{
/* for each field i: allocate a TNimNode, store it at tmp[i]
 * (T..._140), and initialize it with offset/type-info (T..._141). */
while (1) {
Ttype292840* a0;
Ropeobj178006* tmp20;
TY535238 LOC10;
TY535235 LOC11;
if (!(res_536595_839829468 <= HEX3Atmp_536592_839829468)) goto LA9;
i_536573_839829468 = res_536595_839829468;
a0 = (*typ0).sons->data[i_536573_839829468];
tmp20 = getnimnode_535945_839829468(m0);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0;
LOC10[1] = rope_178401_2381377266(((NI64) (i_536573_839829468)));
LOC10[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC10, 3);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = tmp20;
LOC11[1] = gettypedesc_535673_839829468(m0, typ0);
LOC11[2] = rope_178401_2381377266(((NI64) (i_536573_839829468)));
LOC11[3] = gentypeinfo_535941_839829468(m0, a0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_141), LOC11, 4);
res_536595_839829468 += ((NI) 1);
} LA9: ;
}
}
/* link the filled array into the root node (pattern T..._142) */
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = expr0;
LOC12[1] = rope_178401_2381377266(((NI64) (length0)));
LOC12[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC12, 3);
}
goto LA2;
LA4: ;
{
TY532811 LOC14;
/* empty tuple: just initialize the root node (pattern T..._143) */
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC14, 2);
}
LA2: ;
/* attach the node tree to the type-info record (pattern T..._144) */
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = name0;
LOC15[1] = expr0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC15, 2);
}
/* Construct a synthetic type standing in for a closure: a kind-18 container
 * holding a kind-26 son and a kind-22 (ref, presumably) son that itself
 * wraps another kind-18 type -- confirm the ordinals against Ttypekind.
 * Construction order matches the original exactly. */
N_NIMCALL(Ttype292840*, fakeclosuretype_537010_839829468)(Tsym292834* owner0) {
Ttype292840* outer0;
Ttype292840* first0;
Ttype292840* refty0;
Ttype292840* inner0;
outer0 = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
first0 = newtype_295107_850551059(((Ttypekind292244) 26), owner0);
rawaddson_296394_850551059(outer0, first0);
refty0 = newtype_295107_850551059(((Ttypekind292244) 22), owner0);
inner0 = newtype_295107_850551059(((Ttypekind292244) 18), owner0);
rawaddson_296394_850551059(refty0, inner0);
rawaddson_296394_850551059(outer0, refty0);
return outer0;
}
/* Emit type-info for `typ0`, computing the base-type info rope first:
 * if the type has a non-nil first son, its type-info is the base (with a
 * skiptypes pass for kind-17 types, presumably objects); otherwise the
 * literal base string T..._18 is used. */
N_NIMCALL(void, gentypeinfoaux_536027_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
Ropeobj178006* base0;
base0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NI LOC4;
Ttype292840* x0;
LOC3 = (NIM_BOOL)0;
LOC4 = (NI)0;
LOC4 = sonslen_295327_850551059(typ0);
LOC3 = (((NI) 0) < LOC4);
if (!(LOC3)) goto LA5;
LOC3 = !(((*typ0).sons->data[((NI) 0)] == NIM_NIL));
LA5: ;
if (!LOC3) goto LA6;
x0 = (*typ0).sons->data[((NI) 0)];
{
/* kind 17: skip abstract wrappers (mask 211106247215360) on the base */
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA10;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA10: ;
base0 = gentypeinfo_535941_839829468(m0, x0);
}
goto LA1;
LA6: ;
{
base0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
}
LA1: ;
gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, base0);
}
/* A type is a "complex value type" when its kind is in the bit mask 983056,
 * or when it is a proc type (kind 25) with calling convention 8 (closure,
 * presumably -- confirm against Tcallingconvention).  The goto-based
 * short-circuit of the original is expressed with && / || directly. */
static N_INLINE(NIM_BOOL, iscomplexvaluetype_538317_839829468)(Ttype292840* t0) {
NIM_BOOL inmask0;
inmask0 = ((983056 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0);
if (inmask0) {
return inmask0;
}
return (NIM_BOOL)(t0->kind == ((Ttypekind292244) 25) && t0->callconv == ((Tcallingconvention292002) 8));
}
/* Request the header file T..._151 (string.h, judging by the name) for
 * module `m0`, at most once: codegen flag bit 4 records that it was already
 * added. */
N_NIMCALL(void, usestringh_532345_839829468)(Tcgen529027* m0) {
if ((m0->flags & (1U<<((NU)(((Codegenflag529025) 4))&7U))) != 0) {
return;
}
m0->flags |= ((NU8)1)<<((((Codegenflag529025) 4))%(sizeof(NU8)*8));
/* includestr's boolean result was discarded in the original too */
(void) includestr_147249_3771138726((&m0->headerfiles), ((NimStringDesc*) &T839829468_151));
}
/* Render a location as an address expression.  When the loc is not already
 * indirect (flag bit 0 clear) and its mapped C type is not kind 17, wrap the
 * rope as T..._128 <r> T..._117 (presumably "(&" ... ")").  maptype is only
 * invoked when the flag test passes, as in the original. */
N_NIMCALL(Ropeobj178006*, addrloc_538204_839829468)(Tloc292816* a0) {
Ropeobj178006* result0;
result0 = a0->r;
if (!((a0->flags & (1U<<((NU)(((Tlocflag292810) 0))&15U))) != 0)) {
Tctypekind529007 mapped0;
mapped0 = maptype_533394_839829468(a0->t);
if (!(mapped0 == ((Tctypekind529007) 17))) {
result0 = HEX26_178447_2381377266(HEX26_178452_2381377266(((NimStringDesc*) &T839829468_128), result0), ((NimStringDesc*) &T839829468_117));
}
}
return result0;
}
/* Emit code initializing the hidden type field(s) of an object location
 * `a0` of type `t0` into `section0`, depending on what
 * analyseobjectwithtypefield reports:
 *   0 - nothing to do;
 *   1 - the object itself has a type field: navigate (via .Sup chains when
 *       not in "plain object" mode) and assign the type-info (T..._154);
 *   2 - embedded objects carry type fields: call the runtime helper with
 *       the object's address (T..._155). */
N_NIMCALL(void, genobjectinit_538242_839829468)(Tcproc529021* p0, Tcprocsection529011 section0, Ttype292840* t0, Tloc292816* a0, NIM_BOOL takeaddr0) {
Ttypefieldresult320145 LOC1;
LOC1 = (Ttypefieldresult320145)0;
LOC1 = analyseobjectwithtypefield_320149_3876443242(t0);
switch (LOC1) {
case ((Ttypefieldresult320145) 0):
{
}
break;
case ((Ttypefieldresult320145) 1):
{
Ropeobj178006* r0;
Ttype292840* s0;
TY532811 LOC19;
r0 = rdloc_538188_839829468(a0);
{
TY178507 LOC8;
/* when NOT taking the address, wrap through T..._124 (presumably a
 * dereference) first */
if (!!(takeaddr0)) goto LA6;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC8, 1);
}
LA6: ;
s0 = skiptypes_296099_850551059(t0, IL64(211106232576256));
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
/* unless gcmd == 2 or module symbol flag bit 27 (plain-object modes),
 * append one T..._153 (".Sup", presumably) per inheritance level */
LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA12: ;
if (!!(LOC11)) goto LA13;
{
while (1) {
NIM_BOOL LOC17;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*s0).kind == ((Ttypekind292244) 17));
if (!(LOC17)) goto LA18;
LOC17 = !(((*s0).sons->data[((NI) 0)] == NIM_NIL));
LA18: ;
if (!LOC17) goto LA16;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
s0 = skiptypes_296099_850551059((*s0).sons->data[((NI) 0)], IL64(211106247215360));
} LA16: ;
}
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = r0;
LOC19[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_154), LOC19, 2);
}
break;
case ((Ttypefieldresult320145) 2):
{
Ropeobj178006* r0;
TY532811 LOC26;
{
if (!takeaddr0) goto LA23;
r0 = addrloc_538204_839829468(a0);
}
goto LA21;
LA23: ;
{
r0 = rdloc_538188_839829468(a0);
}
LA21: ;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = r0;
LOC26[1] = gentypeinfo_535941_839829468((*p0).module, t0);
linefmt_532714_839829468(p0, section0, ((NimStringDesc*) &T839829468_155), LOC26, 2);
}
break;
}
}
/* Emit default-construction code for location `loc0`:
 * - simple value types: plain zero-assignment (pattern T..._150);
 * - complex value types: memset-style zeroing (T..._152, via string.h)
 *   unless it is a temp known not to contain GC'ed refs, and skipped for
 *   imported C++ types; then any object type fields are initialized. */
N_NIMCALL(void, constructloc_538388_839829468)(Tcproc529021* p0, Tloc292816* loc0, NIM_BOOL istemp0) {
Ttype292840* typ0;
typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY532811 LOC6;
LOC3 = (NIM_BOOL)0;
LOC3 = iscomplexvaluetype_538317_839829468(typ0);
/* simple scalar-ish type: zero-assign and we are done */
if (!!(LOC3)) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_538188_839829468(loc0);
LOC6[1] = gettypedesc_535673_839829468((*p0).module, typ0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_150), LOC6, 2);
}
goto LA1;
LA4: ;
{
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
/* zero the memory when it is not a temp, or when it contains GC'ed
 * refs (a GC'ed temp must still start out zeroed) */
LOC10 = !(istemp0);
if (LOC10) goto LA11;
LOC10 = containsgarbagecollectedref_320117_3876443242((*loc0).t);
LA11: ;
if (!LOC10) goto LA12;
{
NIM_BOOL LOC16;
TY532811 LOC19;
LOC16 = (NIM_BOOL)0;
/* imported C++ types must not be memset over */
LOC16 = isimportedcpptype_533478_839829468(typ0);
if (!!(LOC16)) goto LA17;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(loc0);
LOC19[1] = rdloc_538188_839829468(loc0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC19, 2);
}
LA17: ;
}
LA12: ;
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, loc0, NIM_TRUE);
}
LA1: ;
}
/* Create a fresh temporary of type `t0` in proc `p0`: bump the label
 * counter, name the temp T..._149 + <label>, emit its declaration (pattern
 * T..._54) into section 0, fill in the result loc (kind 1, storage 2), and
 * construct it.  `!needsinit0` is forwarded as constructloc's `istemp`
 * argument, exactly as the original did. */
N_NIMCALL(void, gettemp_537032_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* result0, NIM_BOOL needsinit0) {
TY532811 declargs0;
p0->labels += ((NI) 1);
unsureAsgnRef((void**) (&result0->r), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_149), rope_178401_2381377266(((NI64) (p0->labels)))));
memset((void*)declargs0, 0, sizeof(declargs0));
declargs0[0] = gettypedesc_535673_839829468(p0->module, t0);
declargs0[1] = result0->r;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), declargs0, 2);
result0->k = ((Tlockind292808) 1);
unsureAsgnRef((void**) (&result0->t), t0);
result0->s = ((Tstorageloc292812) 2);
result0->flags = 0;
constructloc_538388_839829468(p0, result0, !(needsinit0));
}
/* Access the parent-object part of `accessor0`.  In "plain object" modes
 * (gcmd == 2, or module symbol flag bit 27) the accessor is returned
 * unchanged; otherwise it is wrapped through pattern T..._161 (presumably
 * appending ".Sup" -- confirm against the string table). */
static N_INLINE(Ropeobj178006*, parentobj_537257_839829468)(Ropeobj178006* accessor0, Tcgen529027* m0) {
NIM_BOOL plain0;
plain0 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (!(plain0)) {
plain0 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
}
if (plain0) {
return accessor0;
}
{
TY178507 fmtargs0;
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = accessor0;
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_161), fmtargs0, 1);
}
}
/* Render a signed integer literal as portable C:
 *  - values strictly inside (INT32_MIN, INT32_MAX]: plain decimal;
 *  - exactly INT32_MIN: fixed pattern T..._166 (avoids the classic
 *    "-2147483648 is not an int constant" problem, presumably);
 *  - other values above INT64_MIN: 64-bit literal via pattern T..._167;
 *  - INT64_MIN itself: fixed pattern T..._168. */
N_NIMCALL(Ropeobj178006*, intliteral_539270_839829468)(NI64 i0) {
if (IL64(-2147483648) < i0 && i0 <= IL64(2147483647)) {
return rope_178401_2381377266(i0);
}
if (i0 == IL64(-2147483648)) {
TY533289 noargs0;
memset((void*)noargs0, 0, sizeof(noargs0));
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_166), noargs0, 0);
}
if ((IL64(-9223372036854775807) - IL64(1)) < i0) {
TY178507 onearg0;
memset((void*)onearg0, 0, sizeof(onearg0));
onearg0[0] = rope_178401_2381377266(i0);
return ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), onearg0, 1);
}
{
TY533289 noargs0;
memset((void*)noargs0, 0, sizeof(noargs0));
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), noargs0, 0);
}
}
/* Render an int64 literal: pattern T..._167 for anything above INT64_MIN,
 * the fixed pattern T..._168 for INT64_MIN itself (which has no portable
 * literal spelling). */
N_NIMCALL(Ropeobj178006*, int64literal_549430_839829468)(NI64 i0) {
if ((IL64(-9223372036854775807) - IL64(1)) < i0) {
TY178507 onearg0;
memset((void*)onearg0, 0, sizeof(onearg0));
onearg0[0] = rope_178401_2381377266(i0);
return ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_167), onearg0, 1);
}
{
TY533289 noargs0;
memset((void*)noargs0, 0, sizeof(noargs0));
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_168), noargs0, 0);
}
}
/* Render an unsigned 64-bit literal: decimal digits followed by the
 * three-character suffix T..._171 (presumably "ULL" -- rawNewString
 * reserves digits + 3). */
N_NIMCALL(Ropeobj178006*, uint64literal_549442_839829468)(NU64 i0) {
NimStringDesc* digits0;
NimStringDesc* lit0;
digits0 = HEX24_8401_1689653243(i0);
lit0 = rawNewString(digits0->Sup.len + 3);
appendString(lit0, digits0);
appendString(lit0, ((NimStringDesc*) &T839829468_171));
return rope_178277_2381377266(lit0);
}
/* Emit a static string-literal definition into module section 8 and return
 * the fresh temp name that identifies it.  cgsym pulls in the runtime
 * symbol T..._79 first (its rope result was discarded in the original too);
 * the definition pattern T..._177 receives [name, C string, length]. */
N_NIMCALL(Ropeobj178006*, getstrlit_549468_839829468)(Tcgen529027* m0, NimStringDesc* s0) {
Ropeobj178006* litname0;
TY535238 fmtargs0;
(void) cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_79));
litname0 = gettempname_533598_839829468(m0);
memset((void*)fmtargs0, 0, sizeof(fmtargs0));
fmtargs0[0] = litname0;
fmtargs0[1] = makecstring_191638_155036129(s0);
fmtargs0[2] = rope_178401_2381377266(((NI64) ((s0 ? s0->Sup.len : 0))));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_177), fmtargs0, 3);
return litname0;
}
/* Generate the C representation of literal node `n0` with target type `ty0`:
 * integer kinds dispatch on the skipped type (plain int, bool, int64,
 * uint64, or a cast template for the rest); nil literals of closure type
 * get a cached data-section pair; string literals become cached Nim string
 * objects or raw C strings; float kinds print at max precision.  Any other
 * node kind is an internal error. */
N_NIMCALL(Ropeobj178006*, genliteral_549476_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* ty0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
if (!(ty0 == NIM_NIL)) goto LA3;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_165));
}
LA3: ;
switch ((*n0).kind) {
/* integer-like literal node kinds 5..15 */
case ((Tnodekind292020) 5) ... ((Tnodekind292020) 15):
{
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
switch ((*LOC6).kind) {
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 5):
{
result0 = intliteral_539270_839829468((*n0).kindU.S1.intval);
}
break;
/* bool (kind 1): nonzero -> T..._169, zero -> T..._170 */
case ((Ttypekind292244) 1):
{
{
TY533289 LOC13;
if (!!(((*n0).kindU.S1.intval == IL64(0)))) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_169), LOC13, 0);
}
goto LA9;
LA11: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_170), LOC15, 0);
}
LA9: ;
}
break;
case ((Ttypekind292244) 35):
{
result0 = int64literal_549430_839829468((*n0).kindU.S1.intval);
}
break;
case ((Ttypekind292244) 44):
{
result0 = uint64literal_549442_839829468(((NU64) ((*n0).kindU.S1.intval)));
}
break;
/* other ordinal types: cast template T..._172 with [typedesc, value] */
default:
{
TY532811 LOC19;
Ttype292840* LOC20;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (Ttype292840*)0;
LOC20 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
LOC19[0] = gettypedesc_535673_839829468((*p0).module, LOC20);
LOC19[1] = intliteral_539270_839829468((*n0).kindU.S1.intval);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_172), LOC19, 2);
}
break;
}
}
break;
/* nil literal (node kind 23) */
case ((Tnodekind292020) 23):
{
Ttype292840* t0;
t0 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
{
NIM_BOOL LOC24;
NI id0;
Ropeobj178006* LOC28;
LOC24 = (NIM_BOOL)0;
/* closure-typed nil (proc kind 25, callconv 8) needs a named
 * data-section pair, cached in the module's datacache */
LOC24 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC24)) goto LA25;
LOC24 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA25: ;
if (!LOC24) goto LA26;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178401_2381377266(((NI64) (id0)));
result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC28);
{
TY532811 LOC33;
/* first sighting: emit the definition (pattern T..._173) */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA31;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC33[1] = result0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_173), LOC33, 2);
}
LA31: ;
}
goto LA22;
LA26: ;
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
}
LA22: ;
}
break;
/* string literal node kinds 20..22 */
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
{
TY533289 LOC40;
/* nil string: fixed pattern T..._175 */
if (!(*n0).kindU.S3.strval == 0) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_175), LOC40, 0);
}
goto LA36;
LA38: ;
{
Ttype292840* LOC42;
NI id0;
LOC42 = (Ttype292840*)0;
LOC42 = skiptypes_296099_850551059(ty0, IL64(211106242013440));
/* Nim string type (kind 28): cache/reuse a string object */
if (!((*LOC42).kind == ((Ttypekind292244) 28))) goto LA43;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
{
TY178507 LOC49;
/* first sighting: emit the literal and wrap via T..._176 */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA47;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = getstrlit_549468_839829468((*p0).module, (*n0).kindU.S3.strval);
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_176), LOC49, 1);
}
goto LA45;
LA47: ;
{
TY532811 LOC51;
/* already emitted: reference tmpbase+id via T..._178 */
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = (*(*p0).module).tmpbase;
LOC51[1] = rope_178401_2381377266(((NI64) (id0)));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_178), LOC51, 2);
}
LA45: ;
}
goto LA36;
LA43: ;
{
/* cstring target: plain escaped C string */
result0 = makecstring_191638_155036129((*n0).kindU.S3.strval);
}
LA36: ;
}
break;
/* float literal node kinds 16..18 */
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 18):
{
NimStringDesc* LOC54;
LOC54 = (NimStringDesc*)0;
LOC54 = tostrmaxprecision_298007_3471544153((*n0).kindU.S2.floatval);
result0 = rope_178277_2381377266(LOC54);
}
break;
default:
{
NimStringDesc* LOC56;
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 12);
appendString(LOC56, ((NimStringDesc*) &T839829468_179));
appendString(LOC56, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendChar(LOC56, 41);
internalerror_196100_155036129((*n0).info, LOC56);
result0 = NIM_NIL;
}
break;
}
return result0;
}
/* Convenience overload: generate the literal using the node's own type. */
N_NIMCALL(Ropeobj178006*, genliteral_539273_839829468)(Tcproc529021* p0, Tnode292802* n0) {
return genliteral_549476_839829468(p0, n0, n0->typ);
}
/* Emit C `case` labels for all values of case-branch node `branch0`
 * (children 0..len-2 are the selector values; the last child is the body,
 * hence the `length - 2` upper bound).  Range nodes (kind 44) use the
 * compiler's GNU-style "case a ... b" extension when the configured C
 * compiler supports it (Cc table flag bit 0); otherwise each value in the
 * range is expanded to its own `case` label.  Single values always emit one
 * label via pattern T..._180. */
N_NIMCALL(void, gencaserange_537028_839829468)(Tcproc529021* p0, Tnode292802* branch0) {
NI length0;
length0 = len_293081_850551059(branch0);
{
NI j_547677_839829468;
NI HEX3Atmp_547718_839829468;
NI res_547721_839829468;
j_547677_839829468 = (NI)0;
HEX3Atmp_547718_839829468 = (NI)0;
HEX3Atmp_547718_839829468 = (NI)(length0 - ((NI) 2));
res_547721_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547721_839829468 <= HEX3Atmp_547718_839829468)) goto LA3;
j_547677_839829468 = res_547721_839829468;
{
Tnode292802* LOC6;
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
/* range value (node kind 44)? */
if (!((*LOC6).kind == ((Tnodekind292020) 44))) goto LA7;
{
TY532811 LOC13;
Tnode292802* LOC14;
Tnode292802* LOC15;
Tnode292802* LOC16;
Tnode292802* LOC17;
/* C compiler supports case ranges: emit "case lo ... hi"
 * via pattern T..._164 */
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0)) goto LA11;
memset((void*)LOC13, 0, sizeof(LOC13));
LOC14 = (Tnode292802*)0;
LOC14 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(LOC14, ((NI) 0));
LOC13[0] = genliteral_539273_839829468(p0, LOC15);
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC17 = (Tnode292802*)0;
LOC17 = HEX5BHEX5D_293238_850551059(LOC16, ((NI) 1));
LOC13[1] = genliteral_539273_839829468(p0, LOC17);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_164), LOC13, 2);
}
goto LA9;
LA11: ;
{
Tnode292802* v0;
Tnode292802* LOC19;
Tnode292802* LOC20;
/* no range support: copy the low bound and emit one label per
 * value, incrementing a working copy up to the high bound */
LOC19 = (Tnode292802*)0;
LOC19 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC20 = (Tnode292802*)0;
LOC20 = HEX5BHEX5D_293238_850551059(LOC19, ((NI) 0));
v0 = copynode_296528_850551059(LOC20);
{
while (1) {
Tnode292802* LOC23;
Tnode292802* LOC24;
TY178507 LOC25;
LOC23 = (Tnode292802*)0;
LOC23 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC24 = (Tnode292802*)0;
LOC24 = HEX5BHEX5D_293238_850551059(LOC23, ((NI) 1));
if (!((*v0).kindU.S1.intval <= (*LOC24).kindU.S1.intval)) goto LA22;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = genliteral_539273_839829468(p0, v0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC25, 1);
(*v0).kindU.S1.intval += ((NI) 1);
} LA22: ;
}
}
LA9: ;
}
goto LA4;
LA7: ;
{
TY178507 LOC27;
Tnode292802* LOC28;
/* plain single value: one "case v:" label via T..._180 */
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(branch0, j_547677_839829468);
LOC27[0] = genliteral_539273_839829468(p0, LOC28);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_180), LOC27, 1);
}
LA4: ;
res_547721_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Recursively emit GC-traversal code for object field layout node `n0`,
 * where `accessor0` is the C expression reaching the current object:
 *  - record list (node kind 138): recurse into every child;
 *  - record case (kind 139): emit a C switch on the discriminator field
 *    (T..._163), one case per branch (range labels via gencaserange,
 *    `default:` T..._181 otherwise), with T..._182 closing each case and
 *    T..._183 closing the switch;
 *  - a single field symbol (kind 3): delegate to the type-directed
 *    traversal generator for "<accessor>.<field>" (pattern T..._90).
 * Anything else is an internal error. */
N_NIMCALL(void, gentraverseproc_537039_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Tnode292802* n0) {
{ {
if (!(n0 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
{
NI i_537068_839829468;
NI HEX3Atmp_537239_839829468;
NI LOC7;
NI res_537242_839829468;
i_537068_839829468 = (NI)0;
HEX3Atmp_537239_839829468 = (NI)0;
LOC7 = (NI)0;
LOC7 = sonslen_295351_850551059(n0);
HEX3Atmp_537239_839829468 = (NI)(LOC7 - ((NI) 1));
res_537242_839829468 = ((NI) 0);
{
while (1) {
if (!(res_537242_839829468 <= HEX3Atmp_537239_839829468)) goto LA9;
i_537068_839829468 = res_537242_839829468;
gentraverseproc_537039_839829468(c0, accessor0, (*n0).kindU.S6.sons->data[i_537068_839829468]);
res_537242_839829468 += ((NI) 1);
} LA9: ;
}
}
}
break;
case ((Tnodekind292020) 139):
{
Tcproc529021* p0;
Tsym292834* disc0;
TY532811 LOC15;
TY533289 LOC28;
{
/* child 0 must be the discriminator symbol */
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)))) goto LA13;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_162));
}
LA13: ;
p0 = (*c0).p;
disc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
/* open the switch on <accessor>.<discriminator> */
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = accessor0;
LOC15[1] = (*disc0).loc.r;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_163), LOC15, 2);
{
NI i_537098_839829468;
NI HEX3Atmp_537249_839829468;
NI LOC17;
NI res_537252_839829468;
i_537098_839829468 = (NI)0;
HEX3Atmp_537249_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = sonslen_295351_850551059(n0);
HEX3Atmp_537249_839829468 = (NI)(LOC17 - ((NI) 1));
/* branches start at child 1 (child 0 is the discriminator) */
res_537252_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* branch0;
Tnode292802* LOC26;
TY533289 LOC27;
if (!(res_537252_839829468 <= HEX3Atmp_537249_839829468)) goto LA19;
i_537098_839829468 = res_537252_839829468;
branch0 = (*n0).kindU.S6.sons->data[i_537098_839829468];
{
/* of-branch (kind 85): emit its case labels */
if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA22;
gencaserange_537028_839829468((*c0).p, branch0);
}
goto LA20;
LA22: ;
{
TY533289 LOC25;
/* else-branch: "default:" */
memset((void*)LOC25, 0, sizeof(LOC25));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC25, 0);
}
LA20: ;
LOC26 = (Tnode292802*)0;
LOC26 = lastson_295364_850551059(branch0);
gentraverseproc_537039_839829468(c0, accessor0, LOC26);
memset((void*)LOC27, 0, sizeof(LOC27));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC27, 0);
res_537252_839829468 += ((NI) 1);
} LA19: ;
}
}
/* close the switch */
memset((void*)LOC28, 0, sizeof(LOC28));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_183), LOC28, 0);
}
break;
case ((Tnodekind292020) 3):
{
Tsym292834* field0;
TY532811 LOC34;
Ropeobj178006* LOC35;
field0 = (*n0).kindU.S4.sym;
{
if (!((*field0).loc.t == NIM_NIL)) goto LA32;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
LA32: ;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = accessor0;
LOC34[1] = (*field0).loc.r;
LOC35 = (Ropeobj178006*)0;
LOC35 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC34, 2);
gentraverseproc_537022_839829468(c0, LOC35, (*field0).loc.t);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_184));
}
break;
}
}BeforeRet: ;
}
/* Append one formatted line of generated code to section `s0` of proc `p0`.
 * Same call sequence as the generated original: resolve the section's rope,
 * run the format string through ropecg, indent the result, then append. */
N_NIMCALL(void, linecg_532707_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
  Ropeobj178006** section = s_529179_3723162438(p0, s0);
  Ropeobj178006* formatted = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
  Ropeobj178006* indented = indentline_532656_839829468(p0, formatted);
  add_178482_2381377266(section, indented);
}
/* Machine-generated (Nim C backend). Emits GC-traversal code for a *type*:
 * resolves the unique type, then dispatches on the raw type-kind ordinal
 * (symbolic names not visible here -- confirm against compiler sources):
 *   11/10/8: wrapper kinds -- recurse into the last son type
 *   4/16:    fixed-size array-like -- emits a counted C loop (templates
 *            T839829468_159/_160) and recurses on the element type with an
 *            indexed accessor (T839829468_138)
 *   17:      object -- recurse into base types via parentobj accessor, then
 *            into the field tree (*typ).n if present
 *   18:      tuple -- recurse per positional field (T839829468_185)
 *   22/28/24: pointer-ish kinds -- emit the closure's visitor format on the
 *            accessor itself
 *   25:      proc type -- only when callconv == 8 (presumably closure --
 *            TODO confirm): visit its environment (T839829468_186)
 *   default: nothing to do
 */
N_NIMCALL(void, gentraverseproc_537022_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ_537027_839829468) {
Ttype292840* typ_537302_839829468;
Tcproc529021* p0;
{ {
/* nil type: nothing to traverse */
if (!(typ_537027_839829468 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
typ_537302_839829468 = getuniquetype_528640_2036603609(typ_537027_839829468);
p0 = (*c0).p;
switch ((*typ_537302_839829468).kind) {
case ((Ttypekind292244) 11):
case ((Ttypekind292244) 10):
case ((Ttypekind292244) 8):
{
Ttype292840* LOC6;
LOC6 = (Ttype292840*)0;
LOC6 = lastson_295377_850551059(typ_537302_839829468);
gentraverseproc_537022_839829468(c0, accessor0, LOC6);
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
NI64 arraysize0;
Tloc292816 i0;
Ttype292840* LOC8;
TY532811 LOC9;
TY532811 LOC10;
Ropeobj178006* LOC11;
TY533289 LOC12;
/* length comes from the index type (son 0) */
arraysize0 = lengthord_320007_3876443242((*typ_537302_839829468).sons->data[((NI) 0)]);
memset((void*)(&i0), 0, sizeof(i0));
/* fresh temporary of system type kind 31 used as the loop counter */
LOC8 = (Ttype292840*)0;
LOC8 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC8, (&i0), NIM_FALSE);
/* open the loop: counter rope + array length */
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = i0.r;
LOC9[1] = rope_178401_2381377266(arraysize0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_159), LOC9, 2);
/* recurse on element type (son 1) with "accessor[i]"-style accessor */
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = accessor0;
LOC10[1] = i0.r;
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC10, 2);
gentraverseproc_537022_839829468(c0, LOC11, (*typ_537302_839829468).sons->data[((NI) 1)]);
/* close the loop */
memset((void*)LOC12, 0, sizeof(LOC12));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC12, 0);
}
break;
case ((Ttypekind292244) 17):
{
/* object: first recurse into each son type (base types / inherited
 * part) through a parentobj accessor */
{
NI i_537325_839829468;
NI HEX3Atmp_537384_839829468;
NI LOC15;
NI res_537387_839829468;
i_537325_839829468 = (NI)0;
HEX3Atmp_537384_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = sonslen_295327_850551059(typ_537302_839829468);
HEX3Atmp_537384_839829468 = (NI)(LOC15 - ((NI) 1));
res_537387_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* x0;
Ropeobj178006* LOC22;
if (!(res_537387_839829468 <= HEX3Atmp_537384_839829468)) goto LA17;
i_537325_839829468 = res_537387_839829468;
x0 = (*typ_537302_839829468).sons->data[i_537325_839829468];
/* non-nil sons are skipped down through wrapper kinds first */
{
if (!!((x0 == NIM_NIL))) goto LA20;
x0 = skiptypes_296099_850551059(x0, IL64(211106247215360));
}
LA20: ;
LOC22 = (Ropeobj178006*)0;
LOC22 = parentobj_537257_839829468(accessor0, (*(*c0).p).module);
gentraverseproc_537022_839829468(c0, LOC22, x0);
res_537387_839829468 += ((NI) 1);
} LA17: ;
}
}
/* then traverse this object's own field tree, if it has one */
{
if (!!(((*typ_537302_839829468).n == NIM_NIL))) goto LA25;
gentraverseproc_537039_839829468(c0, accessor0, (*typ_537302_839829468).n);
}
LA25: ;
}
break;
case ((Ttypekind292244) 18):
{
/* tuple: per-field recursion using a positional accessor */
Ttype292840* typ0;
typ0 = getuniquetype_528640_2036603609(typ_537302_839829468);
{
NI i_537363_839829468;
NI HEX3Atmp_537392_839829468;
NI LOC29;
NI res_537395_839829468;
i_537363_839829468 = (NI)0;
HEX3Atmp_537392_839829468 = (NI)0;
LOC29 = (NI)0;
LOC29 = sonslen_295327_850551059(typ0);
HEX3Atmp_537392_839829468 = (NI)(LOC29 - ((NI) 1));
res_537395_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC32;
Ropeobj178006* LOC33;
if (!(res_537395_839829468 <= HEX3Atmp_537392_839829468)) goto LA31;
i_537363_839829468 = res_537395_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = accessor0;
LOC32[1] = rope_178401_2381377266(((NI64) (i_537363_839829468)));
LOC33 = (Ropeobj178006*)0;
LOC33 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_185), LOC32, 2);
gentraverseproc_537022_839829468(c0, LOC33, (*typ0).sons->data[i_537363_839829468]);
res_537395_839829468 += ((NI) 1);
} LA31: ;
}
}
}
break;
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
/* visit the accessor itself with the closure's visitor format */
TY178507 LOC35;
memset((void*)LOC35, 0, sizeof(LOC35));
LOC35[0] = accessor0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC35, 1);
}
break;
case ((Ttypekind292244) 25):
{
{
TY178507 LOC41;
TY178507 LOC42;
/* only proc types with calling convention 8 carry a traversable
 * environment */
if (!((*typ_537302_839829468).callconv == ((Tcallingconvention292002) 8))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = accessor0;
LOC41[0] = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_186), LOC42, 1);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), (*c0).visitorfrmt, LOC41, 1);
}
LA39: ;
}
break;
default:
{
}
break;
}
}BeforeRet: ;
}
/* Machine-generated (Nim C backend). Emits traversal code for a seq-like
 * type: opens a counted loop over the seq's length (template T839829468_156),
 * recurses into the element type (son 0) with an indexed accessor
 * (T839829468_187), and closes the loop (T839829468_160).
 * The length-field expression spliced into the loop header is chosen at
 * compile time: T839829468_157 when gcmd == 2 or the module has symbol
 * flag 27 set, otherwise T839829468_158 (presumably two different seq
 * header layouts -- TODO confirm).
 */
N_NIMCALL(void, gentraverseprocseq_537399_839829468)(Ttraversalclosure537019* c0, Ropeobj178006* accessor0, Ttype292840* typ0) {
Tcproc529021* p0;
Tloc292816 i0;
Ttype292840* LOC1;
TY535238 LOC2;
NimStringDesc* LOC3;
TY532811 LOC11;
Ropeobj178006* LOC12;
TY533289 LOC13;
p0 = (*c0).p;
/* fresh temporary of system type kind 31 used as the loop counter */
memset((void*)(&i0), 0, sizeof(i0));
LOC1 = (Ttype292840*)0;
LOC1 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC1, (&i0), NIM_FALSE);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = i0.r;
LOC2[1] = accessor0;
LOC3 = (NimStringDesc*)0;
/* pick the length-field string depending on backend command / module flag */
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*(*c0).p).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!LOC6) goto LA8;
LOC3 = copyString(((NimStringDesc*) &T839829468_157));
}
goto LA4;
LA8: ;
{
LOC3 = copyString(((NimStringDesc*) &T839829468_158));
}
LA4: ;
LOC2[2] = rope_178277_2381377266(LOC3);
/* open the loop over the seq length */
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_156), LOC2, 3);
/* recurse on the element type with "accessor->data[i]"-style accessor */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = accessor0;
LOC11[1] = i0.r;
LOC12 = (Ropeobj178006*)0;
LOC12 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_187), LOC11, 2);
gentraverseproc_537022_839829468(c0, LOC12, (*typ0).sons->data[((NI) 0)]);
/* close the loop */
memset((void*)LOC13, 0, sizeof(LOC13));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC13, 0);
}
/* Machine-generated (Nim C backend). Builds a complete traversal ("marker")
 * proc for `typ0` inside module `m0` and returns the rope holding the new
 * proc's name. Steps visible in the code:
 *   - allocate a fresh proc context and a fresh temp name (the result);
 *   - for reason 0 install visitor format T839829468_145 (other reasons
 *     leave visitorfrmt untouched);
 *   - emit header (T839829468_146) and two prologue lines binding the
 *     typedesc `t0` (T839829468_147 / _148);
 *   - seq-like kind 24 goes through gentraverseprocseq; otherwise the
 *     element type (son 0, skipped through wrapper kinds) selects one of
 *     two fixed accessor strings (T839829468_188 / _189) before recursing;
 *   - assemble sections 0..2 plus the header into the generated proc body
 *     (T839829468_190) and append declaration + body to the module's
 *     file sections 7 and 10.
 */
N_NIMCALL(Ropeobj178006*, gentraverseproc_537632_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttypeinforeason537016 reason0) {
Ropeobj178006* result0;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* header0;
TY178507 LOC3;
Ropeobj178006* t0;
TY178507 LOC4;
TY178507 LOC5;
Ropeobj178006* generatedproc0;
TY535235 LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
Ropeobj178006** LOC23;
TY178507 LOC24;
result0 = (Ropeobj178006*)0;
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_529206_3723162438(NIM_NIL, m0);
result0 = gettempname_533598_839829468(m0);
switch (reason0) {
case ((Ttypeinforeason537016) 0):
{
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_145));
}
break;
default:
{
}
break;
}
/* proc header carries the generated name */
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_146), LOC3, 1);
t0 = gettypedesc_535673_839829468(m0, typ0);
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_147), LOC4, 1);
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = t0;
linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_148), LOC5, 1);
c0.p = p0;
{
Ropeobj178006* LOC10;
/* type kind 24 (seq-like): dedicated seq traversal */
if (!((*typ0).kind == ((Ttypekind292244) 24))) goto LA8;
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseprocseq_537399_839829468((&c0), LOC10, typ0);
}
goto LA6;
LA8: ;
{
{
Ttype292840* LOC14;
Ropeobj178006* LOC17;
/* skip wrapper kinds on the element type; the bitmask picks which of
 * the two fixed accessor strings to use */
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*typ0).sons->data[((NI) 0)], IL64(211106232576256));
if (!((65552 &((NU64)1<<((NU)((*LOC14).kind)&63U)))!=0)) goto LA15;
LOC17 = (Ropeobj178006*)0;
LOC17 = rope_178277_2381377266(((NimStringDesc*) &T839829468_188));
gentraverseproc_537022_839829468((&c0), LOC17, (*typ0).sons->data[((NI) 0)]);
}
goto LA12;
LA15: ;
{
Ropeobj178006* LOC19;
LOC19 = (Ropeobj178006*)0;
LOC19 = rope_178277_2381377266(((NimStringDesc*) &T839829468_189));
gentraverseproc_537022_839829468((&c0), LOC19, (*typ0).sons->data[((NI) 0)]);
}
LA12: ;
}
LA6: ;
/* assemble header + sections 0, 1, 2 into the proc body */
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = header0;
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC20[1] = (*LOC21);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC20[2] = (*LOC22);
LOC23 = (Ropeobj178006**)0;
LOC23 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC20[3] = (*LOC23);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC20, 4);
/* forward declaration into section 7, definition into section 10 */
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = header0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC24, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
/* Emit RTTI for an array-like type: first generate type info for the
 * element type (son 1), then the aux base record that references it.
 * Same two calls, same order as the generated original. */
N_NIMCALL(void, genarrayinfo_537005_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
  Ropeobj178006* elemInfo = gentypeinfo_535941_839829468(m0, (*typ0).sons->data[((NI) 1)]);
  gentypeinfoauxbase_535960_839829468(m0, typ0, typ0, name0, elemInfo);
}
/* Emit RTTI for a set type: aux info first, then one formatted line
 * (template T839829468_193) into module file-section 14 carrying a fresh
 * node, the set's first ordinal value, and the info object's name.
 * Call order matches the generated original exactly. */
N_NIMCALL(void, gensetinfo_536867_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
  TY535238 fmtArgs;
  gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
  memset((void*)fmtArgs, 0, sizeof(fmtArgs));
  fmtArgs[0] = getnimnode_535945_839829468(m0);
  fmtArgs[1] = rope_178401_2381377266(firstord_320001_3876443242(typ0));
  fmtArgs[2] = name0;
  addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_193), fmtArgs, 3);
}
/* Machine-generated (Nim C backend). Emits RTTI for an enum type:
 *   - one node-pointer array sized to the enum's member count;
 *   - a C string per member (either the identifier, or the field's ast
 *     string when an explicit name was attached);
 *   - "special case" assignments for members whose position differs from
 *     their index (or when type flag 5 is set), which also marks the enum
 *     as having holes;
 *   - the name array, a fill loop (T839829468_197), the special cases, the
 *     final node (T839829468_198), and -- only for enums with holes -- an
 *     extra line T839829468_199.
 */
N_NIMCALL(void, genenuminfo_536599_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ropeobj178006* name0) {
Ropeobj178006* nodeptrs0;
NI length0;
TY532811 LOC1;
Ropeobj178006* enumnames0;
Ropeobj178006* specialcases0;
NI firstnimnode0;
NIM_BOOL hasholes0;
Ropeobj178006* enumarray0;
Ropeobj178006* counter0;
TY178507 LOC24;
TY535238 LOC25;
TY536847 LOC26;
TY535235 LOC27;
gentypeinfoaux_536027_839829468(m0, typ0, typ0, name0);
nodeptrs0 = gettempname_533598_839829468(m0);
length0 = sonslen_295351_850551059((*typ0).n);
/* declare the node-pointer array (T839829468_139) in file-section 12 */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = nodeptrs0;
LOC1[1] = rope_178401_2381377266(((NI64) (length0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC1, 2);
enumnames0 = (Ropeobj178006*)0;
specialcases0 = (Ropeobj178006*)0;
/* remember the node pool index before allocating member nodes */
firstnimnode0 = (*m0).typenodes;
hasholes0 = NIM_FALSE;
{
NI i_536624_839829468;
NI HEX3Atmp_536860_839829468;
NI res_536863_839829468;
i_536624_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)0;
HEX3Atmp_536860_839829468 = (NI)(length0 - ((NI) 1));
res_536863_839829468 = ((NI) 0);
{
while (1) {
Tsym292834* field0;
Ropeobj178006* elemnode0;
if (!(res_536863_839829468 <= HEX3Atmp_536860_839829468)) goto LA4;
i_536624_839829468 = res_536863_839829468;
field0 = (*(*(*typ0).n).kindU.S6.sons->data[i_536624_839829468]).kindU.S4.sym;
elemnode0 = getnimnode_535945_839829468(m0);
/* member name: symbol identifier, or the ast's string value when an
 * explicit ast is attached */
{
Ropeobj178006* LOC9;
if (!((*field0).ast == NIM_NIL)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = makecstring_191638_155036129((*(*field0).name).s);
add_178482_2381377266(&enumnames0, LOC9);
}
goto LA5;
LA7: ;
{
Ropeobj178006* LOC11;
LOC11 = (Ropeobj178006*)0;
LOC11 = makecstring_191638_155036129((*(*field0).ast).kindU.S3.strval);
add_178482_2381377266(&enumnames0, LOC11);
}
LA5: ;
/* separator (T839829468_110 + newline) after every member but the last */
{
NimStringDesc* LOC16;
if (!(i_536624_839829468 < (NI)(length0 - ((NI) 1)))) goto LA14;
LOC16 = (NimStringDesc*)0;
LOC16 = rawNewString(tnl_176644_4151366050->Sup.len + 2);
appendString(LOC16, ((NimStringDesc*) &T839829468_110));
appendString(LOC16, tnl_176644_4151366050);
add_178487_2381377266(&enumnames0, LOC16);
}
LA14: ;
/* position != index (or type flag 5 set): record a special-case
 * assignment and mark the enum as having holes */
{
NIM_BOOL LOC19;
TY532811 LOC23;
LOC19 = (NIM_BOOL)0;
LOC19 = !(((*field0).position == i_536624_839829468));
if (LOC19) goto LA20;
LOC19 = (((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0);
LA20: ;
if (!LOC19) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = elemnode0;
LOC23[1] = rope_178401_2381377266(((NI64) ((*field0).position)));
addf_179205_2381377266(&specialcases0, ((NimStringDesc*) &T839829468_194), LOC23, 2);
hasholes0 = NIM_TRUE;
}
LA21: ;
res_536863_839829468 += ((NI) 1);
} LA4: ;
}
}
enumarray0 = gettempname_533598_839829468(m0);
counter0 = gettempname_533598_839829468(m0);
/* counter declaration (T839829468_195) and name array (T839829468_196) */
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = counter0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_195), LOC24, 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = enumarray0;
LOC25[1] = rope_178401_2381377266(((NI64) (length0)));
LOC25[2] = enumnames0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_196), LOC25, 3);
/* fill loop over the member nodes (T839829468_197), then the recorded
 * special cases, then the final enum node (T839829468_198) */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = counter0;
LOC26[1] = rope_178401_2381377266(((NI64) (length0)));
LOC26[2] = (*m0).typenodesname;
LOC26[3] = rope_178401_2381377266(((NI64) (firstnimnode0)));
LOC26[4] = enumarray0;
LOC26[5] = nodeptrs0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_197), LOC26, 6);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], specialcases0);
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = getnimnode_535945_839829468(m0);
LOC27[1] = rope_178401_2381377266(((NI64) (length0)));
LOC27[2] = nodeptrs0;
LOC27[3] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_198), LOC27, 4);
/* enums with holes get an extra marker line */
{
TY178507 LOC32;
if (!hasholes0) goto LA30;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = name0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_199), LOC32, 1);
}
LA30: ;
}
/* Machine-generated (Nim C backend). Computes the (deterministic) name of
 * the lookup table for discriminator `d0` of an object type: walks up the
 * inheritance chain (son 0) until the object whose record actually declares
 * `d0`'s name is found, then formats that object's unique id plus the
 * mangled discriminator name through template T839829468_201.
 * NOTE(review): the walk has no nil check on son 0 -- it relies on the
 * caller guaranteeing that some ancestor declares the field.
 */
N_NIMCALL(Ropeobj178006*, discriminatortablename_536057_839829468)(Tcgen529027* m0, Ttype292840* objtype_536060_839829468, Tsym292834* d0) {
Ropeobj178006* result0;
Ttype292840* objtype0;
TY532811 LOC8;
NimStringDesc* LOC9;
result0 = (Ropeobj178006*)0;
objtype0 = objtype_536060_839829468;
/* ascend to the object type that declares the discriminator */
{
while (1) {
Tsym292834* LOC3;
LOC3 = (Tsym292834*)0;
LOC3 = lookupinrecord_299119_2984716966((*objtype0).n, (*d0).name);
if (!(LOC3 == NIM_NIL)) goto LA2;
objtype0 = (*objtype0).sons->data[((NI) 0)];
} LA2: ;
}
/* that type must have a symbol, otherwise its id is not stable */
{
if (!((*objtype0).sym == NIM_NIL)) goto LA6;
internalerror_196100_155036129((*d0).info, ((NimStringDesc*) &T839829468_200));
}
LA6: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rope_178401_2381377266(((NI64) ((*objtype0).Sup.id)));
LOC9 = (NimStringDesc*)0;
LOC9 = mangle_528847_2036603609((*(*d0).name).s);
LOC8[1] = rope_178277_2381377266(LOC9);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_201), LOC8, 2);
return result0;
}
/* Machine-generated (Nim C backend). Emits the RTTI node tree for an
 * object's field list. Dispatch on the raw node-kind ordinal:
 *   kind 138 (record list): single son collapses into its child; multiple
 *     sons build a temp node array and link each child into it; an empty
 *     list emits template T839829468_143;
 *   kind 139 (discriminated section): emits the discriminator node
 *     (T839829468_202), declares its branch table (T839829468_203), then
 *     fills table slots for every branch value -- kind 44 sons are ordinal
 *     ranges expanded value-by-value, other sons are single ordinals, and
 *     kind 88 (presumably the else branch -- TODO confirm) takes slot L
 *     (the discriminator type's length);
 *   kind 3 (field symbol): emits the field node (T839829468_206) unless it
 *     has a nonzero bitsize (bitfields get no node);
 *   default: internal error.
 */
N_NIMCALL(void, genobjectfields_536104_839829468)(Tcgen529027* m0, Ttype292840* typ0, Tnode292802* n0, Ropeobj178006* expr0) {
switch ((*n0).kind) {
case ((Tnodekind292020) 138):
{
NI L0;
L0 = sonslen_295351_850551059(n0);
{
/* exactly one son: no intermediate list node is materialized */
if (!(L0 == ((NI) 1))) goto LA4;
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[((NI) 0)], expr0);
}
goto LA2;
LA4: ;
{
Ropeobj178006* tmp0;
TY532811 LOC9;
TY535238 LOC14;
if (!(((NI) 0) < L0)) goto LA7;
/* declare a temp node-pointer array of L0 entries (T839829468_139) */
tmp0 = gettempname_533598_839829468(m0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = tmp0;
LOC9[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_139), LOC9, 2);
{
NI i_536127_839829468;
NI HEX3Atmp_536482_839829468;
NI res_536485_839829468;
i_536127_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)0;
HEX3Atmp_536482_839829468 = (NI)(L0 - ((NI) 1));
res_536485_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* tmp20;
TY535238 LOC13;
if (!(res_536485_839829468 <= HEX3Atmp_536482_839829468)) goto LA12;
i_536127_839829468 = res_536485_839829468;
/* one fresh node per son, linked into slot i (T839829468_140) */
tmp20 = getnimnode_535945_839829468(m0);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = tmp0;
LOC13[1] = rope_178401_2381377266(((NI64) (i_536127_839829468)));
LOC13[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC13, 3);
genobjectfields_536104_839829468(m0, typ0, (*n0).kindU.S6.sons->data[i_536127_839829468], tmp20);
res_536485_839829468 += ((NI) 1);
} LA12: ;
}
}
/* attach the temp array to the parent node (T839829468_142) */
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = expr0;
LOC14[1] = rope_178401_2381377266(((NI64) (L0)));
LOC14[2] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_142), LOC14, 3);
}
goto LA2;
LA7: ;
{
/* empty record list */
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = expr0;
LOC16[1] = rope_178401_2381377266(((NI64) (L0)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_143), LOC16, 2);
}
LA2: ;
}
break;
case ((Tnodekind292020) 139):
{
Tsym292834* field0;
Ropeobj178006* tmp0;
NI64 L0;
TY536401 LOC18;
TY532811 LOC19;
/* discriminator symbol is son 0; L0 is the ordinal length of its type */
field0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
tmp0 = discriminatortablename_536057_839829468(m0, typ0, field0);
L0 = lengthord_320007_3876443242((*field0).typ);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = expr0;
LOC18[1] = gettypedesc_535673_839829468(m0, typ0);
LOC18[2] = (*field0).loc.r;
LOC18[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC18[4] = makecstring_191638_155036129((*(*field0).name).s);
LOC18[5] = tmp0;
LOC18[6] = rope_178401_2381377266(L0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_202), LOC18, 7);
/* table has L0+1 slots: one per ordinal value plus the else slot */
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0;
LOC19[1] = rope_178401_2381377266((NI64)(L0 + IL64(1)));
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_203), LOC19, 2);
{
NI i_536421_839829468;
NI HEX3Atmp_536501_839829468;
NI LOC21;
NI res_536504_839829468;
i_536421_839829468 = (NI)0;
HEX3Atmp_536501_839829468 = (NI)0;
LOC21 = (NI)0;
LOC21 = sonslen_295351_850551059(n0);
HEX3Atmp_536501_839829468 = (NI)(LOC21 - ((NI) 1));
res_536504_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* b0;
Ropeobj178006* tmp20;
Tnode292802* LOC24;
if (!(res_536504_839829468 <= HEX3Atmp_536501_839829468)) goto LA23;
i_536421_839829468 = res_536504_839829468;
b0 = (*n0).kindU.S6.sons->data[i_536421_839829468];
/* every branch gets its own node; its body is the last son */
tmp20 = getnimnode_535945_839829468(m0);
LOC24 = (Tnode292802*)0;
LOC24 = lastson_295364_850551059(b0);
genobjectfields_536104_839829468(m0, typ0, LOC24, tmp20);
switch ((*b0).kind) {
case ((Tnodekind292020) 85):
{
/* "of" branch: must carry at least one value before the body */
{
NI LOC28;
LOC28 = (NI)0;
LOC28 = sonslen_295351_850551059(b0);
if (!(LOC28 < ((NI) 2))) goto LA29;
internalerror_196100_155036129((*b0).info, ((NimStringDesc*) &T839829468_204));
}
LA29: ;
{
NI j_536436_839829468;
NI HEX3Atmp_536494_839829468;
NI LOC32;
NI res_536497_839829468;
j_536436_839829468 = (NI)0;
HEX3Atmp_536494_839829468 = (NI)0;
LOC32 = (NI)0;
LOC32 = sonslen_295351_850551059(b0);
HEX3Atmp_536494_839829468 = (NI)(LOC32 - ((NI) 2));
res_536497_839829468 = ((NI) 0);
{
while (1) {
if (!(res_536497_839829468 <= HEX3Atmp_536494_839829468)) goto LA34;
j_536436_839829468 = res_536497_839829468;
{
NI x0;
NI64 LOC39;
NI y0;
NI64 LOC40;
/* kind 44: ordinal range -- expand into one slot per value */
if (!((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kind == ((Tnodekind292020) 44))) goto LA37;
LOC39 = (NI64)0;
LOC39 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 0)]);
x0 = ((NI) (LOC39));
LOC40 = (NI64)0;
LOC40 = getordvalue_320129_3876443242((*(*b0).kindU.S6.sons->data[j_536436_839829468]).kindU.S6.sons->data[((NI) 1)]);
y0 = ((NI) (LOC40));
{
while (1) {
TY535238 LOC43;
if (!(x0 <= y0)) goto LA42;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = tmp0;
LOC43[1] = rope_178401_2381377266(((NI64) (x0)));
LOC43[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC43, 3);
x0 += ((NI) 1);
} LA42: ;
}
}
goto LA35;
LA37: ;
{
/* single ordinal value -> one table slot */
TY535238 LOC45;
NI64 LOC46;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = tmp0;
LOC46 = (NI64)0;
LOC46 = getordvalue_320129_3876443242((*b0).kindU.S6.sons->data[j_536436_839829468]);
LOC45[1] = rope_178401_2381377266(LOC46);
LOC45[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC45, 3);
}
LA35: ;
res_536497_839829468 += ((NI) 1);
} LA34: ;
}
}
}
break;
case ((Tnodekind292020) 88):
{
/* else branch: occupies the extra slot at index L0 */
TY535238 LOC48;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = tmp0;
LOC48[1] = rope_178401_2381377266(L0);
LOC48[2] = tmp20;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_140), LOC48, 3);
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_205));
}
break;
}
res_536504_839829468 += ((NI) 1);
} LA23: ;
}
}
}
break;
case ((Tnodekind292020) 3):
{
Tsym292834* field0;
field0 = (*n0).kindU.S4.sym;
{
TY536475 LOC55;
/* only non-bitfield fields (bitsize == 0) get an RTTI node */
if (!((*field0).kindU.S4.bitsize == ((NI) 0))) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = expr0;
LOC55[1] = gettypedesc_535673_839829468(m0, typ0);
LOC55[2] = (*field0).loc.r;
LOC55[3] = gentypeinfo_535941_839829468(m0, (*field0).typ);
LOC55[4] = makecstring_191638_155036129((*(*field0).name).s);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_206), LOC55, 5);
}
LA53: ;
}
break;
default:
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_207));
}
break;
}
}
/* Machine-generated (Nim C backend). Emits RTTI for an object type:
 *   - kind 17 (plain object) uses the aux generator; anything else (only
 *     reachable for non-17 kinds routed here by the caller) uses the aux
 *     *base* generator with the fixed base rope T839829468_18;
 *   - field nodes are generated unless the type is an imported C++ type;
 *   - the root node is attached via T839829468_144;
 *   - finally every base type up the inheritance chain (son 0, skipped
 *     through wrapper kinds) gets type flag 5 set -- presumably "needs
 *     full RTTI / has subtypes" (TODO confirm against compiler sources).
 */
N_NIMCALL(void, genobjectinfo_536508_839829468)(Tcgen529027* m0, Ttype292840* typ0, Ttype292840* origtype0, Ropeobj178006* name0) {
Ropeobj178006* tmp0;
TY532811 LOC12;
Ttype292840* t0;
{
if (!((*typ0).kind == ((Ttypekind292244) 17))) goto LA3;
gentypeinfoaux_536027_839829468(m0, typ0, origtype0, name0);
}
goto LA1;
LA3: ;
{
Ropeobj178006* LOC6;
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178277_2381377266(((NimStringDesc*) &T839829468_18));
gentypeinfoauxbase_535960_839829468(m0, typ0, origtype0, name0, LOC6);
}
LA1: ;
tmp0 = getnimnode_535945_839829468(m0);
/* imported C++ types have no introspectable field layout */
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
LOC9 = isimportedcpptype_533478_839829468(typ0);
if (!!(LOC9)) goto LA10;
genobjectfields_536104_839829468(m0, typ0, (*typ0).n, tmp0);
}
LA10: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = name0;
LOC12[1] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_144), LOC12, 2);
/* propagate flag 5 to every ancestor type */
t0 = (*typ0).sons->data[((NI) 0)];
{
while (1) {
if (!!((t0 == NIM_NIL))) goto LA14;
t0 = skiptypes_296099_850551059(t0, IL64(211106247215360));
(*t0).flags |= ((NU32)1)<<((((Ttypeflag292431) 5))%(sizeof(NU32)*8));
t0 = (*t0).sons->data[((NI) 0)];
} LA14: ;
}
}
/* Generate the code for deep-copy proc `s0`, then register its C name
 * (loc.r) against the type-info object `result0` via template
 * T839829468_208 in module file-section 14. Identical call sequence to
 * the generated original. */
N_NIMCALL(void, gendeepcopyproc_538066_839829468)(Tcgen529027* m0, Tsym292834* s0, Ropeobj178006* result0) {
  TY532811 fmtArgs;
  genproc_532951_839829468(m0, s0);
  memset((void*)fmtArgs, 0, sizeof(fmtArgs));
  fmtArgs[0] = result0;
  fmtArgs[1] = (*s0).loc.r;
  addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_208), fmtArgs, 2);
}
/* Machine-generated (Nim C backend). Main RTTI entry point: returns a rope
 * naming the type-info object for `t_535944_839829468`, generating it on
 * first use. Control flow visible in the code:
 *   - the name is built from the unique type's id (template T839829468_127)
 *     and, on every return path, wrapped between the fixed ropes
 *     T839829468_128 and T839829468_117 (presumably a cast/addr-of wrapper
 *     around the name -- TODO confirm);
 *   - a marker set short-circuits types already generated;
 *   - if the type's owning module is not `m0`, generation is delegated to
 *     that module's codegen and only an extern declaration (T839829468_131)
 *     is emitted here;
 *   - otherwise dispatch on the type kind selects the matching gen*info
 *     helper; two kinds with unexpected `n`/callconv combinations raise an
 *     internal error with the enum name spelled out via reprEnum;
 *   - finally a deepcopy proc is attached if either the unique type or the
 *     original type carries one (unique type wins).
 */
N_NIMCALL(Ropeobj178006*, gentypeinfo_535941_839829468)(Tcgen529027* m0, Ttype292840* t_535944_839829468) {
Ropeobj178006* result0;
Ttype292840* origtype0;
Ttype292840* t0;
TY178507 LOC1;
Tsym292834* owner0;
Ttype292840* LOC12;
Ropeobj178006* LOC66;
Ropeobj178006* LOC67;
Ropeobj178006* LOC68;
{ result0 = (Ropeobj178006*)0;
origtype0 = t_535944_839829468;
t0 = getuniquetype_528640_2036603609(t_535944_839829468);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rope_178401_2381377266(((NI64) ((*t0).Sup.id)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_127), LOC1, 1);
/* already generated? wrap the name and return immediately */
{
NIM_BOOL LOC4;
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
LOC4 = (NIM_BOOL)0;
LOC4 = containsorincl_268862_2627731572((&(*m0).typeinfomarker), (*t0).Sup.id);
if (!LOC4) goto LA5;
LOC7 = (Ropeobj178006*)0;
LOC7 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX26_178418_2381377266(LOC7, result0);
LOC9 = (Ropeobj178006*)0;
LOC9 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC8, LOC9);
goto BeforeRet;
}
LA5: ;
/* unwrap kind-13 wrappers down to the underlying type */
{
while (1) {
if (!((*t0).kind == ((Ttypekind292244) 13))) goto LA11;
t0 = lastson_295377_850551059(t0);
} LA11: ;
}
LOC12 = (Ttype292840*)0;
LOC12 = skiptypes_296099_850551059(t0, IL64(211106247256320));
owner0 = getmodule_299123_2984716966((*LOC12).owner);
/* foreign module owns the type: delegate generation, emit extern decl */
{
Tcgen529027* LOC17;
Ropeobj178006* LOC18;
Ropeobj178006* LOC19;
Ropeobj178006* LOC20;
TY532811 LOC21;
NimStringDesc* LOC22;
Ropeobj178006* LOC23;
Ropeobj178006* LOC24;
Ropeobj178006* LOC25;
if (!!((owner0 == (*m0).module))) goto LA15;
LOC17 = (Tcgen529027*)0;
LOC17 = bmod_529201_3723162438(owner0);
LOC18 = (Ropeobj178006*)0;
LOC18 = gentypeinfo_535941_839829468(LOC17, t0);
LOC19 = (Ropeobj178006*)0;
LOC19 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_129));
LOC20 = (Ropeobj178006*)0;
LOC20 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = result0;
LOC22 = (NimStringDesc*)0;
LOC22 = typetostring_320017_3876443242(t0, ((Tprefereddesc320011) 0));
LOC21[1] = rope_178277_2381377266(LOC22);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_131), LOC21, 2);
LOC23 = (Ropeobj178006*)0;
LOC23 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC24 = (Ropeobj178006*)0;
LOC24 = HEX26_178418_2381377266(LOC23, result0);
LOC25 = (Ropeobj178006*)0;
LOC25 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC24, LOC25);
goto BeforeRet;
}
LA15: ;
/* per-kind generation (raw ordinals; confirm against compiler sources) */
switch ((*t0).kind) {
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
}
break;
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 23):
{
/* simple kinds: base record only, with the fixed base rope */
Ropeobj178006* LOC28;
LOC28 = (Ropeobj178006*)0;
LOC28 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC28);
}
break;
case ((Ttypekind292244) 59):
{
/* kind 59 with a non-nil n delegates to its last son; otherwise it is
 * an internal error naming the kind */
{
Ttype292840* LOC34;
if (!!(((*t0).n == NIM_NIL))) goto LA32;
LOC34 = (Ttype292840*)0;
LOC34 = lastson_295377_850551059(t0);
result0 = gentypeinfo_535941_839829468(m0, LOC34);
}
goto LA30;
LA32: ;
{
NimStringDesc* LOC36;
LOC36 = (NimStringDesc*)0;
LOC36 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC36, ((NimStringDesc*) &T839829468_137));
appendString(LOC36, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC36, 41);
internalerror_196113_155036129(LOC36);
}
LA30: ;
}
break;
case ((Ttypekind292244) 25):
{
/* proc type: callconv 8 (closure) is modeled as a fake tuple; other
 * calling conventions get the plain base record */
{
Ropeobj178006* LOC42;
if (!!(((*t0).callconv == ((Tcallingconvention292002) 8)))) goto LA40;
LOC42 = (Ropeobj178006*)0;
LOC42 = rope_178277_2381377266(((NimStringDesc*) &T839829468_132));
gentypeinfoauxbase_535960_839829468(m0, t0, t0, result0, LOC42);
}
goto LA38;
LA40: ;
{
Ttype292840* LOC44;
LOC44 = (Ttype292840*)0;
LOC44 = fakeclosuretype_537010_839829468((*t0).owner);
gentupleinfo_536551_839829468(m0, LOC44, result0);
}
LA38: ;
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 22):
{
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
/* GC modes >= 4 additionally need a marker proc registered */
{
Ropeobj178006* markerproc0;
TY532811 LOC50;
if (!(((Tgcmode169080) 4) <= gselectedgc_169133_2607990831)) goto LA48;
markerproc0 = gentraverseproc_537632_839829468(m0, t0, ((Ttypeinforeason537016) 0));
memset((void*)LOC50, 0, sizeof(LOC50));
LOC50[0] = result0;
LOC50[1] = markerproc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_192), LOC50, 2);
}
LA48: ;
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 20):
{
gentypeinfoaux_536027_839829468(m0, t0, t0, result0);
}
break;
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 16):
{
genarrayinfo_537005_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 19):
{
gensetinfo_536867_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 14):
{
genenuminfo_536599_839829468(m0, t0, result0);
}
break;
case ((Ttypekind292244) 17):
{
genobjectinfo_536508_839829468(m0, t0, origtype0, result0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleinfo_536551_839829468(m0, t0, result0);
}
break;
default:
{
/* unexpected kind: internal error naming the kind */
NimStringDesc* LOC58;
LOC58 = (NimStringDesc*)0;
LOC58 = rawNewString(reprEnum((NI)(*t0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC58, ((NimStringDesc*) &T839829468_137));
appendString(LOC58, reprEnum((NI)(*t0).kind, (&NTI292244)));
appendChar(LOC58, 41);
internalerror_196113_155036129(LOC58);
}
break;
}
/* attach a deepcopy proc: unique type first, else the original type */
{
if (!!(((*t0).deepcopy == NIM_NIL))) goto LA61;
gendeepcopyproc_538066_839829468(m0, (*t0).deepcopy, result0);
}
goto LA59;
LA61: ;
{
if (!!(((*origtype0).deepcopy == NIM_NIL))) goto LA64;
gendeepcopyproc_538066_839829468(m0, (*origtype0).deepcopy, result0);
}
goto LA59;
LA64: ;
LA59: ;
/* wrap the name for the normal return path as well */
LOC66 = (Ropeobj178006*)0;
LOC66 = rope_178277_2381377266(((NimStringDesc*) &T839829468_128));
LOC67 = (Ropeobj178006*)0;
LOC67 = HEX26_178418_2381377266(LOC66, result0);
LOC68 = (Ropeobj178006*)0;
LOC68 = rope_178277_2381377266(((NimStringDesc*) &T839829468_117));
result0 = HEX26_178418_2381377266(LOC67, LOC68);
}BeforeRet: ;
return result0;
}
/* Machine-generated (Nim C backend). Registers local symbol `s0` in the
 * current proc's debug frame:
 *   - bail out unless both option bits in mask 163840 are set on the proc;
 *   - bail out for types whose kind is in the 281475110928384 bitmask
 *     (kinds with no runtime representation worth registering -- TODO
 *     confirm which kinds these ordinals are);
 *   - the registered expression is "&"-prefixed (T839829468_52) except for
 *     kind-3 symbols where ccgintroducedptr says the C parameter is
 *     already a pointer;
 *   - emits frame-slot line T839829468_126 (slot index, normalized name,
 *     expression, type info) and bumps both the proc's max frame length
 *     and the innermost block's frame length.
 */
N_NIMCALL(void, localdebuginfo_538449_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* a0;
TY535235 LOC16;
NimStringDesc* LOC17;
{ {
if (!!(((163840 & (*p0).options) == 163840))) goto LA3;
goto BeforeRet;
}
LA3: ;
{
Ttype292840* LOC7;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*s0).typ, IL64(211106240964864));
if (!((IL64(281475110928384) &((NU64)1<<((NU)((*LOC7).kind)&63U)))!=0)) goto LA8;
goto BeforeRet;
}
LA8: ;
a0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), (*s0).loc.r);
/* kind-3 params that are already pointers use the bare location rope */
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*s0).kind == ((Tsymkind292435) 3));
if (!(LOC12)) goto LA13;
LOC12 = ccgintroducedptr_533611_839829468(s0);
LA13: ;
if (!LOC12) goto LA14;
a0 = (*s0).loc.r;
}
LA14: ;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
LOC17 = (NimStringDesc*)0;
LOC17 = nsuNormalize((*(*s0).name).s);
LOC16[1] = makecstring_191638_155036129(LOC17);
LOC16[2] = a0;
LOC16[3] = gentypeinfo_535941_839829468((*p0).module, (*s0).loc.t);
linef_532700_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_126), LOC16, 4);
(*p0).maxframelen += ((NI) 1);
(*p0).blocks->data[(NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1))].framelen += ((NI) 1);
}BeforeRet: ;
}
/* Emit the C declaration for local variable `s0` into proc `p0`'s
   declaration section (section 0) and register it with the local
   debug-info machinery. */
N_NIMCALL(void, assignlocalvar_538614_839829468)(Tcproc529021* p0, Tsym292834* s0) {
Ropeobj178006* declRope;
Ropeobj178006* withSuffix;
Ropeobj178006* fullDecl;
/* Build "<decl>" & T..._125 & tnl: declaration text, terminator, newline. */
declRope = localvardecl_538532_839829468(p0, s0);
withSuffix = HEX26_178447_2381377266(declRope, ((NimStringDesc*) &T839829468_125));
fullDecl = HEX26_178447_2381377266(withSuffix, tnl_176644_4151366050);
line_532690_839829468(p0, ((Tcprocsection529011) 0), fullDecl);
localdebuginfo_538449_839829468(p0, s0);
}
/* Default-construct local `v0`'s location unless the symbol carries
   flag bit 12 (no default init needed) or an immediate assignment is
   about to overwrite the value anyway. */
N_NIMCALL(void, initlocalvar_538398_839829468)(Tcproc529021* p0, Tsym292834* v0, NIM_BOOL immediateasgn0) {
NIM_BOOL noInitFlag;
noInitFlag = ((((*v0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
if (!noInitFlag && !immediateasgn0) {
/* No assignment follows, so emit explicit default construction. */
constructloc_538388_839829468(p0, (&(*v0).loc), NIM_FALSE);
}
}
/* Fill in the location of a proc's `result` symbol: loc kind 4,
   storage class 2, named via the fixed format string T..._210.
   When the mapped C return type is not kind 17 but the Nim return type
   still cannot be returned by value, mark the loc with flag bit 0 and
   reset its storage class to 0 (the value travels through a hidden
   pointer parameter instead). */
N_NIMCALL(void, fillresult_533865_839829468)(Tsym292834* param0) {
TY533289 noArgs;
Ropeobj178006* resultName;
memset((void*)noArgs, 0, sizeof(noArgs));
resultName = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_210), noArgs, 0);
fillloc_532282_839829468((&(*param0).loc), ((Tlockind292808) 4), (*param0).typ, resultName, ((Tstorageloc292812) 2));
if (!(mapreturntype_533447_839829468((*param0).typ) == ((Tctypekind529007) 17))
    && isinvalidreturntype_533550_839829468((*param0).typ)) {
(*param0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
(*param0).loc.s = ((Tstorageloc292812) 0);
}
}
/* A parameter's location is already filled in by the proc header
   generator; only local debug information remains to be emitted. */
N_NIMCALL(void, assignparam_538994_839829468)(Tcproc529021* p0, Tsym292834* s0) {
localdebuginfo_538449_839829468(p0, s0);
}
/* For procs whose type carries flag bit 11 (closure-like), declare the
   hidden environment variable and emit its initialization (format
   T..._212) into section 2.  The environment symbol is expected to be
   the last son of ast[3]; anything else is an internal error (T..._211). */
N_NIMCALL(void, closuresetup_560158_839829468)(Tcproc529021* p0, Tsym292834* prc0) {
Tnode292802* ls0;
Tnode292802* LOC5;
Tsym292834* env0;
TY532811 LOC10;
{ {
/* Nothing to do unless type flag bit 11 is set. */
if (!!((((*(*prc0).typ).flags &(1U<<((NU)(((Ttypeflag292431) 11))&31U)))!=0))) goto LA3;
goto BeforeRet;
}
LA3: ;
LOC5 = (Tnode292802*)0;
LOC5 = HEX5BHEX5D_293238_850551059((*prc0).ast, ((NI) 3));
ls0 = lastson_295364_850551059(LOC5);
{
/* The environment must be a symbol node (node kind 3). */
if (!!(((*ls0).kind == ((Tnodekind292020) 3)))) goto LA8;
internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_211));
}
LA8: ;
env0 = (*ls0).kindU.S4.sym;
assignlocalvar_538614_839829468(p0, env0);
/* Initialize the environment: its location rope and C type descriptor. */
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((&(*env0).loc));
LOC10[1] = gettypedesc_535673_839829468((*p0).module, (*env0).typ);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_212), LOC10, 2);
}BeforeRet: ;
}
/* Emit the GC-frame setup code for `p0` (format T..._217 with the
   proc's gcframetype), or NIL when no GC frame slots were allocated
   (gcframeid == 0). */
N_NIMCALL(Ropeobj178006*, initgcframe_538435_839829468)(Tcproc529021* p0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
TY178507 fmtArgs;
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = (*p0).gcframetype;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_217), fmtArgs, 1);
}
return result0;
}
/* Build the stack-frame registration prologue for the current proc.
   The cgsym calls register runtime support symbols (T..._218 always,
   T..._219 additionally in the debug-slot case) with the module as a
   side effect; their return values are intentionally unused.  When the
   proc tracks debug slots (maxframelen > 0) the four-argument format
   T..._220 is used, otherwise the two-argument format T..._221. */
N_NIMCALL(Ropeobj178006*, initframe_560140_839829468)(Tcproc529021* p0, Ropeobj178006* procname0, Ropeobj178006* filename0) {
Ropeobj178006* result0;
Ropeobj178006* baseSym;
result0 = (Ropeobj178006*)0;
baseSym = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_218));
(void)baseSym;
if (((NI) 0) < (*p0).maxframelen) {
Ropeobj178006* slotSym;
TY535235 args4;
slotSym = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_219));
(void)slotSym;
memset((void*)args4, 0, sizeof(args4));
args4[0] = procname0;
args4[1] = filename0;
args4[2] = rope_178401_2381377266(((NI64) ((*p0).maxframelen)));
args4[3] = rope_178401_2381377266(((NI64) ((*p0).blocks->data[((NI) 0)].framelen)));
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_220), args4, 4);
} else {
TY532811 args2;
memset((void*)args2, 0, sizeof(args2));
args2[0] = procname0;
args2[1] = filename0;
result0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_221), args2, 2);
}
return result0;
}
/* Render `frmt0` with `args0` through the code generator's rope
   formatter (resolving any #compilerproc references against the proc's
   module) and append the result to section `s0` of proc `p0`. */
N_NIMCALL(void, appcg_532648_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006** sectionRope;
Ropeobj178006* formatted;
sectionRope = s_529179_3723162438(p0, s0);
formatted = ropecg_532407_839829468((*p0).module, frmt0, args0, args0Len0);
add_178482_2381377266(sectionRope, formatted);
}
/* Emit the GC-frame teardown code (format T..._225, no arguments), or
   NIL when the proc never set a GC frame up (gcframeid == 0). */
N_NIMCALL(Ropeobj178006*, deinitgcframe_538441_839829468)(Tcproc529021* p0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
if (((NI) 0) < ((NI) ((*p0).gcframeid))) {
TY533289 noArgs;
memset((void*)noArgs, 0, sizeof(noArgs));
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_225), noArgs, 0);
}
return result0;
}
/* Emit the unconditional stack-frame pop for the proc epilogue
   (format T..._226, no arguments). */
N_NIMCALL(Ropeobj178006*, deinitframe_560150_839829468)(Tcproc529021* p0) {
TY533289 noArgs;
memset((void*)noArgs, 0, sizeof(noArgs));
return ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_226), noArgs, 0);
}
/* Generate the full C body of proc `prc0` into module `m0` and append
   it to file section 10.  Handles the `result` variable (with an
   optimized single-assignment path), emits debug info for parameters,
   sets up the closure environment, generates the statement body, and
   finally assembles header + declarations + init + body (+ frame and
   GC-frame setup/teardown, stack-check, profiler hook, BeforeRet label,
   return statement) into one rope. */
N_NIMCALL(void, genprocaux_560284_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
Tcproc529021* p0;
Ropeobj178006* header0;
Ropeobj178006* returnstmt0;
Tnode292802* LOC51;
Ropeobj178006* generatedproc0;
p0 = newproc_529206_3723162438(prc0, m0);
header0 = genprocheader_535867_839829468(m0, prc0);
returnstmt0 = NIM_NIL;
{
NIM_BOOL LOC3;
Tsym292834* res0;
LOC3 = (NIM_BOOL)0;
/* Proc has a result: flag bit 9 clear and a non-nil return type slot. */
LOC3 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
if (!(LOC3)) goto LA4;
LOC3 = !(((*(*prc0).typ).sons->data[((NI) 0)] == NIM_NIL));
LA4: ;
if (!LOC3) goto LA5;
{
NI LOC9;
LOC9 = (NI)0;
LOC9 = len_293081_850551059((*prc0).ast);
/* The result symbol lives at ast[7]; fewer sons is an internal error. */
if (!(LOC9 <= ((NI) 7))) goto LA10;
internalerror_196100_155036129((*prc0).info, ((NimStringDesc*) &T839829468_120));
}
LA10: ;
res0 = (*(*(*prc0).ast).kindU.S6.sons->data[((NI) 7)]).kindU.S4.sym;
{
NIM_BOOL LOC14;
TY178507 LOC34;
LOC14 = (NIM_BOOL)0;
LOC14 = isinvalidreturntype_533550_839829468((*(*prc0).typ).sons->data[((NI) 0)]);
if (!!(LOC14)) goto LA15;
/* Valid C return type: `result` is an ordinary local. */
{
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA19;
(*res0).flags |= ((NU32)1)<<((((Tsymflag292184) 12))%(sizeof(NU32)*8));
}
LA19: ;
{
NIM_BOOL LOC23;
NIM_BOOL LOC24;
NIM_BOOL LOC26;
Tnode292802* val0;
Tnode292802* LOC29;
Ropeobj178006* decl0;
Tloc292816 a0;
TY532811 LOC32;
LOC23 = (NIM_BOOL)0;
LOC24 = (NIM_BOOL)0;
/* Optimization: when flag bit 12 is set and we target command 2 or a
   module with flag bit 27, and the body is a single result assignment,
   declare and initialize `result` in one statement (format T..._123). */
LOC24 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
if (!(LOC24)) goto LA25;
LOC26 = (NIM_BOOL)0;
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
LOC24 = LOC26;
LA25: ;
LOC23 = LOC24;
if (!(LOC23)) goto LA28;
LOC29 = (Tnode292802*)0;
LOC29 = getbody_335226_1724185294(prc0);
val0 = easyresultasgn_560191_839829468(LOC29);
LOC23 = !((val0 == NIM_NIL));
LA28: ;
if (!LOC23) goto LA30;
decl0 = localvardecl_538532_839829468(p0, res0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexprsingleuse_539289_839829468(p0, val0, (&a0));
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = decl0;
LOC32[1] = rdloc_538188_839829468((&a0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC32, 2);
}
goto LA21;
LA30: ;
{
/* General path: declare and default-initialize `result`. */
assignlocalvar_538614_839829468(p0, res0);
initlocalvar_538398_839829468(p0, res0, NIM_FALSE);
}
LA21: ;
/* The return statement yields the result location (format T..._209). */
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468((&(*res0).loc));
returnstmt0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_209), LOC34, 1);
}
goto LA12;
LA15: ;
{
/* Invalid C return type: `result` becomes a hidden parameter. */
fillresult_533865_839829468(res0);
assignparam_538994_839829468(p0, res0);
{
Ttype292840* LOC38;
LOC38 = (Ttype292840*)0;
LOC38 = skiptypes_296099_850551059((*res0).typ, IL64(211106232576256));
if (!((*LOC38).kind == ((Ttypekind292244) 16))) goto LA39;
(*res0).loc.s = ((Tstorageloc292812) 0);
}
LA39: ;
}
LA12: ;
}
LA5: ;
{
/* Emit debug info for every non-compile-time-only formal parameter. */
NI i_560627_839829468;
NI HEX3Atmp_560743_839829468;
NI LOC42;
NI res_560746_839829468;
i_560627_839829468 = (NI)0;
HEX3Atmp_560743_839829468 = (NI)0;
LOC42 = (NI)0;
LOC42 = sonslen_295351_850551059((*(*prc0).typ).n);
HEX3Atmp_560743_839829468 = (NI)(LOC42 - ((NI) 1));
res_560746_839829468 = ((NI) 1);
{
while (1) {
if (!(res_560746_839829468 <= HEX3Atmp_560743_839829468)) goto LA44;
i_560627_839829468 = res_560746_839829468;
{
Tsym292834* param0;
param0 = (*(*(*(*prc0).typ).n).kindU.S6.sons->data[i_560627_839829468]).kindU.S4.sym;
{
NIM_BOOL LOC48;
LOC48 = (NIM_BOOL)0;
LOC48 = iscompiletimeonly_328706_3876443242((*param0).typ);
if (!LOC48) goto LA49;
goto LA45;
}
LA49: ;
assignparam_538994_839829468(p0, param0);
} LA45: ;
res_560746_839829468 += ((NI) 1);
} LA44: ;
}
}
closuresetup_560158_839829468(p0, prc0);
LOC51 = (Tnode292802*)0;
LOC51 = getbody_335226_1724185294(prc0);
genstmts_539244_839829468(p0, LOC51);
generatedproc0 = (Ropeobj178006*)0;
{
/* Flag bit 14 plus compiler capability bit 6: prepend modifier T..._213. */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0)) goto LA54;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA58;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA58: ;
}
LA54: ;
{
TY535235 LOC68;
Ropeobj178006** LOC69;
Ropeobj178006** LOC70;
Ropeobj178006** LOC71;
/* Flag bit 9 (no result): simple assembly via format T..._215 from the
   header and the three proc sections (0=decls, 1=init, 2=statements). */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA62;
{
if (!((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0)) goto LA66;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_214), header0);
}
LA66: ;
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = header0;
LOC69 = (Ropeobj178006**)0;
LOC69 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC68[1] = (*LOC69);
LOC70 = (Ropeobj178006**)0;
LOC70 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC68[2] = (*LOC70);
LOC71 = (Ropeobj178006**)0;
LOC71 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC68[3] = (*LOC71);
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_215), LOC68, 4);
}
goto LA60;
LA62: ;
{
/* General assembly: header, GC frame, (optional) frame registration,
   declarations, (optional) stack check, BeforeRet label handling,
   init + statement sections, teardown, return statement, closer. */
TY178507 LOC73;
Ropeobj178006* LOC74;
Ropeobj178006** LOC93;
Ropeobj178006** LOC94;
Ropeobj178006* LOC101;
TY533289 LOC107;
Ropeobj178006* LOC108;
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = header0;
generatedproc0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_216), LOC73, 1);
LOC74 = (Ropeobj178006*)0;
LOC74 = initgcframe_538435_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC74);
{
Ropeobj178006** LOC79;
Ropeobj178006* procname0;
Ropeobj178006* LOC80;
Ropeobj178006* LOC81;
/* Option bit 15: emit stack-frame registration with proc name/file. */
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA77;
LOC79 = (Ropeobj178006**)0;
LOC79 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC79));
procname0 = makecstring_191638_155036129((*(*prc0).name).s);
LOC80 = (Ropeobj178006*)0;
LOC80 = quotedfilename_196818_155036129((*prc0).info);
LOC81 = (Ropeobj178006*)0;
LOC81 = initframe_560140_839829468(p0, procname0, LOC80);
add_178482_2381377266(&generatedproc0, LOC81);
}
goto LA75;
LA77: ;
{
Ropeobj178006** LOC83;
LOC83 = (Ropeobj178006**)0;
LOC83 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&generatedproc0, (*LOC83));
}
LA75: ;
{
TY533289 LOC88;
/* Option bit 19: emit the check from format T..._222 into section 1. */
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA86;
memset((void*)LOC88, 0, sizeof(LOC88));
appcg_532648_839829468(p0, ((Tcprocsection529011) 1), ((NimStringDesc*) &T839829468_222), LOC88, 0);
}
LA86: ;
{
if (!(*p0).beforeretneeded) goto LA91;
add_178487_2381377266(&generatedproc0, ((NimStringDesc*) &T839829468_223));
}
LA91: ;
LOC93 = (Ropeobj178006**)0;
LOC93 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&generatedproc0, (*LOC93));
LOC94 = (Ropeobj178006**)0;
LOC94 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&generatedproc0, (*LOC94));
{
TY533289 LOC99;
Ropeobj178006* LOC100;
/* BeforeRet target (format T..._224) when any `return` jumps here. */
if (!(*p0).beforeretneeded) goto LA97;
memset((void*)LOC99, 0, sizeof(LOC99));
LOC100 = (Ropeobj178006*)0;
LOC100 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_224), LOC99, 0);
add_178482_2381377266(&generatedproc0, LOC100);
}
LA97: ;
LOC101 = (Ropeobj178006*)0;
LOC101 = deinitgcframe_538441_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC101);
{
Ropeobj178006* LOC106;
if (!(((*prc0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA104;
LOC106 = (Ropeobj178006*)0;
LOC106 = deinitframe_560150_839829468(p0);
add_178482_2381377266(&generatedproc0, LOC106);
}
LA104: ;
add_178482_2381377266(&generatedproc0, returnstmt0);
memset((void*)LOC107, 0, sizeof(LOC107));
LOC108 = (Ropeobj178006*)0;
LOC108 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_227), LOC107, 0);
add_178482_2381377266(&generatedproc0, LOC108);
}
LA60: ;
/* The finished proc goes into file section 10 of the module. */
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
}
/* Map symbol `s0` back to the code-generator object of the module that
   declares it, via the owning module's position in the global module
   list.  (`m0` is unused but kept for the established signature.) */
N_NIMCALL(Tcgen529027*, findpendingmodule_532241_839829468)(Tcgen529027* m0, Tsym292834* s0) {
Tsym292834* owningModule;
owningModule = getmodule_299123_2984716966(s0);
return gmodules_529170_3723162438->data[(*owningModule).position];
}
/* Does `lib0`'s path expression look like a GetProcAddress-style call?
   True when the path node has one of the call-like node kinds
   (26..32), carries a non-nil type, and that type's kind is inside the
   bit mask 100663296 (bits 25 and 26 of the type-kind enum). */
N_NIMCALL(NIM_BOOL, isgetprocaddr_559443_839829468)(Tlib292820* lib0) {
Tnode292802* pathNode;
NIM_BOOL callLike;
pathNode = (*lib0).path;
callLike = ((*pathNode).kind == ((Tnodekind292020) 27) || (*pathNode).kind == ((Tnodekind292020) 29) || (*pathNode).kind == ((Tnodekind292020) 30) || (*pathNode).kind == ((Tnodekind292020) 31) || (*pathNode).kind == ((Tnodekind292020) 26) || (*pathNode).kind == ((Tnodekind292020) 28) || (*pathNode).kind == ((Tnodekind292020) 32));
/* Short-circuits exactly like the original: the type is only
   dereferenced after the nil check succeeds. */
return (NIM_BOOL)(callLike
	&& !((*pathNode).typ == NIM_NIL)
	&& ((100663296 &((NU64)1<<((NU)((*(*pathNode).typ).kind)&63U)))!=0));
}
/* Prepare `result0` as a fresh location (kind 0, default storage) typed
   after expression `e0`, then let the expression generator fill it in. */
N_NIMCALL(void, initlocexpr_539283_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* result0) {
initloc_532273_839829468(result0, ((Tlockind292808) 0), (*e0).typ, ((Tstorageloc292812) 0));
expr_539248_839829468(p0, e0, result0);
}
/* Ensure the dynamic library `lib0` is loaded at program startup.
   On first call: allocate a temp handle variable (format T..._228,
   section 9).  String-literal paths (node kinds 20..22) expand into a
   candidate list, each tried in turn (formats T..._229/_230/_231);
   computed paths are evaluated in a throwaway proc and loaded via
   format T..._232 into section 16.  A nil library name afterwards is
   an internal error (T..._233). */
N_NIMCALL(void, loaddynamiclib_559481_839829468)(Tcgen529027* m0, Tlib292820* lib0) {
{
Ropeobj178006* tmp0;
TY178507 LOC5;
/* Only generate the loading code once per library. */
if (!!((*lib0).generated)) goto LA3;
(*lib0).generated = NIM_TRUE;
tmp0 = gettempname_533598_839829468(m0);
asgnRefNoCycle((void**) (&(*lib0).name), tmp0);
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = tmp0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_228), LOC5, 1);
{
TY135002* s0;
Ropeobj178006* loadlib0;
TY532811 LOC18;
/* Literal path: expand candidates and emit a try-each-candidate chain. */
if (!((*(*lib0).path).kind >= ((Tnodekind292020) 20) && (*(*lib0).path).kind <= ((Tnodekind292020) 22))) goto LA8;
s0 = (TY135002*) newSeq((&NTI135002), 0);
libcandidates_170605_2607990831((*(*lib0).path).kindU.S3.strval, (&s0));
rawmessage_194612_155036129(((Tmsgkind191002) 286), (*(*lib0).path).kindU.S3.strval);
loadlib0 = NIM_NIL;
{
NI i_559847_839829468;
NI HEX3Atmp_559902_839829468;
NI res_559905_839829468;
i_559847_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (NI)0;
HEX3Atmp_559902_839829468 = (s0 ? (s0->Sup.len-1) : -1);
res_559905_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC17;
if (!(res_559905_839829468 <= HEX3Atmp_559902_839829468)) goto LA12;
i_559847_839829468 = res_559905_839829468;
(*m0).labels += ((NI) 1);
{
/* Candidates after the first are joined with separator T..._229. */
if (!(((NI) 0) < i_559847_839829468)) goto LA15;
add_178487_2381377266(&loadlib0, ((NimStringDesc*) &T839829468_229));
}
LA15: ;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = getstrlit_549468_839829468(m0, s0->data[i_559847_839829468]);
appcg_532632_839829468(m0, &loadlib0, ((NimStringDesc*) &T839829468_230), LOC17, 2);
res_559905_839829468 += ((NI) 1);
} LA12: ;
}
}
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = loadlib0;
LOC18[1] = getstrlit_549468_839829468(m0, (*(*lib0).path).kindU.S3.strval);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_231), LOC18, 2);
}
goto LA6;
LA8: ;
{
/* Computed path: evaluate it inside a fresh proc context with the
   debug options (mask 163840) stripped, then load via T..._232. */
Tcproc529021* p0;
Tloc292816 dest0;
Ropeobj178006** LOC20;
Ropeobj178006** LOC21;
Ropeobj178006** LOC22;
TY532811 LOC23;
p0 = newproc_529206_3723162438(NIM_NIL, m0);
(*p0).options = ((*p0).options & ~ 163840);
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*lib0).path, (&dest0));
LOC20 = (Ropeobj178006**)0;
LOC20 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], (*LOC20));
LOC21 = (Ropeobj178006**)0;
LOC21 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC21));
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 16))- 0], (*LOC22));
memset((void*)LOC23, 0, sizeof(LOC23));
LOC23[0] = tmp0;
LOC23[1] = rdloc_538188_839829468((&dest0));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_232), LOC23, 2);
}
LA6: ;
}
LA3: ;
{
/* The library must have been named by now. */
if (!((*lib0).name == NIM_NIL)) goto LA26;
internalerror_196113_155036129(((NimStringDesc*) &T839829468_233));
}
LA26: ;
}
/* Produce the C identifier used for `sym0`'s dynlib proc-pointer
   variable.  Symbols carrying flag bit 16 keep their Nim name verbatim;
   all others get a name derived from the unique symbol id via format
   string T..._234. */
N_NIMCALL(Ropeobj178006*, mangledynlibproc_538816_839829468)(Tsym292834* sym0) {
Ropeobj178006* result0;
if ((((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 16))&31U)))!=0)) {
result0 = rope_178277_2381377266((*(*sym0).name).s);
} else {
TY178507 fmtArgs;
memset((void*)fmtArgs, 0, sizeof(fmtArgs));
fmtArgs[0] = rope_178401_2381377266(((NI64) ((*sym0).Sup.id)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_234), fmtArgs, 1);
}
return result0;
}
/* Bind dynlib symbol `sym0` into module `m0`.  The symbol's location
   is renamed to the mangled dynlib proc-pointer variable, and either a
   GetProcAddress-style call (when the lib path is such a call) or a
   plain symbol lookup on the loaded library (format T..._238) is
   emitted; finally the pointer variable itself is declared (T..._239).
   BUG FIX: the two `add_...(&params0, ...)` calls in the argument loop
   had been corrupted by an HTML-entity mangling of "&para" into the
   pilcrow character ("¶ms0"), which is not valid C; the intended
   `&params0` is restored. */
N_NIMCALL(void, symindynamiclib_559929_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
NIM_BOOL iscall0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY532811 LOC43;
lib0 = (*sym0).annex;
iscall0 = isgetprocaddr_559443_839829468(lib0);
extname0 = (*sym0).loc.r;
{
/* Only non-call libs need explicit loading code generated first. */
if (!!(iscall0)) goto LA3;
loaddynamiclib_559481_839829468(m0, lib0);
}
LA3: ;
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
(*m0).labels += ((NI) 2);
{
Tnode292802* n0;
Tloc292816 a0;
Tnode292802* LOC9;
Ropeobj178006* params0;
Ropeobj178006* LOC10;
Ropeobj178006* load0;
TY535235 LOC17;
NimStringDesc* LOC18;
Tnode292802* last0;
NimStringDesc* idx0;
/* GetProcAddress-style: evaluate the call's arguments in the module's
   init proc and build the parameter list for the loader call. */
if (!iscall0) goto LA7;
n0 = (*lib0).path;
memset((void*)(&a0), 0, sizeof(a0));
LOC9 = (Tnode292802*)0;
LOC9 = HEX5BHEX5D_293238_850551059(n0, ((NI) 0));
initlocexpr_539283_839829468((*m0).initproc, LOC9, (&a0));
LOC10 = (Ropeobj178006*)0;
LOC10 = rdloc_538188_839829468((&a0));
params0 = HEX26_178447_2381377266(LOC10, ((NimStringDesc*) &T839829468_118));
{
NI i_559964_839829468;
NI HEX3Atmp_560025_839829468;
NI LOC12;
NI res_560028_839829468;
i_559964_839829468 = (NI)0;
HEX3Atmp_560025_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = len_293081_850551059(n0);
HEX3Atmp_560025_839829468 = (NI)(LOC12 - ((NI) 2));
res_560028_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* LOC15;
Ropeobj178006* LOC16;
if (!(res_560028_839829468 <= HEX3Atmp_560025_839829468)) goto LA14;
i_559964_839829468 = res_560028_839829468;
LOC15 = (Tnode292802*)0;
LOC15 = HEX5BHEX5D_293238_850551059(n0, i_559964_839829468);
initlocexpr_539283_839829468((*m0).initproc, LOC15, (&a0));
LOC16 = (Ropeobj178006*)0;
LOC16 = rdloc_538188_839829468((&a0));
/* Fixed: was the mojibake "¶ms0" for "&params0" in both calls. */
add_178482_2381377266(&params0, LOC16);
add_178487_2381377266(&params0, ((NimStringDesc*) &T839829468_110));
res_560028_839829468 += ((NI) 1);
} LA14: ;
}
}
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = tmp0;
LOC17[1] = gettypedesc_535673_839829468(m0, (*sym0).typ);
LOC17[2] = params0;
LOC18 = (NimStringDesc*)0;
LOC18 = HEX24_178856_2381377266(extname0);
LOC17[3] = makecstring_191638_155036129(LOC18);
load0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_235), LOC17, 4);
last0 = lastson_295364_850551059(n0);
{
if (!((*last0).kind == ((Tnodekind292020) 58))) goto LA21;
last0 = (*last0).kindU.S6.sons->data[((NI) 1)];
}
LA21: ;
{
NimStringDesc* LOC27;
/* The last argument must be a string literal (node kind 20). */
if (!!(((*last0).kind == ((Tnodekind292020) 20)))) goto LA25;
LOC27 = (NimStringDesc*)0;
LOC27 = HEX24_196185_1689653243(T839829468_236);
internalerror_196113_155036129(LOC27);
}
LA25: ;
idx0 = (*last0).kindU.S3.strval;
{
Ropeobj178006** LOC32;
/* Empty index: run the loader in the init proc's statement section. */
if (!((idx0 ? idx0->Sup.len : 0) == ((NI) 0))) goto LA30;
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC32, load0);
}
goto LA28;
LA30: ;
{
NIM_BOOL LOC34;
/* Single digit '0'..'9': defer to the matching extension loader slot. */
LOC34 = (NIM_BOOL)0;
LOC34 = ((idx0 ? idx0->Sup.len : 0) == ((NI) 1));
if (!(LOC34)) goto LA35;
LOC34 = (((NU8)(idx0->data[((NI) 0)])) >= ((NU8)(48)) && ((NU8)(idx0->data[((NI) 0)])) <= ((NU8)(57)));
LA35: ;
if (!LOC34) goto LA36;
add_178482_2381377266(&(*m0).extensionloaders[(((NU8)(idx0->data[((NI) 0)])))- 48], load0);
}
goto LA28;
LA36: ;
{
NimStringDesc* LOC39;
/* Anything else is an internal error (prefix T..._237). */
LOC39 = (NimStringDesc*)0;
LOC39 = rawNewString(idx0->Sup.len + 13);
appendString(LOC39, ((NimStringDesc*) &T839829468_237));
appendString(LOC39, idx0);
internalerror_196100_155036129((*sym0).info, LOC39);
}
LA28: ;
}
goto LA5;
LA7: ;
{
/* Plain dynlib symbol: look it up on the loaded library handle. */
TY535235 LOC41;
NimStringDesc* LOC42;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = gettypedesc_535673_839829468(m0, (*sym0).typ);
LOC41[2] = (*lib0).name;
LOC42 = (NimStringDesc*)0;
LOC42 = HEX24_178856_2381377266(extname0);
LOC41[3] = makecstring_191638_155036129(LOC42);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_238), LOC41, 4);
}
LA5: ;
/* Declare the proc-pointer variable itself in section 9. */
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*sym0).loc.r;
LOC43[1] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_239), LOC43, 2);
}
/* Partial dynlib handling: rename the symbol's location rope to the
   mangled dynlib proc-pointer name and detach the type's symbol
   back-pointer.  (`m0` is unused but kept for the established signature.) */
N_NIMCALL(void, symindynamiclibpartial_560071_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Ropeobj178006* mangled;
mangled = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), mangled);
asgnRefNoCycle((void**) (&(*(*sym0).typ).sym), NIM_NIL);
}
/* Generate proc `prc0` without creating a forward entry.  Dispatches on
   the symbol's location flags: header-imported procs only register the
   symbol; inline procs (callconv 5) are generated locally once per
   module; dynlib procs (loc flag 4) are bound in their declaring
   module; everything else is generated once in its declaring module. */
N_NIMCALL(void, genprocnoforward_560906_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ fillprocloc_539201_839829468(prc0);
useheader_532369_839829468(m0, prc0);
{
Ropeobj178006* LOC5;
/* Loc flag 7: proc comes from a header; just pull the symbol in. */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 7))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = cgsym_532403_839829468(m0, (*(*prc0).name).s);
goto BeforeRet;
}
LA3: ;
genprocprototype_539254_839829468(m0, prc0);
{
/* Loc flag 3: nothing further to generate. */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8;
}
goto LA6;
LA8: ;
{
/* Calling convention 5 (inline): generate the body into THIS module,
   guarded by the per-module declaredthings set. */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA11;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*prc0).Sup.id);
if (!!(LOC15)) goto LA16;
genprocaux_560284_839829468(m0, prc0);
}
LA16: ;
}
goto LA6;
LA11: ;
{
Tcgen529027* q0;
/* Loc flag 4: dynlib proc — bind it in its declaring module (full
   binding on first sight, partial rename otherwise). */
if (!(((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA19;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC23;
NIM_BOOL LOC25;
LOC23 = (NIM_BOOL)0;
LOC23 = !((q0 == NIM_NIL));
if (!(LOC23)) goto LA24;
LOC25 = (NIM_BOOL)0;
LOC25 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC23 = !(LOC25);
LA24: ;
if (!LOC23) goto LA26;
symindynamiclib_559929_839829468(q0, prc0);
}
goto LA21;
LA26: ;
{
symindynamiclibpartial_560071_839829468(m0, prc0);
}
LA21: ;
}
goto LA6;
LA19: ;
{
Tcgen529027* q0;
/* Ordinary proc (flag bit 5 clear): generate the body once, in the
   module that declares it. */
if (!!((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0))) goto LA30;
q0 = findpendingmodule_532241_839829468(m0, prc0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC36;
LOC34 = (NIM_BOOL)0;
LOC34 = !((q0 == NIM_NIL));
if (!(LOC34)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*prc0).Sup.id);
LOC34 = !(LOC36);
LA35: ;
if (!LOC34) goto LA37;
genprocaux_560284_839829468(q0, prc0);
}
LA37: ;
}
goto LA6;
LA30: ;
LA6: ;
}BeforeRet: ;
}
/* Top-level entry: generate proc `prc0` into module `m0`.  Skips
   symbols with flag bit 26 or not yet activated; forward-declared
   procs (flag bit 4) are queued instead of generated.  Exported procs
   (flags mask 65600 equal to 64) additionally get a prototype in the
   generated header, and inline exported procs a body there too. */
N_NIMCALL(void, genproc_532951_839829468)(Tcgen529027* m0, Tsym292834* prc0) {
{ {
NIM_BOOL LOC3;
NIM_BOOL LOC5;
LOC3 = (NIM_BOOL)0;
LOC3 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 26))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isactivated_561431_839829468(prc0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
goto BeforeRet;
}
LA6: ;
fillprocloc_539201_839829468(prc0);
{
/* Flag bit 4: forward declaration — defer generation. */
if (!(((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) goto LA10;
addforwardedproc_532203_839829468(m0, prc0);
}
goto LA8;
LA10: ;
{
genprocnoforward_560906_839829468(m0, prc0);
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
/* Exported (flags mask 65600 == 64), a header is being generated, and
   the proc is not header-declared already (loc flag 3 clear). */
LOC16 = ((65600 & (*prc0).flags) == 64);
if (!(LOC16)) goto LA17;
LOC16 = !((generatedheader_532201_839829468 == NIM_NIL));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
LA18: ;
if (!LOC15) goto LA19;
genprocprototype_539254_839829468(generatedheader_532201_839829468, prc0);
{
/* Inline procs (callconv 5) also get their body into the header. */
if (!((*(*prc0).typ).callconv == ((Tcallingconvention292002) 5))) goto LA23;
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = containsorincl_268862_2627731572((&(*generatedheader_532201_839829468).declaredthings), (*prc0).Sup.id);
if (!!(LOC27)) goto LA28;
genprocaux_560284_839829468(generatedheader_532201_839829468, prc0);
}
LA28: ;
}
LA23: ;
}
LA19: ;
}
LA8: ;
}BeforeRet: ;
}
static N_INLINE(NIM_BOOL, emulatedthreadvars_532949_839829468)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = ((71303168 & ~ gglobaloptions_169130_2607990831)==0);
return result0;
}
/* Declare thread-local variable `s0` in module `m0`.  With emulated
   thread vars, the variable is instead appended (once, guarded by
   nimtvdeclared) as a field of the global TLS record `nimtv` (format
   T..._54), and its type recorded in nimtvdeps.  Otherwise a real C
   declaration goes into file section 9, prefixed with `extern` text
   T..._240 when `isextern0`, plus the TLS keyword T..._241 when global
   option bit 22 is set, ending with format T..._242. */
N_NIMCALL(void, declarethreadvar_538676_839829468)(Tcgen529027* m0, Tsym292834* s0, NIM_BOOL isextern0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = emulatedthreadvars_532949_839829468();
if (!LOC3) goto LA4;
{
NIM_BOOL LOC8;
TY532811 LOC11;
LOC8 = (NIM_BOOL)0;
/* Only register each symbol once in the emulated TLS record. */
LOC8 = containsorincl_268862_2627731572((&nimtvdeclared_538675_839829468), (*s0).Sup.id);
if (!!(LOC8)) goto LA9;
/* Grow nimtvdeps by one and append this symbol's type. */
nimtvdeps_538674_839829468 = (Ttypeseq292836*) incrSeqV2(&(nimtvdeps_538674_839829468)->Sup, sizeof(Ttype292840*));
asgnRefNoCycle((void**) (&nimtvdeps_538674_839829468->data[nimtvdeps_538674_839829468->Sup.len]), (*s0).loc.t);
++nimtvdeps_538674_839829468->Sup.len;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535673_839829468(m0, (*s0).loc.t);
LOC11[1] = (*s0).loc.r;
addf_179205_2381377266(&nimtv_538656_839829468, ((NimStringDesc*) &T839829468_54), LOC11, 2);
}
LA9: ;
}
goto LA1;
LA4: ;
{
Ropeobj178006* LOC21;
TY178507 LOC22;
{
if (!isextern0) goto LA15;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
}
LA15: ;
{
/* Global option bit 22: emit the native TLS storage keyword. */
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 22))&63U)))!=0)) goto LA19;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_241));
}
LA19: ;
LOC21 = (Ropeobj178006*)0;
LOC21 = gettypedesc_535673_839829468(m0, (*s0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC21);
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = (*s0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC22, 1);
}
LA1: ;
}
/* Emit an extern prototype for variable `sym0` in module `m0`.  Skips
   header-declared symbols (loc flag 3) and anything already declared
   in this module.  Only variables owned by a DIFFERENT module get a
   declaration: thread vars (flag bit 22) go through declarethreadvar,
   others get `extern` (T..._240) + type + optional modifiers
   (loc flag 4 → T..._53, sym flag 8 → T..._121, sym flag 7 → T..._122)
   + the terminating format T..._242, all into file section 9. */
N_NIMCALL(void, genvarprototypeaux_544254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Ropeobj178006* LOC1;
{ useheader_532369_839829468(m0, sym0);
LOC1 = (Ropeobj178006*)0;
LOC1 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 3), (*sym0).typ, LOC1, ((Tstorageloc292812) 3));
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
/* Already header-declared, or already declared in this module: done. */
LOC4 = (((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0);
if (LOC4) goto LA5;
LOC4 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LA5: ;
if (!LOC4) goto LA6;
goto BeforeRet;
}
LA6: ;
{
/* Declare only when the owner is a different module. */
if (!!(((*(*sym0).owner).Sup.id == (*(*m0).module).Sup.id))) goto LA10;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA14;
declarethreadvar_538676_839829468(m0, sym0, NIM_TRUE);
}
goto LA12;
LA14: ;
{
Ropeobj178006* LOC17;
TY178507 LOC30;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_240));
LOC17 = (Ropeobj178006*)0;
LOC17 = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC17);
{
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA20;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_53));
}
LA20: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA24;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_121));
}
LA24: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA28;
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_122));
}
LA28: ;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = (*sym0).loc.r;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_242), LOC30, 1);
}
LA12: ;
}
LA10: ;
}BeforeRet: ;
}
/* Public entry point: emit an extern variable prototype for `sym0`. */
N_NIMCALL(void, genvarprototype_539236_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
genvarprototypeaux_544254_839829468(m0, sym0);
}
/* Resolve a compilerproc by `name0`, make sure its code/prototype has
   been generated in module `m0`, and return its C location rope.
   Routine kinds (12..15) are generated as procs, variable kinds
   (8, 9, 11) as variable prototypes, type kind 7 pulls in the type
   descriptor; any other symbol kind is an internal error.  A missing
   compilerproc is reported via message kind 68.
   NOTE(review): in the missing-symbol branch sym0 is NIL and the final
   `(*sym0).loc.r` would dereference it — presumably rawmessage does
   not return in that case; confirm against its implementation. */
N_NIMCALL(Ropeobj178006*, cgsym_532403_839829468)(Tcgen529027* m0, NimStringDesc* name0) {
Ropeobj178006* result0;
Tsym292834* sym0;
result0 = (Ropeobj178006*)0;
sym0 = getcompilerproc_338748_3937434831(name0);
{
if (!!((sym0 == NIM_NIL))) goto LA3;
switch ((*sym0).kind) {
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 13):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
genproc_532951_839829468(m0, sym0);
}
break;
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
genvarprototype_539236_839829468(m0, sym0);
}
break;
case ((Tsymkind292435) 7):
{
Ropeobj178006* LOC8;
LOC8 = (Ropeobj178006*)0;
/* Result unused; gettypedesc registers the type in the module. */
LOC8 = gettypedesc_535673_839829468(m0, (*sym0).typ);
}
break;
default:
{
NimStringDesc* LOC10;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(name0->Sup.len + reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 9);
appendString(LOC10, ((NimStringDesc*) &T839829468_243));
appendString(LOC10, name0);
appendString(LOC10, ((NimStringDesc*) &T839829468_244));
appendString(LOC10, reprEnum((NI)(*sym0).kind, (&NTI292435)));
internalerror_196113_155036129(LOC10);
}
break;
}
}
goto LA1;
LA3: ;
{
rawmessage_194612_155036129(((Tmsgkind191002) 68), name0);
}
LA1: ;
result0 = (*sym0).loc.r;
return result0;
}
/* Interpret the code-generator format string `frmt0` and build a rope.
   Supported directives (as implemented below):
     $$        — literal dollar (T..._19)
     $#        — next argument in sequence
     $1..$NNN  — argument by 1-based number (out of range → internal error)
     $n        — newline unless option bit 10 is set
     $N        — unconditional newline
     #ident    — pull compilerproc `ident` via cgsym and splice its rope
     #$NNN     — argument NNN stringified, then resolved via cgsym
   Plain text between directives is copied verbatim.  Any other
   character after '$' is an internal error (prefix T..._20). */
N_NIMCALL(Ropeobj178006*, ropecg_532407_839829468)(Tcgen529027* m0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
Ropeobj178006* result0;
NI i0;
NI length0;
NI num0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
length0 = (frmt0 ? frmt0->Sup.len : 0);
result0 = NIM_NIL;
num0 = ((NI) 0);
{
while (1) {
NI start0;
if (!(i0 < length0)) goto LA2;
{
/* '$' introduces a directive. */
if (!((NU8)(frmt0->data[i0]) == (NU8)(36))) goto LA5;
i0 += ((NI) 1);
switch (((NU8)(frmt0->data[i0]))) {
case 36:
{
/* "$$": literal dollar sign. */
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_19));
i0 += ((NI) 1);
}
break;
case 35:
{
/* "$#": splice the next sequential argument. */
i0 += ((NI) 1);
add_178482_2381377266(&result0, args0[num0]);
num0 += ((NI) 1);
}
break;
case 48 ... 57:
{
/* "$<digits>": parse the 1-based argument index. */
NI j0;
j0 = ((NI) 0);
{
while (1) {
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
LOC14 = (length0 <= i0);
if (LOC14) goto LA15;
LOC14 = !((((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57))));
LA15: ;
if (!LOC14) goto LA16;
goto LA10;
}
LA16: ;
}
} LA10: ;
num0 = j0;
{
NimStringDesc* LOC22;
NimStringDesc* LOC23;
/* Index beyond args0Len0: internal error (prefix T..._20). */
if (!((NI)((args0Len0-1) + ((NI) 1)) < j0)) goto LA20;
LOC22 = (NimStringDesc*)0;
LOC23 = (NimStringDesc*)0;
LOC23 = nimIntToStr(j0);
LOC22 = rawNewString(LOC23->Sup.len + 30);
appendString(LOC22, ((NimStringDesc*) &T839829468_20));
appendString(LOC22, LOC23);
internalerror_196113_155036129(LOC22);
}
LA20: ;
add_178482_2381377266(&result0, args0[(NI)(j0 - ((NI) 1))]);
}
break;
case 110:
{
/* "$n": newline unless proc option bit 10 suppresses it. */
{
if (!!(((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 10))&31U)))!=0))) goto LA27;
add_178482_2381377266(&result0, rnl_178903_2381377266);
}
LA27: ;
i0 += ((NI) 1);
}
break;
case 78:
{
/* "$N": unconditional newline. */
add_178482_2381377266(&result0, rnl_178903_2381377266);
i0 += ((NI) 1);
}
break;
default:
{
/* Unknown directive character: internal error. */
NimStringDesc* LOC31;
LOC31 = (NimStringDesc*)0;
LOC31 = rawNewString(31);
appendString(LOC31, ((NimStringDesc*) &T839829468_20));
appendChar(LOC31, frmt0->data[i0]);
internalerror_196113_155036129(LOC31);
}
break;
}
}
goto LA3;
LA5: ;
{
NIM_BOOL LOC33;
NI j0;
NimStringDesc* ident0;
Ropeobj178006* LOC39;
/* "#ident": scan the identifier and resolve it via cgsym. */
LOC33 = (NIM_BOOL)0;
LOC33 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC33)) goto LA34;
LOC33 = (((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(97)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(122)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) >= ((NU8)(65)) && ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) <= ((NU8)(90)) || ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(95)));
LA34: ;
if (!LOC33) goto LA35;
i0 += ((NI) 1);
j0 = i0;
{
while (1) {
if (!(((NU8)(frmt0->data[j0])) >= ((NU8)(97)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(122)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(65)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(90)) || ((NU8)(frmt0->data[j0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[j0])) <= ((NU8)(57)) || ((NU8)(frmt0->data[j0])) == ((NU8)(95)))) goto LA38;
j0 += ((NI) 1);
} LA38: ;
}
ident0 = copyStrLast(frmt0, i0, (NI)(j0 - ((NI) 1)));
i0 = j0;
LOC39 = (Ropeobj178006*)0;
LOC39 = cgsym_532403_839829468(m0, ident0);
add_178482_2381377266(&result0, LOC39);
}
goto LA3;
LA35: ;
{
NIM_BOOL LOC41;
NI j0;
NimStringDesc* LOC47;
Ropeobj178006* LOC48;
/* "#$<digits>": stringify argument j0, then resolve via cgsym. */
LOC41 = (NIM_BOOL)0;
LOC41 = ((NU8)(frmt0->data[i0]) == (NU8)(35));
if (!(LOC41)) goto LA42;
LOC41 = ((NU8)(frmt0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(36));
LA42: ;
if (!LOC41) goto LA43;
i0 += ((NI) 2);
j0 = ((NI) 0);
{
while (1) {
if (!(((NU8)(frmt0->data[i0])) >= ((NU8)(48)) && ((NU8)(frmt0->data[i0])) <= ((NU8)(57)))) goto LA46;
j0 = (NI)((NI)((NI)(j0 * ((NI) 10)) + ((NI) (((NU8)(frmt0->data[i0]))))) - ((NI) 48));
i0 += ((NI) 1);
} LA46: ;
}
LOC47 = (NimStringDesc*)0;
LOC47 = HEX24_178856_2381377266(args0[(NI)(j0 - ((NI) 1))]);
LOC48 = (Ropeobj178006*)0;
LOC48 = cgsym_532403_839829468(m0, LOC47);
add_178482_2381377266(&result0, LOC48);
}
goto LA3;
LA43: ;
LA3: ;
start0 = i0;
{
/* Copy plain text until the next '$' or '#'. */
while (1) {
if (!(i0 < length0)) goto LA50;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(36)));
if (!(LOC53)) goto LA54;
LOC53 = !(((NU8)(frmt0->data[i0]) == (NU8)(35)));
LA54: ;
if (!LOC53) goto LA55;
i0 += ((NI) 1);
}
goto LA51;
LA55: ;
{
goto LA49;
}
LA51: ;
} LA50: ;
} LA49: ;
{
NimStringDesc* LOC62;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA60;
LOC62 = (NimStringDesc*)0;
LOC62 = copyStrLast(frmt0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC62);
}
LA60: ;
} LA2: ;
}
return result0;
}
/* True when the current module is compiled as C++ (flag bit 27 on the module's
 * flags -- presumably sfCompileToCpp; confirm against symbol-flag enum) while
 * the module that owns `sym0` is not, and the active command is not the one
 * excluded by Tcommands value 2.  Used to decide whether a prototype needs
 * C-linkage decoration. */
static N_INLINE(NIM_BOOL, crossescppboundary_560754_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
	NIM_BOOL crosses;
	NIM_BOOL hereIsCpp;
	hereIsCpp = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
	crosses = hereIsCpp;
	if (hereIsCpp) {
		/* Only resolve the owning module when the cheap flag test passed. */
		Tsym292834* owner = getmodule_299123_2984716966(sym0);
		crosses = !((((*owner).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0));
	}
	if (crosses) {
		crosses = !((gcmd_169132_2607990831 == ((Tcommands169076) 2)));
	}
	return crosses;
}
/* genProcPrototype: emit a forward declaration for the proc `sym0` into the
 * appropriate section of module `m0`'s generated C file.
 *
 * Outline of the goto-structured control flow below:
 *   - pull in the proc's header file dependency (useheader);
 *   - bail out early if loc flag bit 3 is already set (presumably
 *     "no declaration needed" -- confirm against Tlocflag);
 *   - if loc flag bit 4 is set (presumably dynlib import), declare an
 *     extern proc-pointer variable instead of a prototype -- but only when
 *     the symbol comes from another module and was not declared yet;
 *   - otherwise build the normal prototype via genprocheader and decorate it
 *     with compiler-capability-dependent attributes before adding it to
 *     file section 7 (forward declarations). */
N_NIMCALL(void, genprocprototype_539254_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
{ useheader_532369_839829468(m0, sym0);
{
/* Early exit: declaration already suppressed/handled for this location. */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
{
/* Dynlib branch: loc flag bit 4 set. */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA7;
{
NIM_BOOL LOC11;
Tsym292834* LOC12;
NIM_BOOL LOC14;
TY532811 LOC17;
Ropeobj178006* LOC18;
LOC11 = (NIM_BOOL)0;
LOC12 = (Tsym292834*)0;
LOC12 = getmodule_299123_2984716966(sym0);
/* Only emit the extern variable when the symbol lives in a different
 * module AND has not already been declared in this one. */
LOC11 = !(((*LOC12).Sup.id == (*(*m0).module).Sup.id));
if (!(LOC11)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LOC11 = !(LOC14);
LA13: ;
if (!LOC11) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
LOC17[1] = mangledynlibproc_538816_839829468(sym0);
LOC18 = (Ropeobj178006*)0;
/* T839829468_245 is the extern-variable template for dynlib procs
 * (exact text not visible in this chunk). */
LOC18 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_245), LOC17, 2);
/* Section 9: presumably the variables section of the C file. */
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], LOC18);
}
LA15: ;
}
goto LA5;
LA7: ;
{
/* Normal prototype branch. */
NIM_BOOL LOC20;
Ropeobj178006* header0;
TY178507 LOC47;
Ropeobj178006* LOC48;
LOC20 = (NIM_BOOL)0;
/* containsOrIncl: returns whether it was already present and marks it --
 * guarantees the prototype is only emitted once per module. */
LOC20 = containsorincl_268862_2627731572((&(*m0).declaredprotos), (*sym0).Sup.id);
if (!!(LOC20)) goto LA21;
header0 = genprocheader_535867_839829468(m0, sym0);
{
/* Prefix attribute: sym flag bit 14 + C-compiler capability bit 6
 * (presumably a "no return" style prop -- confirm against Tinfoccprop). */
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC25)) goto LA26;
LOC25 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 6))&7U)))!=0);
LA26: ;
if (!LOC25) goto LA27;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_213), header0);
}
LA27: ;
{
/* Prefix 'extern "C"'-style decoration when a non-C++-convention proc
 * is referenced across the C/C++ compilation boundary. */
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = !(((*(*sym0).typ).callconv == ((Tcallingconvention292002) 5)));
if (!(LOC31)) goto LA32;
LOC31 = crossescppboundary_560754_839829468(m0, sym0);
LA32: ;
if (!LOC31) goto LA33;
header0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_246), header0);
}
LA33: ;
{
/* Suffix attribute: sym flag bit 9 + capability bit 7. */
NIM_BOOL LOC37;
LOC37 = (NIM_BOOL)0;
LOC37 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0);
if (!(LOC37)) goto LA38;
LOC37 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA38: ;
if (!LOC37) goto LA39;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_247));
}
LA39: ;
{
/* Suffix attribute: sym flag bit 14 + capability bit 7. */
NIM_BOOL LOC43;
LOC43 = (NIM_BOOL)0;
LOC43 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 14))&31U)))!=0);
if (!(LOC43)) goto LA44;
LOC43 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 7))&7U)))!=0);
LA44: ;
if (!LOC43) goto LA45;
add_178487_2381377266(&header0, ((NimStringDesc*) &T839829468_248));
}
LA45: ;
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = header0;
LOC48 = (Ropeobj178006*)0;
LOC48 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_191), LOC47, 1);
/* Section 7: forward-declaration section of the generated C file. */
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], LOC48);
}
goto LA5;
LA21: ;
LA5: ;
}BeforeRet: ;
}
static N_INLINE(NIM_BOOL, usesnativegc_169177_2607990831)(void) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
result0 = (((Tgcmode169080) 5) <= gselectedgc_169133_2607990831);
return result0;
}
/* genRefAssign: emit C code assigning the GC'ed reference `src0` to `dest0`.
 * Three strategies, chosen by the destination's storage location and the GC:
 *   1. plain C assignment when dest is in unchecked storage (Tstorageloc 2,
 *      presumably OnStack) or no native GC is active;
 *   2. write-barrier call for heap destinations (Tstorageloc 3), with a
 *      cycle-aware variant when the type can form a reference cycle;
 *   3. a conservative barrier otherwise (storage unknown).
 * NOTE(review): which runtime helper each format constant (T839829468_123/
 * 249/250/251) expands to is not visible in this chunk -- presumably plain
 * "=", asgnRef, asgnRefNoCycle and unsureAsgnRef; confirm in the rope table. */
N_NIMCALL(void, genrefassign_538311_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY532811 LOC8;
LOC3 = (NIM_BOOL)0;
/* dest on stack OR no native GC -> no write barrier needed. */
LOC3 = ((*dest0).s == ((Tstorageloc292812) 2));
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = usesnativegc_169177_2607990831();
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(dest0);
LOC8[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC8, 2);
}
goto LA1;
LA6: ;
{
/* dest on the heap: needs a real write barrier. */
if (!((*dest0).s == ((Tstorageloc292812) 3))) goto LA10;
{
NIM_BOOL LOC14;
TY532811 LOC17;
LOC14 = (NIM_BOOL)0;
/* Cycle-capable types need the cycle-aware barrier. */
LOC14 = canformacycle_320123_3876443242((*dest0).t);
if (!LOC14) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_249), LOC17, 2);
}
goto LA12;
LA15: ;
{
/* Acyclic type: cheaper no-cycle barrier. */
TY532811 LOC19;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(dest0);
LOC19[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_250), LOC19, 2);
}
LA12: ;
}
goto LA1;
LA10: ;
{
/* Unknown destination storage: conservative barrier. */
TY532811 LOC21;
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = addrloc_538204_839829468(dest0);
LOC21[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_251), LOC21, 2);
}
LA1: ;
}
/* optAsgnLoc: build a field-access location (`a0`.`field0`) of type `t0`
 * into `Result`, inheriting the storage class of `a0`.  Result kind 5 is
 * presumably locField (confirm against Tlockind).  T839829468_257 is the
 * separator inserted between the base expression and the field name. */
N_NIMCALL(void, optasgnloc_549789_839829468)(Tloc292816* a0, Ttype292840* t0, Ropeobj178006* field0, Tloc292816* Result) {
	Ropeobj178006* baseExpr;
	Ropeobj178006* withSep;
	(*Result).k = ((Tlockind292808) 5);
	(*Result).s = (*a0).s;
	/* GC-safe pointer store into the traced `t` slot of the result loc. */
	unsureAsgnRef((void**) (&(*Result).t), t0);
	baseExpr = rdloc_538188_839829468(a0);
	withSep = HEX26_178447_2381377266(baseExpr, ((NimStringDesc*) &T839829468_257));
	unsureAsgnRef((void**) (&(*Result).r), HEX26_178418_2381377266(withSep, field0));
}
/* genOptAsgnTuple: assign a small tuple field-by-field instead of calling
 * the generic assignment runtime.  First adjusts the assignment flags
 * (add flag 1 when the source is in read-only/static storage, drop it when
 * the destination type carries type-flag bit 6 -- presumably tfShallow;
 * confirm against Ttypeflag), then recurses into genassignment once per
 * tuple field using a synthesized ".Field<i>" access. */
N_NIMCALL(void, genoptasgntuple_550001_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
Tassignmentflag538302Set newflags0;
Ttype292840* t_550053_839829468;
Ttype292840* LOC9;
{
/* src in storage class 1 (presumably static/ROM) -> set flag bit 0. */
if (!((*src0).s == ((Tstorageloc292812) 1))) goto LA3;
newflags0 = (flags0 | 1);
}
goto LA1;
LA3: ;
{
/* Shallow destination type -> clear flag bit 0. */
if (!(((*(*dest0).t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) goto LA6;
newflags0 = (flags0 & ~ 1);
}
goto LA1;
LA6: ;
{
newflags0 = flags0;
}
LA1: ;
LOC9 = (Ttype292840*)0;
/* Skip abstract wrapper types, then canonicalize so that field indices
 * match the emitted C struct layout. */
LOC9 = skiptypes_296099_850551059((*dest0).t, IL64(211106232576256));
t_550053_839829468 = getuniquetype_528640_2036603609(LOC9);
{
NI i_550071_839829468;
NI HEX3Atmp_550077_839829468;
NI LOC11;
NI res_550080_839829468;
i_550071_839829468 = (NI)0;
HEX3Atmp_550077_839829468 = (NI)0;
LOC11 = (NI)0;
LOC11 = len_295339_850551059(t_550053_839829468);
HEX3Atmp_550077_839829468 = (LOC11 - 1);
res_550080_839829468 = ((NI) 0);
{
/* For each tuple element: build matching field locs for dest and src,
 * then delegate to the full assignment generator. */
while (1) {
Ttype292840* t0;
Ropeobj178006* field0;
TY178507 LOC14;
Tloc292816 LOC15;
Tloc292816 LOC16;
if (!(res_550080_839829468 <= HEX3Atmp_550077_839829468)) goto LA13;
i_550071_839829468 = res_550080_839829468;
t0 = (*t_550053_839829468).sons->data[i_550071_839829468];
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178401_2381377266(((NI64) (i_550071_839829468)));
/* T839829468_260 is the "Field$1"-style template for tuple members. */
field0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_260), LOC14, 1);
memset((void*)(&LOC15), 0, sizeof(LOC15));
optasgnloc_549789_839829468(dest0, t0, field0, (&LOC15));
memset((void*)(&LOC16), 0, sizeof(LOC16));
optasgnloc_549789_839829468(src0, t0, field0, (&LOC16));
genassignment_539264_839829468(p0, (&LOC15), (&LOC16), newflags0);
res_550080_839829468 += ((NI) 1);
} LA13: ;
}
}
}
/* genGenericAsgn: emit an assignment through the generic runtime machinery.
 * Chooses between a raw memcpy-style copy and a type-informed deep copy:
 *   - when flag bit 0 is clear AND the (skipped) dest type has type-flag
 *     bit 6 (presumably tfShallow): shallow copy -- either plain memcpy when
 *     no write barrier is needed, or the runtime's shallow-copy helper;
 *   - otherwise: the full genericAssign-style helper driven by the type's
 *     RTTI (gentypeinfo).
 * NOTE(review): format constants T839829468_261/262/263 presumably expand
 * to memcpy / genericShallowAssign / genericAssign calls -- confirm. */
N_NIMCALL(void, gengenericasgn_550167_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
{
NIM_BOOL LOC3;
Ttype292840* LOC5;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440));
LOC3 = (((*LOC5).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA6;
{
/* Shallow branch: pick memcpy when dest is on the stack or no native
 * GC runs; otherwise call the shallow-copy runtime helper. */
NIM_BOOL LOC10;
NIM_BOOL LOC12;
TY535238 LOC15;
LOC10 = (NIM_BOOL)0;
LOC10 = ((*dest0).s == ((Tstorageloc292812) 2));
if (LOC10) goto LA11;
LOC12 = (NIM_BOOL)0;
LOC12 = usesnativegc_169177_2607990831();
LOC10 = !(LOC12);
LA11: ;
if (!LOC10) goto LA13;
/* The emitted code uses memcpy, so <string.h> must be pulled in. */
usestringh_532345_839829468((*p0).module);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = addrloc_538204_839829468(dest0);
LOC15[1] = addrloc_538204_839829468(src0);
LOC15[2] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC15, 3);
}
goto LA8;
LA13: ;
{
TY535238 LOC17;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = addrloc_538204_839829468(src0);
LOC17[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_262), LOC17, 3);
}
LA8: ;
}
goto LA1;
LA6: ;
{
/* Deep branch: RTTI-driven generic assignment. */
TY535238 LOC19;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = addrloc_538204_839829468(dest0);
LOC19[1] = addrloc_538204_839829468(src0);
LOC19[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_263), LOC19, 3);
}
LA1: ;
}
/* asgnComplexity: recursively estimate how expensive a field-wise assignment
 * of the object layout tree `n0` would be.  Leaf symbol nodes (node kind 3,
 * presumably nkSym) count 1, node kind 139 (presumably a record-case node)
 * counts 100 so that variant objects never take the field-wise fast path,
 * and node kind 138 (record list) sums its children.  nil and all other
 * node kinds contribute 0.  Callers compare the result against a small
 * threshold (<= 4 in genassignment below). */
N_NIMCALL(NI, asgncomplexity_549751_839829468)(Tnode292802* n0) {
NI result0;
result0 = (NI)0;
{
if (!!((n0 == NIM_NIL))) goto LA3;
switch ((*n0).kind) {
case ((Tnodekind292020) 3):
{
/* Single symbol (one field): cheapest case. */
result0 = ((NI) 1);
}
break;
case ((Tnodekind292020) 139):
{
/* Variant/case part: weighted so the total always exceeds the
 * field-wise-assignment threshold. */
result0 = ((NI) 100);
}
break;
case ((Tnodekind292020) 138):
{
/* Record list: sum the complexity of all child nodes. */
{
Tnode292802* t_549768_839829468;
t_549768_839829468 = (Tnode292802*)0;
{
NI i_549782_839829468;
NI HEX3Atmp_549784_839829468;
NI LOC10;
NI res_549786_839829468;
i_549782_839829468 = (NI)0;
HEX3Atmp_549784_839829468 = (NI)0;
LOC10 = (NI)0;
LOC10 = len_293081_850551059(n0);
HEX3Atmp_549784_839829468 = (LOC10 - 1);
res_549786_839829468 = ((NI) 0);
{
while (1) {
NI LOC13;
if (!(res_549786_839829468 <= HEX3Atmp_549784_839829468)) goto LA12;
i_549782_839829468 = res_549786_839829468;
t_549768_839829468 = (*n0).kindU.S6.sons->data[i_549782_839829468];
LOC13 = (NI)0;
LOC13 = asgncomplexity_549751_839829468(t_549768_839829468);
result0 += LOC13;
res_549786_839829468 += ((NI) 1);
} LA12: ;
}
}
}
}
break;
default:
{
}
break;
}
}
LA3: ;
return result0;
}
/* genOptAsgnObject: assign an object value field-by-field, walking its
 * layout tree `t0` (as produced by the sem pass).  A symbol node (kind 3)
 * emits one field assignment via genassignment; a record list (kind 138)
 * recurses over its children; anything else -- including nil -- is ignored.
 * Like genoptasgntuple above, the assignment flags are adjusted first:
 * bit 0 is set for static sources and cleared for shallow destinations. */
N_NIMCALL(void, genoptasgnobject_550084_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0, Tnode292802* t0) {
Tassignmentflag538302Set newflags0;
{ {
/* nil layout node: nothing to copy. */
if (!(t0 == NIM_NIL)) goto LA3;
goto BeforeRet;
}
LA3: ;
{
/* src in storage class 1 (presumably static/ROM) -> set flag bit 0. */
if (!((*src0).s == ((Tstorageloc292812) 1))) goto LA7;
newflags0 = (flags0 | 1);
}
goto LA5;
LA7: ;
{
/* Shallow destination type -> clear flag bit 0. */
if (!(((*(*dest0).t).flags &(1U<<((NU)(((Ttypeflag292431) 6))&31U)))!=0)) goto LA10;
newflags0 = (flags0 & ~ 1);
}
goto LA5;
LA10: ;
{
newflags0 = flags0;
}
LA5: ;
switch ((*t0).kind) {
case ((Tnodekind292020) 3):
{
/* Single field symbol: assign dest.field = src.field. */
Tsym292834* field0;
Tloc292816 LOC14;
Tloc292816 LOC15;
field0 = (*t0).kindU.S4.sym;
memset((void*)(&LOC14), 0, sizeof(LOC14));
optasgnloc_549789_839829468(dest0, (*field0).typ, (*field0).loc.r, (&LOC14));
memset((void*)(&LOC15), 0, sizeof(LOC15));
optasgnloc_549789_839829468(src0, (*field0).typ, (*field0).loc.r, (&LOC15));
genassignment_539264_839829468(p0, (&LOC14), (&LOC15), newflags0);
}
break;
case ((Tnodekind292020) 138):
{
/* Record list: recurse into every child with the adjusted flags. */
{
Tnode292802* child_550155_839829468;
child_550155_839829468 = (Tnode292802*)0;
{
NI i_550160_839829468;
NI HEX3Atmp_550162_839829468;
NI LOC19;
NI res_550164_839829468;
i_550160_839829468 = (NI)0;
HEX3Atmp_550162_839829468 = (NI)0;
LOC19 = (NI)0;
LOC19 = len_293081_850551059(t0);
HEX3Atmp_550162_839829468 = (LOC19 - 1);
res_550164_839829468 = ((NI) 0);
{
while (1) {
if (!(res_550164_839829468 <= HEX3Atmp_550162_839829468)) goto LA21;
i_550160_839829468 = res_550164_839829468;
child_550155_839829468 = (*t0).kindU.S6.sons->data[i_550160_839829468];
genoptasgnobject_550084_839829468(p0, dest0, src0, newflags0, child_550155_839829468);
res_550164_839829468 += ((NI) 1);
} LA21: ;
}
}
}
}
break;
default:
{
}
break;
}
}BeforeRet: ;
}
/* genAssignment: the central assignment code generator.  Dispatches on the
 * (skipped) kind of the destination type and emits the cheapest correct
 * copy: plain C assignment, GC write barriers, memcpy, field-wise copies,
 * or the RTTI-driven generic runtime helpers.
 *
 * The Ttypekind case numbers below are the generated encodings of the
 * compiler's type-kind enum; the per-case comments name the likely Nim
 * kind -- NOTE(review): these names are inferred from the emission pattern,
 * not visible in this chunk; confirm against the Ttypekind definition. */
N_NIMCALL(void, genassignment_539264_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0, Tassignmentflag538302Set flags0) {
Ttype292840* ty0;
{ {
/* Fast path: src typed as kind 21 (presumably tyPointer/untraced) gets a
 * plain C assignment regardless of the destination kind. */
NIM_BOOL LOC3;
TY532811 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*src0).t == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC3 = ((*(*src0).t).kind == ((Ttypekind292244) 21));
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468(dest0);
LOC7[1] = rdloc_538188_839829468(src0);
/* T839829468_123 is the plain "$1 = $2;" template (used throughout). */
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC7, 2);
goto BeforeRet;
}
LA5: ;
ty0 = skiptypes_296099_850551059((*dest0).t, IL64(211106233624832));
switch ((*ty0).kind) {
case ((Ttypekind292244) 22):
{
/* kind 22 -- presumably tyRef: delegate to the write-barrier logic. */
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
break;
case ((Ttypekind292244) 24):
{
/* kind 24 -- presumably tySequence. */
{
/* Shallow copy allowed (flag bit 0 clear, src not static): treat the
 * seq pointer like a ref. */
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC12)) goto LA13;
LOC12 = !(((*src0).s == ((Tstorageloc292812) 1)));
LA13: ;
if (!LOC12) goto LA14;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA10;
LA14: ;
{
/* Deep copy via the RTTI-driven runtime helper. */
TY535238 LOC17;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = addrloc_538204_839829468(dest0);
LOC17[1] = rdloc_538188_839829468(src0);
LOC17[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_252), LOC17, 3);
}
LA10: ;
}
break;
case ((Ttypekind292244) 28):
{
/* kind 28 -- presumably tyString. */
{
/* Shallow copy allowed: pointer assignment with ref semantics. */
NIM_BOOL LOC21;
LOC21 = (NIM_BOOL)0;
LOC21 = !(((flags0 &(1U<<((NU)(((Tassignmentflag538302) 0))&7U)))!=0));
if (!(LOC21)) goto LA22;
LOC21 = !(((*src0).s == ((Tstorageloc292812) 1)));
LA22: ;
if (!LOC21) goto LA23;
genrefassign_538311_839829468(p0, dest0, src0, flags0);
}
goto LA19;
LA23: ;
{
{
/* No barrier needed: direct copyString-style assignment. */
NIM_BOOL LOC28;
NIM_BOOL LOC30;
TY532811 LOC33;
LOC28 = (NIM_BOOL)0;
LOC28 = ((*dest0).s == ((Tstorageloc292812) 2));
if (LOC28) goto LA29;
LOC30 = (NIM_BOOL)0;
LOC30 = usesnativegc_169177_2607990831();
LOC28 = !(LOC30);
LA29: ;
if (!LOC28) goto LA31;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468(dest0);
LOC33[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_253), LOC33, 2);
}
goto LA26;
LA31: ;
{
/* Heap destination: copy into a temp first, then barrier-assign the
 * temp into place (avoids aliasing problems when dest aliases src). */
Tloc292816 tmp0;
TY535238 LOC37;
TY178507 LOC38;
if (!((*dest0).s == ((Tstorageloc292812) 3))) goto LA35;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, ty0, (&tmp0), NIM_FALSE);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_538188_839829468(dest0);
LOC37[1] = rdloc_538188_839829468(src0);
LOC37[2] = rdloc_538188_839829468((&tmp0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_254), LOC37, 3);
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468((&tmp0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC38, 1);
}
goto LA26;
LA35: ;
{
/* Unknown storage: conservative runtime helper. */
TY532811 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = addrloc_538204_839829468(dest0);
LOC40[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_256), LOC40, 2);
}
LA26: ;
}
LA19: ;
}
break;
case ((Ttypekind292244) 25):
{
/* kind 25 -- presumably tyProc (closures). */
{
/* Complex (closure) proc: barrier-assign the environment part
 * (".ClEnv"-style field, constant T839829468_258) and plain-assign
 * the function-pointer part. */
NIM_BOOL LOC44;
Tloc292816 a0;
Ropeobj178006* LOC47;
Tloc292816 LOC48;
Tloc292816 b0;
Ropeobj178006* LOC49;
Tloc292816 LOC50;
TY532811 LOC51;
LOC44 = (NIM_BOOL)0;
LOC44 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC44) goto LA45;
memset((void*)(&a0), 0, sizeof(a0));
LOC47 = (Ropeobj178006*)0;
LOC47 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC48), 0, sizeof(LOC48));
optasgnloc_549789_839829468(dest0, (*dest0).t, LOC47, (&LOC48));
memcpy((void*)(&a0), (NIM_CONST void*)(&LOC48), sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
LOC49 = (Ropeobj178006*)0;
LOC49 = rope_178277_2381377266(((NimStringDesc*) &T839829468_258));
memset((void*)(&LOC50), 0, sizeof(LOC50));
optasgnloc_549789_839829468(src0, (*dest0).t, LOC49, (&LOC50));
memcpy((void*)(&b0), (NIM_CONST void*)(&LOC50), sizeof(b0));
genrefassign_538311_839829468(p0, (&a0), (&b0), flags0);
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468(dest0);
LOC51[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_259), LOC51, 2);
}
goto LA42;
LA45: ;
{
/* Plain proc pointer: C assignment suffices. */
TY532811 LOC53;
memset((void*)LOC53, 0, sizeof(LOC53));
LOC53[0] = rdloc_538188_839829468(dest0);
LOC53[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC53, 2);
}
LA42: ;
}
break;
case ((Ttypekind292244) 18):
{
/* kind 18 -- presumably tyTuple. */
{
NIM_BOOL LOC57;
LOC57 = (NIM_BOOL)0;
LOC57 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC57) goto LA58;
{
/* Small tuples (<= 4 elements) are copied field-by-field; larger
 * ones fall back to the generic runtime assignment. */
NI LOC62;
LOC62 = (NI)0;
LOC62 = len_295339_850551059((*dest0).t);
if (!(LOC62 <= ((NI) 4))) goto LA63;
genoptasgntuple_550001_839829468(p0, dest0, src0, flags0);
}
goto LA60;
LA63: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA60: ;
}
goto LA55;
LA58: ;
{
/* Tuple without GC'ed parts: plain C struct assignment. */
TY532811 LOC67;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC67[0] = rdloc_538188_839829468(dest0);
LOC67[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC67, 2);
}
LA55: ;
}
break;
case ((Ttypekind292244) 17):
{
/* kind 17 -- presumably tyObject. */
{
/* Imported C++ types rely on their own (copy-)assignment operator. */
NIM_BOOL LOC71;
TY532811 LOC74;
LOC71 = (NIM_BOOL)0;
LOC71 = isimportedcpptype_533478_839829468(ty0);
if (!LOC71) goto LA72;
memset((void*)LOC74, 0, sizeof(LOC74));
LOC74[0] = rdloc_538188_839829468(dest0);
LOC74[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC74, 2);
}
goto LA69;
LA72: ;
{
/* Objects WITH a type header field must go through the generic
 * assignment so the type field is handled correctly. */
NIM_BOOL LOC76;
LOC76 = (NIM_BOOL)0;
LOC76 = isobjlackingtypefield_533515_839829468(ty0);
if (!!(LOC76)) goto LA77;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA69;
LA77: ;
{
NIM_BOOL LOC80;
LOC80 = (NIM_BOOL)0;
LOC80 = needscomplexassignment_533511_839829468(ty0);
if (!LOC80) goto LA81;
{
/* Field-wise fast path: only for objects with no base type
 * (sons[0] == nil) and a small assignment complexity. */
NIM_BOOL LOC85;
NI LOC87;
Ropeobj178006* LOC90;
LOC85 = (NIM_BOOL)0;
LOC85 = (*ty0).sons->data[((NI) 0)] == 0;
if (!(LOC85)) goto LA86;
LOC87 = (NI)0;
LOC87 = asgncomplexity_549751_839829468((*ty0).n);
LOC85 = (LOC87 <= ((NI) 4));
LA86: ;
if (!LOC85) goto LA88;
/* NOTE(review): LOC90 (the type descriptor) is computed but not used
 * afterwards -- presumably forces type registration as a side effect
 * of gettypedesc; confirm before removing. */
LOC90 = (Ropeobj178006*)0;
LOC90 = gettypedesc_535673_839829468((*p0).module, ty0);
ty0 = getuniquetype_528640_2036603609(ty0);
{
/* The canonical type must carry a layout node; otherwise this is a
 * compiler bug. */
NimStringDesc* LOC95;
if (!!(!(((*ty0).n == NIM_NIL)))) goto LA93;
LOC95 = (NimStringDesc*)0;
LOC95 = HEX24_196185_1689653243(T839829468_264);
internalerror_196113_155036129(LOC95);
}
LA93: ;
genoptasgnobject_550084_839829468(p0, dest0, src0, flags0, (*ty0).n);
}
goto LA83;
LA88: ;
{
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
LA83: ;
}
goto LA69;
LA81: ;
{
/* Object without GC'ed parts: plain C struct assignment. */
TY532811 LOC98;
memset((void*)LOC98, 0, sizeof(LOC98));
LOC98[0] = rdloc_538188_839829468(dest0);
LOC98[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC98, 2);
}
LA69: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* kinds 16/4 -- presumably tyArray/tyArrayConstr: generic assignment
 * when the element type holds GC'ed data, otherwise memcpy. */
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC102) goto LA103;
gengenericasgn_550167_839829468(p0, dest0, src0, flags0);
}
goto LA100;
LA103: ;
{
TY535238 LOC106;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC106, 0, sizeof(LOC106));
LOC106[0] = rdloc_538188_839829468(dest0);
LOC106[1] = rdloc_538188_839829468(src0);
LOC106[2] = gettypedesc_535673_839829468((*p0).module, ty0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_261), LOC106, 3);
}
LA100: ;
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
/* kinds 27/48 -- presumably open-array-like views: generic assignment
 * for complex element types, else a memcpy-style copy. */
{
NIM_BOOL LOC110;
TY535238 LOC113;
LOC110 = (NIM_BOOL)0;
LOC110 = needscomplexassignment_533511_839829468((*dest0).t);
if (!LOC110) goto LA111;
memset((void*)LOC113, 0, sizeof(LOC113));
LOC113[0] = addrloc_538204_839829468(dest0);
LOC113[1] = addrloc_538204_839829468(src0);
LOC113[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_266), LOC113, 3);
}
goto LA108;
LA111: ;
{
TY532811 LOC115;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC115, 0, sizeof(LOC115));
LOC115[0] = rdloc_538188_839829468(dest0);
LOC115[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_267), LOC115, 2);
}
LA108: ;
}
break;
case ((Ttypekind292244) 19):
{
/* kind 19 -- presumably tySet: memcpy for big sets (C type kind 17,
 * presumably an array-mapped set), plain assignment for word-sized ones. */
{
Tctypekind529007 LOC119;
TY535238 LOC122;
NI64 LOC123;
LOC119 = (Tctypekind529007)0;
LOC119 = maptype_533394_839829468(ty0);
if (!(LOC119 == ((Tctypekind529007) 17))) goto LA120;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC122, 0, sizeof(LOC122));
LOC122[0] = rdloc_538188_839829468(dest0);
LOC122[1] = rdloc_538188_839829468(src0);
LOC123 = (NI64)0;
LOC123 = getsize_320135_3876443242((*dest0).t);
LOC122[2] = rope_178401_2381377266(LOC123);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC122, 3);
}
goto LA117;
LA120: ;
{
TY532811 LOC125;
memset((void*)LOC125, 0, sizeof(LOC125));
LOC125[0] = rdloc_538188_839829468(dest0);
LOC125[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC125, 2);
}
LA117: ;
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
{
/* All scalar-like kinds (pointers, ordinals, floats, cstring, ...):
 * a plain C assignment is always correct. */
TY532811 LOC127;
memset((void*)LOC127, 0, sizeof(LOC127));
LOC127[0] = rdloc_538188_839829468(dest0);
LOC127[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC127, 2);
}
break;
default:
{
/* Any other type kind reaching here is a compiler bug: report it with
 * the offending kind's enum name. */
NimStringDesc* LOC129;
LOC129 = (NimStringDesc*)0;
LOC129 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 15);
appendString(LOC129, ((NimStringDesc*) &T839829468_269));
appendString(LOC129, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC129);
}
break;
}
}BeforeRet: ;
}
/* putLocIntoDest: move the value described by `s0` into the destination
 * location `d0`.  When the destination is already initialized (kind != 0,
 * presumably locNone), a real assignment is generated -- with flags 0 when
 * loc-flag bit 2 is set on the destination and flags 1 otherwise.  When the
 * destination loc is still empty, `s0` is simply copied into `d0` via the
 * RTTI-based genericAssign (a compile-time data copy, no code is emitted). */
N_NIMCALL(void, putlocintodest_539258_839829468)(Tcproc529021* p0, Tloc292816* d0, Tloc292816* s0) {
	if (!((*d0).k == ((Tlockind292808) 0))) {
		Tassignmentflag538302Set asgnFlags;
		if ((((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) {
			asgnFlags = 0;
		} else {
			asgnFlags = 1;
		}
		genassignment_539264_839829468(p0, (&(*d0)), s0, asgnFlags);
	} else {
		/* Destination unset: adopt the source location wholesale. */
		genericAssign((void*)(&(*d0)), (void*)s0, (&NTI292816));
	}
}
/* isSimpleConst: true when a constant of type `typ0` can be emitted as a
 * plain C constant.  False for the type kinds in the bitmask 17760272
 * (presumably the container-ish kinds needing constructed data) and for
 * kind-25 types with calling convention 8 (presumably closure procs). */
N_NIMCALL(NIM_BOOL, issimpleconst_532311_839829468)(Ttype292840* typ0) {
	Ttype292840* bare;
	bare = skiptypes_296099_850551059(typ0, IL64(211106240964864));
	if ((((17760272 &((NU64)1<<((NU)((*bare).kind)&63U)))!=0))) {
		return NIM_FALSE;
	}
	if (((*bare).kind == ((Ttypekind292244) 25)) && ((*bare).callconv == ((Tcallingconvention292002) 8))) {
		return NIM_FALSE;
	}
	return NIM_TRUE;
}
/* putIntoDest: store the C expression `r0` of type `t0` (storage `s0`) into
 * the destination location `d0`.  If `d0` already describes a location
 * (kind != 0, presumably locNone), wrap `r0` in a temporary expression loc
 * and generate a real assignment -- flags 0 when loc-flag bit 2 is set on
 * the destination, flags 1 otherwise.  If `d0` is still empty, just record
 * the expression in it (kind 6 is presumably locExpr) without emitting code. */
N_NIMCALL(void, putintodest_550468_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0, Tstorageloc292812 s0) {
	Tloc292816 exprLoc;
	memset((void*)(&exprLoc), 0, sizeof(exprLoc));
	if (!((*d0).k == ((Tlockind292808) 0))) {
		initloc_532273_839829468((&exprLoc), ((Tlockind292808) 6), t0, s0);
		exprLoc.r = r0;
		if ((((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) {
			genassignment_539264_839829468(p0, (&(*d0)), (&exprLoc), 0);
		} else {
			genassignment_539264_839829468(p0, (&(*d0)), (&exprLoc), 1);
		}
	} else {
		/* GC-safe stores: t/r are traced slots of the destination loc. */
		(*d0).k = ((Tlockind292808) 6);
		unsureAsgnRef((void**) (&(*d0).t), t0);
		unsureAsgnRef((void**) (&(*d0).r), r0);
	}
}
/* bitSetToWord: pack the first `size0` bytes of bitset `s0` into a single
 * 64-bit word, byte j landing at bit position j*8 (little-endian layout).
 * Bytes beyond the set's actual length contribute zero. */
N_NIMCALL(NI64, bitsettoword_549578_839829468)(Tbitset339004* s0, NI size0) {
	NI64 packed;
	NI avail;
	NI byteIdx;
	packed = IL64(0);
	/* Nil-safe length of the underlying byte sequence. */
	avail = (s0 ? s0->Sup.len : 0);
	for (byteIdx = ((NI) 0); byteIdx < size0; byteIdx++) {
		if (byteIdx < avail) {
			packed = (NI64)(packed | (NI64)((NU64)(((NI64)(NU64)(NU8)(s0->data[byteIdx]))) << (NU64)(((NI64) ((NI)(byteIdx * ((NI) 8)))))));
		}
	}
	return packed;
}
/* genRawSetData: render the constant bitset `cs0` (occupying `size0` bytes)
 * as C initializer text.  Sets wider than 8 bytes become a braced byte-array
 * initializer of two-digit hex literals (with a line break after every 8th
 * byte -- the T839829468_274/275/276 templates select separator vs. closing
 * form); sets that fit in a machine word are packed by bitsettoword and
 * emitted as a single integer literal. */
N_NIMCALL(Ropeobj178006*, genrawsetdata_549629_839829468)(Tbitset339004* cs0, NI size0) {
Ropeobj178006* result0;
NimStringDesc* frmt0;
result0 = (Ropeobj178006*)0;
frmt0 = (NimStringDesc*)0;
{
TY533289 LOC5;
/* Wide set: emit a byte-array initializer. */
if (!(((NI) 8) < size0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_273), LOC5, 0);
{
NI i_549649_839829468;
NI HEX3Atmp_549657_839829468;
NI res_549660_839829468;
i_549649_839829468 = (NI)0;
HEX3Atmp_549657_839829468 = (NI)0;
HEX3Atmp_549657_839829468 = (NI)(size0 - ((NI) 1));
res_549660_839829468 = ((NI) 0);
{
while (1) {
TY178507 LOC19;
NimStringDesc* LOC20;
if (!(res_549660_839829468 <= HEX3Atmp_549657_839829468)) goto LA8;
i_549649_839829468 = res_549660_839829468;
{
/* Pick the format for this byte: separator+newline after every 8th
 * byte, plain separator otherwise, closing form for the last byte. */
if (!(i_549649_839829468 < (NI)(size0 - ((NI) 1)))) goto LA11;
{
if (!(((NI) ((NI)((NI)(i_549649_839829468 + ((NI) 1)) % ((NI) 8)))) == ((NI) 0))) goto LA15;
frmt0 = copyString(((NimStringDesc*) &T839829468_274));
}
goto LA13;
LA15: ;
{
frmt0 = copyString(((NimStringDesc*) &T839829468_275));
}
LA13: ;
}
goto LA9;
LA11: ;
{
frmt0 = copyString(((NimStringDesc*) &T839829468_276));
}
LA9: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC20 = (NimStringDesc*)0;
/* Two-digit hex rendering of the current byte. */
LOC20 = nsuToHex(((NI64)(NU64)(NU8)(cs0->data[i_549649_839829468])), ((NI) 2));
LOC19[0] = rope_178277_2381377266(LOC20);
addf_179205_2381377266(&result0, frmt0, LOC19, 1);
res_549660_839829468 += ((NI) 1);
} LA8: ;
}
}
}
goto LA1;
LA3: ;
{
/* Narrow set: one packed integer literal. */
NI64 LOC22;
LOC22 = (NI64)0;
LOC22 = bitsettoword_549578_839829468(cs0, size0);
result0 = intliteral_539270_839829468(LOC22);
}
LA1: ;
return result0;
}
/* appcg: format `frmt0` with `args0` through the code-generator rope
 * formatter and append the result to section `s0` of module `m0`'s output. */
N_NIMCALL(void, appcg_532640_839829468)(Tcgen529027* m0, Tcfilesection529005 s0, NimStringDesc* frmt0, Ropeobj178006** args0, NI args0Len0) {
	Ropeobj178006* formatted = ropecg_532407_839829468(m0, frmt0, args0, args0Len0);
	add_178482_2381377266(&(*m0).s[(s0)- 0], formatted);
}
/* genConstSeq: emit the C data for a compile-time constant sequence literal
 * `n0` of seq type `t0`.  Builds the payload (length header via template
 * T839829468_277, then the comma-separated elements rendered by
 * genconstexpr), declares a named static structure for it in file section 8
 * (data section), and returns the seq value as a cast of that structure's
 * address (template T839829468_282). */
N_NIMCALL(Ropeobj178006*, genconstseq_559371_839829468)(Tcproc529021* p0, Tnode292802* n0, Ttype292840* t0) {
Ropeobj178006* result0;
Ropeobj178006* data0;
TY178507 LOC1;
NI LOC2;
TY535235 LOC18;
NI LOC19;
TY532811 LOC20;
result0 = (Ropeobj178006*)0;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
/* Length/header part of the initializer. */
LOC1[0] = rope_178401_2381377266(((NI64) (LOC2)));
data0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_277), LOC1, 1);
{
/* Only non-empty literals get an element initializer list. */
NI LOC5;
LOC5 = (NI)0;
LOC5 = len_293081_850551059(n0);
if (!(((NI) 0) < LOC5)) goto LA6;
add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_278));
{
NI i_559395_839829468;
NI HEX3Atmp_559411_839829468;
NI LOC9;
NI res_559414_839829468;
i_559395_839829468 = (NI)0;
HEX3Atmp_559411_839829468 = (NI)0;
LOC9 = (NI)0;
LOC9 = len_293081_850551059(n0);
HEX3Atmp_559411_839829468 = (NI)(LOC9 - ((NI) 1));
res_559414_839829468 = ((NI) 0);
{
while (1) {
Ropeobj178006* LOC17;
if (!(res_559414_839829468 <= HEX3Atmp_559411_839829468)) goto LA11;
i_559395_839829468 = res_559414_839829468;
{
/* Separator before every element except the first. */
TY533289 LOC16;
if (!(((NI) 0) < i_559395_839829468)) goto LA14;
memset((void*)LOC16, 0, sizeof(LOC16));
addf_179205_2381377266(&data0, ((NimStringDesc*) &T839829468_279), LOC16, 0);
}
LA14: ;
LOC17 = (Ropeobj178006*)0;
LOC17 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[i_559395_839829468]);
add_178482_2381377266(&data0, LOC17);
res_559414_839829468 += ((NI) 1);
} LA11: ;
}
}
add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
}
LA6: ;
add_178487_2381377266(&data0, ((NimStringDesc*) &T839829468_280));
/* Declare the anonymous payload struct under a fresh temp name in the
 * module's data section: element type desc, length, name, initializer. */
result0 = gettempname_533598_839829468((*p0).module);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = gettypedesc_535673_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
LOC19 = (NI)0;
LOC19 = len_293081_850551059(n0);
LOC18[1] = rope_178401_2381377266(((NI64) (LOC19)));
LOC18[2] = result0;
LOC18[3] = data0;
appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 8), ((NimStringDesc*) &T839829468_281), LOC18, 4);
/* The expression used at the const's use site: the temp cast to the seq
 * type (template T839829468_282). */
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC20[1] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_282), LOC20, 2);
return result0;
}
/* genNamedConstExpr: render a constant expression, unwrapping a named
 * field node (node kind 34, presumably nkExprColonExpr) to its value child
 * (sons[1]) first; everything else is rendered as-is by genconstexpr. */
N_NIMCALL(Ropeobj178006*, gennamedconstexpr_559284_839829468)(Tcproc529021* p0, Tnode292802* n0) {
	Tnode292802* valueNode;
	if ((*n0).kind == ((Tnodekind292020) 34)) {
		valueNode = (*n0).kindU.S6.sons->data[((NI) 1)];
	} else {
		valueNode = n0;
	}
	return genconstexpr_554849_839829468(p0, valueNode);
}
/* genConstSimpleList: render a constant array/object/tuple constructor `n0`
 * as a braced C initializer list.  Starts at son index 1 when the node is
 * kind 38 (presumably nkObjConstr, whose sons[0] is the type node), emits
 * "elem,"-style entries (template T839829468_283) for all but the last son,
 * appends the last son without a trailing separator, and closes with the
 * T839829468_160 template. */
N_NIMCALL(Ropeobj178006*, genconstsimplelist_559299_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
NI length0;
TY533289 LOC10;
result0 = (Ropeobj178006*)0;
length0 = sonslen_295351_850551059(n0);
/* T839829468_223 opens the initializer (presumably "{"). */
result0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_223));
{
NI i_559333_839829468;
NI HEX3Atmp_559362_839829468;
NI HEX3Atmp_559363_839829468;
NI res_559366_839829468;
i_559333_839829468 = (NI)0;
HEX3Atmp_559362_839829468 = (NI)0;
HEX3Atmp_559363_839829468 = (NI)0;
/* Loop start: 1 for object constructors (skip the type son), else 0. */
HEX3Atmp_559362_839829468 = ((*n0).kind == ((Tnodekind292020) 38));
/* Loop end: second-to-last son; the last is handled separately below. */
HEX3Atmp_559363_839829468 = (NI)(length0 - ((NI) 2));
res_559366_839829468 = ((NI) (HEX3Atmp_559362_839829468));
{
while (1) {
TY178507 LOC4;
if (!(res_559366_839829468 <= HEX3Atmp_559363_839829468)) goto LA3;
i_559333_839829468 = res_559366_839829468;
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[i_559333_839829468]);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_283), LOC4, 1);
res_559366_839829468 += ((NI) 1);
} LA3: ;
}
}
{
/* Last element (if any) without a trailing separator. */
Ropeobj178006* LOC9;
if (!(((NI) (((*n0).kind == ((Tnodekind292020) 38)))) < length0)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = gennamedconstexpr_559284_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))]);
add_178482_2381377266(&result0, LOC9);
}
LA7: ;
memset((void*)LOC10, 0, sizeof(LOC10));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_160), LOC10, 0);
return result0;
}
/* genConstExpr: render the constant AST node `n0` as C initializer text.
 * Dispatch by node kind:
 *   - kinds 58/59 (presumably conversion wrappers): recurse into sons[1];
 *   - kind 39 (presumably nkCurly, a set literal): fold to a bitset and
 *     render via genrawsetdata;
 *   - kinds 41/37/155/38 (array/tuple/closure?/object constructors):
 *     seq types go through genconstseq, everything else through
 *     genconstsimplelist;
 *   - anything else: evaluate through the normal expression generator and
 *     read back the resulting location. */
N_NIMCALL(Ropeobj178006*, genconstexpr_554849_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
switch ((*n0).kind) {
case ((Tnodekind292020) 58):
case ((Tnodekind292020) 59):
{
/* Unwrap conversion-style nodes: the payload lives in sons[1]. */
result0 = genconstexpr_554849_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)]);
}
break;
case ((Tnodekind292020) 39):
{
/* Constant set literal. */
Tbitset339004* cs0;
NI64 LOC3;
cs0 = (Tbitset339004*)0;
tobitset_340001_452470228(n0, (&cs0));
LOC3 = (NI64)0;
LOC3 = getsize_320135_3876443242((*n0).typ);
result0 = genrawsetdata_549629_839829468(cs0, ((NI) (LOC3)));
}
break;
case ((Tnodekind292020) 41):
case ((Tnodekind292020) 37):
case ((Tnodekind292020) 155):
case ((Tnodekind292020) 38):
{
Ttype292840* t0;
t0 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
{
/* Type kind 24 (presumably tySequence) needs the special seq layout. */
if (!((*t0).kind == ((Ttypekind292244) 24))) goto LA7;
result0 = genconstseq_559371_839829468(p0, n0, t0);
}
goto LA5;
LA7: ;
{
result0 = genconstsimplelist_559299_839829468(p0, n0);
}
LA5: ;
}
break;
default:
{
/* Fallback: run the ordinary expression generator and read its loc. */
Tloc292816 d0;
memset((void*)(&d0), 0, sizeof(d0));
initlocexpr_539283_839829468(p0, n0, (&d0));
result0 = rdloc_538188_839829468((&d0));
}
break;
}
return result0;
}
/* requestConstImpl: make sure the complex constant `sym0` has a generated C
 * definition available to the current module.  Fills in the symbol's
 * location on first use, emits the actual definition (template
 * T839829468_272) in the module that owns the constant, and emits an
 * extern declaration (template T839829468_284) into every other module --
 * and into the generated public header when the symbol is exported
 * (flag bit 6) and a header is being produced. */
N_NIMCALL(void, requestconstimpl_539240_839829468)(Tcproc529021* p0, Tsym292834* sym0) {
Tcgen529027* m0;
Tcgen529027* q0;
{ m0 = (*p0).module;
useheader_532369_839829468(m0, sym0);
{
/* First use: give the symbol a mangled location (kind 8 -- presumably
 * locData; storage class 1 -- presumably static). */
Ropeobj178006* LOC5;
if (!((*sym0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(sym0);
fillloc_532282_839829468((&(*sym0).loc), ((Tlockind292808) 8), (*sym0).typ, LOC5, ((Tstorageloc292812) 1));
}
LA3: ;
{
/* Loc flag bit 3 set: no definition needed (e.g. imported symbol). */
if (!(((*sym0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA8;
goto BeforeRet;
}
LA8: ;
/* q0: the module that must contain the constant's definition. */
q0 = findpendingmodule_532241_839829468(m0, sym0);
{
/* Emit the definition once into the owning module's data section. */
NIM_BOOL LOC12;
NIM_BOOL LOC14;
TY535238 LOC17;
LOC12 = (NIM_BOOL)0;
LOC12 = !((q0 == NIM_NIL));
if (!(LOC12)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*sym0).Sup.id);
LOC12 = !(LOC14);
LA13: ;
if (!LOC12) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468(q0, (*sym0).typ);
LOC17[1] = (*sym0).loc.r;
LOC17[2] = genconstexpr_554849_839829468((*q0).initproc, (*sym0).ast);
addf_179205_2381377266(&(*q0).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3);
}
LA15: ;
{
/* Current module differs from the owning one: declare it extern here. */
NIM_BOOL LOC20;
NIM_BOOL LOC22;
Ropeobj178006* headerdecl0;
TY532811 LOC25;
LOC20 = (NIM_BOOL)0;
LOC20 = !((q0 == m0));
if (!(LOC20)) goto LA21;
LOC22 = (NIM_BOOL)0;
LOC22 = containsorincl_268862_2627731572((&(*m0).declaredthings), (*sym0).Sup.id);
LOC20 = !(LOC22);
LA21: ;
if (!LOC20) goto LA23;
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
LOC25[1] = (*sym0).loc.r;
headerdecl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_284), LOC25, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
{
/* Exported symbol (flag bit 6) + header generation active: repeat the
 * declaration in the generated public header. */
NIM_BOOL LOC28;
LOC28 = (NIM_BOOL)0;
LOC28 = (((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC28)) goto LA29;
LOC28 = !((generatedheader_532201_839829468 == NIM_NIL));
LA29: ;
if (!LOC28) goto LA30;
add_178482_2381377266(&(*generatedheader_532201_839829468).s[(((Tcfilesection529005) 8))- 0], headerdecl0);
}
LA30: ;
}
LA23: ;
}BeforeRet: ;
}
/* genComplexConst: ensure the complex constant `sym0` has a generated C
 * definition (emitting it if needed), then load the symbol's location into
 * the destination `d0`.  Order matters: the location is only valid after
 * requestconstimpl has filled it in. */
N_NIMCALL(void, gencomplexconst_558249_839829468)(Tcproc529021* p0, Tsym292834* sym0, Tloc292816* d0) {
requestconstimpl_539240_839829468(p0, sym0);
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
static N_INLINE(Ropeobj178006**, procsec_529194_3723162438)(Tcproc529021* p0, Tcprocsection529011 s0) {
Ropeobj178006** result0;
result0 = (Ropeobj178006**)0;
result0 = &(*p0).blocks->data[((NI) 0)].sections[(s0)- 0];
return result0;
}
N_NIMCALL(void, accessthreadlocalvar_532945_839829468)(Tcproc529021* p0, Tsym292834* s0) {
{
NIM_BOOL LOC3;
Ropeobj178006** LOC7;
TY533289 LOC8;
Ropeobj178006** LOC9;
TY533289 LOC10;
Ropeobj178006* LOC11;
LOC3 = (NIM_BOOL)0;
LOC3 = emulatedthreadvars_532949_839829468();
if (!(LOC3)) goto LA4;
LOC3 = !((*p0).threadvaraccessed);
LA4: ;
if (!LOC3) goto LA5;
(*p0).threadvaraccessed = NIM_TRUE;
(*(*p0).module).flags |= ((NU8)1)<<((((Codegenflag529025) 1))%(sizeof(NU8)*8));
LOC7 = (Ropeobj178006**)0;
LOC7 = procsec_529194_3723162438(p0, ((Tcprocsection529011) 0));
memset((void*)LOC8, 0, sizeof(LOC8));
addf_179205_2381377266(LOC7, ((NimStringDesc*) &T839829468_286), LOC8, 0);
LOC9 = (Ropeobj178006**)0;
LOC9 = procsec_529194_3723162438(p0, ((Tcprocsection529011) 1));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_287), LOC10, 0);
add_178482_2381377266(LOC9, LOC11);
}
LA5: ;
}
static N_INLINE(NIM_BOOL, isemptytype_297441_850551059)(Ttype292840* t0) {
NIM_BOOL result0;
NIM_BOOL LOC1;
result0 = (NIM_BOOL)0;
LOC1 = (NIM_BOOL)0;
LOC1 = (t0 == NIM_NIL);
if (LOC1) goto LA2;
LOC1 = ((IL64(4611686018427388032) &((NU64)1<<((NU)((*t0).kind)&63U)))!=0);
LA2: ;
result0 = LOC1;
return result0;
}
N_NIMCALL(void, putdataintodest_550436_839829468)(Tcproc529021* p0, Tloc292816* d0, Ttype292840* t0, Ropeobj178006* r0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3;
initloc_532273_839829468((&a0), ((Tlockind292808) 8), t0, ((Tstorageloc292812) 1));
a0.r = r0;
{
if (!(((*d0).flags &(1U<<((NU)(((Tlocflag292810) 2))&15U)))!=0)) goto LA7;
genassignment_539264_839829468(p0, (&(*d0)), (&a0), 0);
}
goto LA5;
LA7: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&a0), 1);
}
LA5: ;
}
goto LA1;
LA3: ;
{
(*d0).k = ((Tlockind292808) 8);
unsureAsgnRef((void**) (&(*d0).t), t0);
unsureAsgnRef((void**) (&(*d0).r), r0);
}
LA1: ;
}
N_NIMCALL(NIM_BOOL, freshlineinfo_532818_839829468)(Tcproc529021* p0, Tlineinfo191336 info0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = !(((*p0).lastlineinfo.line == info0.line));
if (LOC3) goto LA4;
LOC3 = !(((*p0).lastlineinfo.fileindex == info0.fileindex));
LA4: ;
if (!LOC3) goto LA5;
(*p0).lastlineinfo.line = info0.line;
(*p0).lastlineinfo.fileindex = info0.fileindex;
result0 = NIM_TRUE;
}
LA5: ;
return result0;
}
N_NIMCALL(void, genlinedir_532823_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI line0;
Ropeobj178006** LOC11;
NimStringDesc* LOC12;
line0 = safelinenm_532721_839829468((*t0).info);
{
Ropeobj178006** LOC5;
TY533289 LOC6;
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
Ropeobj178006* LOC10;
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 28))&63U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006**)0;
LOC5 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
memset((void*)LOC6, 0, sizeof(LOC6));
LOC7 = (Ropeobj178006*)0;
LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_293), LOC6, 0);
LOC8 = (Ropeobj178006*)0;
LOC8 = sourceline_192065_155036129((*t0).info);
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX26_178418_2381377266(LOC7, LOC8);
LOC10 = (Ropeobj178006*)0;
LOC10 = HEX26_178418_2381377266(LOC9, rnl_178903_2381377266);
add_178482_2381377266(LOC5, LOC10);
}
LA3: ;
LOC11 = (Ropeobj178006**)0;
LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC12 = (NimStringDesc*)0;
LOC12 = tofullpath_192261_155036129((*t0).info.fileindex);
genclinedir_532725_839829468(LOC11, LOC12, line0);
{
NIM_BOOL LOC15;
NIM_BOOL LOC17;
LOC15 = (NIM_BOOL)0;
LOC15 = ((163840 & (*p0).options) == 163840);
if (!(LOC15)) goto LA16;
LOC17 = (NIM_BOOL)0;
LOC17 = ((*p0).prc == NIM_NIL);
if (LOC17) goto LA18;
LOC17 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
LA18: ;
LOC15 = LOC17;
LA16: ;
if (!LOC15) goto LA19;
{
NIM_BOOL LOC23;
TY532811 LOC26;
NimStringDesc* LOC27;
LOC23 = (NIM_BOOL)0;
LOC23 = freshlineinfo_532818_839829468(p0, (*t0).info);
if (!LOC23) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rope_178401_2381377266(((NI64) (line0)));
LOC27 = (NimStringDesc*)0;
LOC27 = tofilename_192257_155036129((*t0).info.fileindex);
LOC26[1] = makecstring_191638_155036129(LOC27);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_294), LOC26, 2);
}
LA24: ;
}
goto LA13;
LA19: ;
{
NIM_BOOL LOC29;
NIM_BOOL LOC30;
NIM_BOOL LOC32;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((98304 & (*p0).options) == 98304);
if (!(LOC30)) goto LA31;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*p0).prc == NIM_NIL);
if (LOC32) goto LA33;
LOC32 = !((((*(*p0).prc).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0));
LA33: ;
LOC30 = LOC32;
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA34;
LOC29 = (((NI32) 0) <= (*t0).info.fileindex);
LA34: ;
if (!LOC29) goto LA35;
{
NIM_BOOL LOC39;
TY532811 LOC42;
LOC39 = (NIM_BOOL)0;
LOC39 = freshlineinfo_532818_839829468(p0, (*t0).info);
if (!LOC39) goto LA40;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rope_178401_2381377266(((NI64) (line0)));
LOC42[1] = quotedfilename_196818_155036129((*t0).info);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_295), LOC42, 2);
}
LA40: ;
}
goto LA13;
LA35: ;
LA13: ;
}
N_NIMCALL(Ropeobj178006*, getlabel_539217_839829468)(Tcproc529021* p0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
(*p0).labels += ((NI) 1);
LOC1 = (Ropeobj178006*)0;
LOC1 = rope_178401_2381377266(((NI64) ((*p0).labels)));
result0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC1);
return result0;
}
N_NIMCALL(void, fixlabel_539230_839829468)(Tcproc529021* p0, Ropeobj178006* labl0) {
TY178507 LOC1;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = labl0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_299), LOC1, 1);
}
N_NIMCALL(void, genandor_554311_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Ropeobj178006* L0;
Tloc292816 tmp0;
L0 = (Ropeobj178006*)0;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
(*p0).splitdecls += ((NI) 1);
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
L0 = getlabel_539217_839829468(p0);
{
TY532811 LOC5;
if (!(m0 == ((Tmagic292524) 127))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&tmp0));
LOC5[1] = L0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_297), LOC5, 2);
}
goto LA1;
LA3: ;
{
TY532811 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468((&tmp0));
LOC7[1] = L0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_298), LOC7, 2);
}
LA1: ;
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&tmp0));
fixlabel_539230_839829468(p0, L0);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA10;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA8;
LA10: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA8: ;
(*p0).splitdecls -= ((NI) 1);
}
N_NIMCALL(void, unaryarith_552646_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tloc292816 a0;
Ttype292840* t0;
TY535238 LOC1;
NI64 LOC2;
Ropeobj178006* LOC3;
memset((void*)(&a0), 0, sizeof(a0));
t0 = (Ttype292840*)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
LOC2 = (NI64)0;
LOC2 = getsize_320135_3876443242(t0);
LOC1[1] = rope_178401_2381377266((NI64)(LOC2 * IL64(8)));
LOC1[2] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
LOC3 = (Ropeobj178006*)0;
LOC3 = HEX25_178905_2381377266(unarithtab_552653_839829468[(op0)- 99], LOC1, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC3, ((Tstorageloc292812) 0));
}
N_NIMCALL(void, unaryarithoverflow_551633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Tloc292816 a0;
Ttype292840* t0;
TY532811 LOC7;
NI64 LOC8;
Ropeobj178006* LOC9;
memset((void*)(&a0), 0, sizeof(a0));
t0 = (Ttype292840*)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
TY532811 LOC5;
NI64 LOC6;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&a0));
LOC6 = (NI64)0;
LOC6 = firstord_320001_3876443242(t0);
LOC5[1] = intliteral_539270_839829468(LOC6);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_317), LOC5, 2);
}
LA3: ;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rdloc_538188_839829468((&a0));
LOC8 = (NI64)0;
LOC8 = getsize_320135_3876443242(t0);
LOC7[1] = rope_178401_2381377266((NI64)(LOC8 * IL64(8)));
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX25_178905_2381377266(opr_551640_839829468[(m0)- 96], LOC7, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC9, ((Tstorageloc292812) 0));
}
N_NIMCALL(void, binaryarith_551819_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tloc292816 a0;
Tloc292816 b0;
NI64 s0;
NI64 LOC1;
NI64 LOC2;
TY535235 LOC3;
Ropeobj178006* LOC4;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
s0 = (NI64)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242(a0.t);
LOC2 = (NI64)0;
LOC2 = getsize_320135_3876443242(b0.t);
s0 = (NI64)(((LOC1 >= LOC2) ? LOC1 : LOC2) * IL64(8));
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = rdloc_538188_839829468((&a0));
LOC3[1] = rdloc_538188_839829468((&b0));
LOC3[2] = rope_178401_2381377266(s0);
LOC3[3] = getsimpletypedesc_533936_839829468((*p0).module, (*e0).typ);
LOC4 = (Ropeobj178006*)0;
LOC4 = HEX25_178905_2381377266(binarithtab_551826_839829468[(op0)- 52], LOC3, 4);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC4, ((Tstorageloc292812) 0));
}
N_NIMCALL(void, binaryfloatarith_556729_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
{
Tloc292816 a0;
Tloc292816 b0;
TY535235 LOC5;
Tnode292802* LOC6;
Ropeobj178006* LOC7;
if (!!(((384 & (*p0).options) == 0))) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178277_2381377266(opr_556763_839829468[(m0)- 52]);
LOC5[1] = rdloc_538188_839829468((&a0));
LOC5[2] = rdloc_538188_839829468((&b0));
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC5[3] = getsimpletypedesc_533936_839829468((*p0).module, (*LOC6).typ);
LOC7 = (Ropeobj178006*)0;
LOC7 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_319), LOC5, 4);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0));
{
TY178507 LOC12;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 7))&31U)))!=0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((&(*d0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_323), LOC12, 1);
}
LA10: ;
{
TY178507 LOC17;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 8))&31U)))!=0)) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((&(*d0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_324), LOC17, 1);
}
LA15: ;
}
goto LA1;
LA3: ;
{
binaryarith_551819_839829468(p0, e0, d0, m0);
}
LA1: ;
}
N_NIMCALL(void, geneqproc_552214_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
Ttype292840* LOC3;
TY532811 LOC6;
Ropeobj178006* LOC7;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
if (!((*LOC3).callconv == ((Tcallingconvention292002) 8))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rdloc_538188_839829468((&a0));
LOC6[1] = rdloc_538188_839829468((&b0));
LOC7 = (Ropeobj178006*)0;
LOC7 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_352), LOC6, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC7, ((Tstorageloc292812) 0));
}
goto LA1;
LA4: ;
{
TY532811 LOC9;
Ropeobj178006* LOC10;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = rdloc_538188_839829468((&a0));
LOC9[1] = rdloc_538188_839829468((&b0));
LOC10 = (Ropeobj178006*)0;
LOC10 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_341), LOC9, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC10, ((Tstorageloc292812) 0));
}
LA1: ;
}
N_NIMCALL(Ropeobj178006*, rdcharloc_538227_839829468)(Tloc292816* a0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = rdloc_538188_839829468(a0);
{
Ttype292840* LOC3;
TY178507 LOC6;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*a0).t, IL64(211106233624832));
if (!((*LOC3).kind == ((Ttypekind292244) 2))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = result0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_358), LOC6, 1);
}
LA4: ;
return result0;
}
N_NIMCALL(Ropeobj178006*, binaryarithoverflowraw_551235_839829468)(Tcproc529021* p0, Ttype292840* t0, Tloc292816* a0, Tloc292816* b0, NimStringDesc* frmt0) {
Ropeobj178006* result0;
NI64 size0;
Ropeobj178006* storage0;
TY532811 LOC6;
TY535238 LOC7;
result0 = (Ropeobj178006*)0;
size0 = getsize_320135_3876443242(t0);
{
if (!(size0 < ((NI64) (intsize_176641_4151366050)))) goto LA3;
storage0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_36));
}
goto LA1;
LA3: ;
{
storage0 = gettypedesc_535673_839829468((*p0).module, t0);
}
LA1: ;
result0 = gettempname_533598_839829468((*p0).module);
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = storage0;
LOC6[1] = result0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_54), LOC6, 2);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = result0;
LOC7[1] = rdcharloc_538227_839829468(a0);
LOC7[2] = rdcharloc_538227_839829468(b0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC7, 3);
{
NIM_BOOL LOC10;
TY535238 LOC14;
NI64 LOC15;
NI64 LOC16;
LOC10 = (NIM_BOOL)0;
LOC10 = (size0 < ((NI64) (intsize_176641_4151366050)));
if (LOC10) goto LA11;
LOC10 = ((1064960 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0);
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC15 = (NI64)0;
LOC15 = firstord_320001_3876443242(t0);
LOC14[1] = intliteral_539270_839829468(LOC15);
LOC16 = (NI64)0;
LOC16 = lastord_320004_3876443242(t0);
LOC14[2] = intliteral_539270_839829468(LOC16);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_359), LOC14, 3);
}
LA12: ;
return result0;
}
N_NIMCALL(void, binaryarithoverflow_551262_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 m0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
Ropeobj178006* res0;
TY535238 LOC5;
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC5[1] = rdloc_538188_839829468((&a0));
LOC5[2] = rdloc_538188_839829468((&b0));
res0 = HEX25_178905_2381377266(opr_551279_839829468[(m0)- 45], LOC5, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, res0, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
Ropeobj178006* res0;
NimStringDesc* LOC7;
TY532811 LOC13;
Ropeobj178006* LOC14;
LOC7 = (NimStringDesc*)0;
{
if (!((*t0).kind == ((Ttypekind292244) 35))) goto LA10;
LOC7 = copyString(prc64_551274_839829468[(m0)- 45]);
}
goto LA8;
LA10: ;
{
LOC7 = copyString(prc_551269_839829468[(m0)- 45]);
}
LA8: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, t0, (&a0), (&b0), LOC7);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC13[1] = res0;
LOC14 = (Ropeobj178006*)0;
LOC14 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC13, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, ((Tstorageloc292812) 0));
}
LA1: ;
}
N_NIMCALL(Ropeobj178006*, lenfield_539305_839829468)(Tcproc529021* p0) {
Ropeobj178006* result0;
NimStringDesc* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (NimStringDesc*)0;
{
NIM_BOOL LOC4;
LOC4 = (NIM_BOOL)0;
LOC4 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC4) goto LA5;
LOC4 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA5: ;
if (!LOC4) goto LA6;
LOC1 = copyString(((NimStringDesc*) &T839829468_157));
}
goto LA2;
LA6: ;
{
LOC1 = copyString(((NimStringDesc*) &T839829468_158));
}
LA2: ;
result0 = rope_178277_2381377266(LOC1);
return result0;
}
N_NIMCALL(void, gcusage_554439_839829468)(Tnode292802* n0) {
{
NimStringDesc* LOC5;
if (!(gselectedgc_169133_2607990831 == ((Tgcmode169080) 0))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = rendertree_311044_382274130(n0, 0);
message_196095_155036129((*n0).info, ((Tmsgkind191002) 263), LOC5);
}
LA3: ;
}
N_NIMCALL(void, genrepr_555339_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* t0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
switch ((*t0).kind) {
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 35):
case ((Ttypekind292244) 40) ... ((Ttypekind292244) 44):
{
TY178507 LOC2;
Ropeobj178006* LOC3;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rdloc_538188_839829468((&a0));
LOC3 = (Ropeobj178006*)0;
LOC3 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_371), LOC2, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC3, a0.s);
}
break;
case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
{
TY178507 LOC5;
Ropeobj178006* LOC6;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&a0));
LOC6 = (Ropeobj178006*)0;
LOC6 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_372), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
break;
case ((Ttypekind292244) 1):
{
TY178507 LOC8;
Ropeobj178006* LOC9;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC9 = (Ropeobj178006*)0;
LOC9 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_373), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC9, a0.s);
}
break;
case ((Ttypekind292244) 2):
{
TY178507 LOC11;
Ropeobj178006* LOC12;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468((&a0));
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_374), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC12, a0.s);
}
break;
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 15):
{
TY532811 LOC14;
Ropeobj178006* LOC15;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468((&a0));
LOC14[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC15 = (Ropeobj178006*)0;
LOC15 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_375), LOC14, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
break;
case ((Ttypekind292244) 28):
{
TY178507 LOC17;
Ropeobj178006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((&a0));
LOC18 = (Ropeobj178006*)0;
LOC18 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_376), LOC17, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
break;
case ((Ttypekind292244) 19):
{
TY532811 LOC20;
Ropeobj178006* LOC21;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = addrloc_538204_839829468((&a0));
LOC20[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC21 = (Ropeobj178006*)0;
LOC21 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_377), LOC20, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC21, a0.s);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
Tloc292816 b0;
TY532811 LOC34;
Ttype292840* LOC35;
Ropeobj178006* LOC36;
memset((void*)(&b0), 0, sizeof(b0));
switch ((*a0.t).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY178507 LOC24;
Ropeobj178006* LOC25;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = rdloc_538188_839829468((&a0));
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC24, 1);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC25, a0.s);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY532811 LOC27;
Ropeobj178006* LOC28;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rdloc_538188_839829468((&a0));
LOC27[1] = lenfield_539305_839829468(p0);
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC27, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC28, a0.s);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY532811 LOC30;
NI64 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC30, 0, sizeof(LOC30));
LOC30[0] = rdloc_538188_839829468((&a0));
LOC31 = (NI64)0;
LOC31 = lengthord_320007_3876443242(a0.t);
LOC30[1] = rope_178401_2381377266(LOC31);
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC30, 2);
putintodest_550468_839829468(p0, (&b0), (*e0).typ, LOC32, a0.s);
}
break;
default:
{
internalerror_196100_155036129((*(*e0).kindU.S6.sons->data[((NI) 0)]).info, ((NimStringDesc*) &T839829468_381));
}
break;
}
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = rdloc_538188_839829468((&b0));
LOC35 = (Ttype292840*)0;
LOC35 = elemtype_320394_3876443242(t0);
LOC34[1] = gentypeinfo_535941_839829468((*p0).module, LOC35);
LOC36 = (Ropeobj178006*)0;
LOC36 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_382), LOC34, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC36, a0.s);
}
break;
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 5):
case ((Ttypekind292244) 24):
{
TY532811 LOC38;
Ropeobj178006* LOC39;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = rdloc_538188_839829468((&a0));
LOC38[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC39 = (Ropeobj178006*)0;
LOC39 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC38, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC39, a0.s);
}
break;
case ((Ttypekind292244) 3):
case ((Ttypekind292244) 62):
{
localerror_196085_155036129((*e0).info, ((NimStringDesc*) &T839829468_384));
}
break;
default:
{
TY532811 LOC42;
Ropeobj178006* LOC43;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = addrloc_538204_839829468((&a0));
LOC42[1] = gentypeinfo_535941_839829468((*p0).module, t0);
LOC43 = (Ropeobj178006*)0;
LOC43 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_383), LOC42, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC43, a0.s);
}
break;
}
gcusage_554439_839829468(e0);
}
N_NIMCALL(void, gengettypeinfo_555383_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468((*p0).module, t0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC1, ((Tstorageloc292812) 0));
}
N_NIMCALL(void, genswap_555638_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 tmp0;
Ttype292840* LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
gettemp_537032_839829468(p0, LOC1, (&tmp0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
genassignment_539264_839829468(p0, (&tmp0), (&a0), 0);
genassignment_539264_839829468(p0, (&a0), (&b0), 0);
genassignment_539264_839829468(p0, (&b0), (&tmp0), 0);
}
N_NIMCALL(void, unaryexpr_551209_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
N_NIMCALL(void, binarystmt_550501_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC5;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
{
if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_387));
}
LA3: ;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&a0));
LOC5[1] = rdloc_538188_839829468((&b0));
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC5, 2);
}
N_NIMCALL(void, genstrconcat_554452_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 tmp0;
NI L0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*e0).typ, (&tmp0), NIM_FALSE);
L0 = ((NI) 0);
appends0 = NIM_NIL;
lens0 = NIM_NIL;
{
NI i_554475_839829468;
NI HEX3Atmp_554547_839829468;
NI LOC2;
NI res_554550_839829468;
i_554475_839829468 = (NI)0;
HEX3Atmp_554547_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554547_839829468 = (NI)(LOC2 - ((NI) 2));
res_554550_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554550_839829468 <= HEX3Atmp_554547_839829468)) goto LA4;
i_554475_839829468 = res_554550_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).typ, IL64(211106242013440));
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = tmp0.r;
LOC10[1] = rdloc_538188_839829468((&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554475_839829468 + ((NI) 1))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468((&a0));
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = tmp0.r;
LOC19[1] = rdloc_538188_839829468((&a0));
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554550_839829468 += ((NI) 1);
} LA4: ;
}
}
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = tmp0.r;
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_393), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA25;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA23;
LA25: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA23: ;
gcusage_554439_839829468(e0);
}
N_NIMCALL(void, genstrappend_554554_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 dest0;
Ropeobj178006* appends0;
Ropeobj178006* lens0;
NI L0;
TY535238 LOC21;
Ropeobj178006** LOC22;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&dest0), 0, sizeof(dest0));
appends0 = (Ropeobj178006*)0;
lens0 = (Ropeobj178006*)0;
L0 = ((NI) 0);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&dest0));
{
NI i_554615_839829468;
NI HEX3Atmp_554676_839829468;
NI LOC2;
NI res_554679_839829468;
i_554615_839829468 = (NI)0;
HEX3Atmp_554676_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554676_839829468 = (NI)(LOC2 - ((NI) 3));
res_554679_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554679_839829468 <= HEX3Atmp_554676_839829468)) goto LA4;
i_554615_839829468 = res_554679_839829468;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))], (&a0));
{
Ttype292840* LOC7;
TY532811 LOC10;
Ropeobj178006* LOC11;
LOC7 = (Ttype292840*)0;
LOC7 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).typ, IL64(211106242013440));
if (!((*LOC7).kind == ((Ttypekind292244) 2))) goto LA8;
L0 += ((NI) 1);
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((&dest0));
LOC10[1] = rdloc_538188_839829468((&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_390), LOC10, 2);
add_178482_2381377266(&appends0, LOC11);
}
goto LA5;
LA8: ;
{
TY532811 LOC19;
Ropeobj178006* LOC20;
{
if (!((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind >= ((Tnodekind292020) 20) && (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kind <= ((Tnodekind292020) 22))) goto LA15;
L0 += ((*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval ? (*(*e0).kindU.S6.sons->data[(NI)(i_554615_839829468 + ((NI) 2))]).kindU.S3.strval->Sup.len : 0);
}
goto LA13;
LA15: ;
{
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468((&a0));
LOC18[1] = lenfield_539305_839829468(p0);
addf_179205_2381377266(&lens0, ((NimStringDesc*) &T839829468_391), LOC18, 2);
}
LA13: ;
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&dest0));
LOC19[1] = rdloc_538188_839829468((&a0));
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_392), LOC19, 2);
add_178482_2381377266(&appends0, LOC20);
}
LA5: ;
res_554679_839829468 += ((NI) 1);
} LA4: ;
}
}
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_538188_839829468((&dest0));
LOC21[1] = lens0;
LOC21[2] = rope_178401_2381377266(((NI64) (L0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_395), LOC21, 3);
LOC22 = (Ropeobj178006**)0;
LOC22 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC22, appends0);
gcusage_554439_839829468(e0);
}
/* MACHINE-GENERATED C (Nim compiler C backend / nimcache). Do not hand-edit;
 * regenerate from the Nim source instead.
 *
 * Appears to emit C code for appending one element to a seq:
 *   1. choose an append pattern depending on a global command mode or a
 *      per-module flag (the two branches copy different format strings),
 *   2. emit the grow/append call (seqappendpattern0 with seq + type descs),
 *   3. emit an assignment of the new element into the slot at the current
 *      length, then emit a length-field increment.
 * NOTE(review): semantics inferred from identifier names in generated code
 * (e.g. rdloc/lenfield/genassignment) -- confirm against the generator. */
N_NIMCALL(void, genseqelemappend_554683_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
NimStringDesc* seqappendpattern0;
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 dest0;
Ttype292840* bt0;
TY535238 LOC8;
Ttype292840* LOC9;
TY532811 LOC10;
TY532811 LOC11;
{
/* Pattern selection: LOC3 is true when either a global command equals 2 or
 * bit 27 is set in the owning module's flags; the pattern string differs
 * accordingly (presumably a checked vs. unchecked variant). */
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_396));
}
goto LA1;
LA5: ;
{
seqappendpattern0 = copyString(((NimStringDesc*) &T839829468_397));
}
LA1: ;
/* Zero the Tloc structs before initlocexpr fills them (generated idiom). */
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&dest0), 0, sizeof(dest0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
bt0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 2)]).typ, IL64(211106240964864));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC8[1] = gettypedesc_535673_839829468((*p0).module, LOC9);
LOC8[2] = gettypedesc_535673_839829468((*p0).module, bt0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), seqappendpattern0, LOC8, 3);
/* dest0 becomes an lvalue for seq[len]; the element is assigned into it. */
initloc_532273_839829468((&dest0), ((Tlockind292808) 6), bt0, ((Tstorageloc292812) 3));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdloc_538188_839829468((&a0));
LOC10[1] = lenfield_539305_839829468(p0);
dest0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_398), LOC10, 2);
genassignment_539264_839829468(p0, (&dest0), (&b0), 3);
/* Finally emit the length increment (format string T..._399). */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdloc_538188_839829468((&a0));
LOC11[1] = lenfield_539305_839829468(p0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_399), LOC11, 2);
gcusage_554439_839829468(e0);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Generic binary-expression emitter: evaluates the two operand sons of e0
 * into locs a0/b0, formats them through frmt0 via ropecg, and stores the
 * resulting C expression as the destination of d0 with e0's type.
 * NOTE(review): behavior inferred from generated identifiers -- verify
 * against the code generator source before relying on details. */
N_NIMCALL(void, binaryexpr_550549_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
LOC1[1] = rdloc_538188_839829468((&b0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits C code for string equality. Three cases, selected in order:
 *   1. either operand is node kind 23 (presumably nil literal): use the
 *      simple binary pattern T..._341;
 *   2. one operand is a string-literal node (kinds 20..22) with length 0:
 *      emit a length==0 test on the other operand (pattern T..._400);
 *   3. otherwise: full string comparison via pattern T..._401.
 * NOTE(review): node-kind meanings inferred from numeric casts in generated
 * code -- confirm against the Nim compiler's node-kind enum. */
N_NIMCALL(void, genstrequals_556667_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 x0;
Tnode292802* a0;
Tnode292802* b0;
memset((void*)(&x0), 0, sizeof(x0));
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
b0 = (*e0).kindU.S6.sons->data[((NI) 2)];
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*a0).kind == ((Tnodekind292020) 23));
if (LOC3) goto LA4;
LOC3 = ((*b0).kind == ((Tnodekind292020) 23));
LA4: ;
if (!LOC3) goto LA5;
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
goto LA1;
LA5: ;
{
/* Case 2a: a0 is an empty string literal -> compare b0's length to 0. */
NIM_BOOL LOC8;
TY532811 LOC12;
Ropeobj178006* LOC13;
LOC8 = (NIM_BOOL)0;
LOC8 = ((*a0).kind >= ((Tnodekind292020) 20) && (*a0).kind <= ((Tnodekind292020) 22));
if (!(LOC8)) goto LA9;
LOC8 = (((*a0).kindU.S3.strval) && ((*a0).kindU.S3.strval)->Sup.len == 0);
LA9: ;
if (!LOC8) goto LA10;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&x0));
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468((&x0));
LOC12[1] = lenfield_539305_839829468(p0);
LOC13 = (Ropeobj178006*)0;
LOC13 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC12, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC13, ((Tstorageloc292812) 0));
}
goto LA1;
LA10: ;
{
/* Case 2b: symmetric -- b0 is the empty literal, test a0's length. */
NIM_BOOL LOC15;
TY532811 LOC19;
Ropeobj178006* LOC20;
LOC15 = (NIM_BOOL)0;
LOC15 = ((*b0).kind >= ((Tnodekind292020) 20) && (*b0).kind <= ((Tnodekind292020) 22));
if (!(LOC15)) goto LA16;
LOC15 = (((*b0).kindU.S3.strval) && ((*b0).kindU.S3.strval)->Sup.len == 0);
LA16: ;
if (!LOC15) goto LA17;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&x0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&x0));
LOC19[1] = lenfield_539305_839829468(p0);
LOC20 = (Ropeobj178006*)0;
LOC20 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_400), LOC19, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC20, ((Tstorageloc292812) 0));
}
goto LA1;
LA17: ;
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_401));
}
LA1: ;
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits C code for an is-nil test. When the operand's (skipped) type is
 * kind 25 with calling convention 8 -- presumably a closure proc type,
 * which is a two-field struct -- it uses pattern T..._404; otherwise the
 * plain pointer-nil pattern T..._405.
 * NOTE(review): type-kind meanings inferred from numeric casts -- confirm. */
N_NIMCALL(void, genisnil_552620_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* t0;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*t0).kind == ((Ttypekind292244) 25));
if (!(LOC3)) goto LA4;
LOC3 = ((*t0).callconv == ((Tcallingconvention292002) 8));
LA4: ;
if (!LOC3) goto LA5;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_404));
}
goto LA1;
LA5: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_405));
}
LA1: ;
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits C code for a `$` (stringify) style magic: evaluates the single
 * operand into a0, wraps it with frmt0, materializes a temporary for the
 * destination if d0 is still unset (kind 0), then emits the assignment.
 * NOTE(review): behavior inferred from generated identifiers -- verify. */
N_NIMCALL(void, gendollar_555391_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
a0.r = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
{
/* Lazily allocate a temp when the caller supplied no destination loc. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA4;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA4: ;
genassignment_539264_839829468(p0, (&(*d0)), (&a0), 0);
gcusage_554439_839829468(n0);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Builds (and returns) the rope for an `of` runtime type check of
 * expression `a0` against type `dest0`. Fast path (certain type flags /
 * codegen flags): a direct two-argument format T..._414 using the type
 * info. Slow path: allocates a per-module cache variable (named from a
 * fresh label counter), declares it in file section 9, and emits a cached
 * isObj-style check via T..._417.
 * NOTE(review): flag-bit meanings inferred from numeric casts -- confirm
 * against the generator before editing. */
N_NIMCALL(Ropeobj178006*, genofhelper_555140_839829468)(Tcproc529021* p0, Ttype292840* dest0, Ropeobj178006* a0) {
Ropeobj178006* result0;
Ropeobj178006* ti0;
result0 = (Ropeobj178006*)0;
ti0 = gentypeinfo_535941_839829468((*p0).module, dest0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY532811 LOC9;
LOC3 = (NIM_BOOL)0;
LOC3 = (((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 2))&31U)))!=0);
if (LOC3) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*(*p0).module).flags &(1U<<((NU)(((Codegenflag529025) 5))&7U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*dest0).flags &(1U<<((NU)(((Ttypeflag292431) 5))&31U)))!=0));
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = a0;
LOC9[1] = ti0;
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_414), LOC9, 2);
}
goto LA1;
LA7: ;
{
/* Slow path: bump the module label counter to get a unique cache name. */
Ropeobj178006* LOC11;
Ropeobj178006* cache0;
Ropeobj178006* LOC12;
TY178507 LOC13;
TY535238 LOC14;
LOC11 = (Ropeobj178006*)0;
LOC11 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_129));
(*(*p0).module).labels += ((NI) 1);
LOC12 = (Ropeobj178006*)0;
LOC12 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
cache0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_415), LOC12);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = cache0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_416), LOC13, 1);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = a0;
LOC14[1] = ti0;
LOC14[2] = cache0;
result0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_417), LOC14, 3);
}
LA1: ;
return result0;
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits C code for `x of typ` with d0 receiving a bool (system type kind 1).
 * Steps, as far as the generated code shows:
 *   1. evaluate x0 into a0, skip typedesc-ish wrappers on typ0;
 *   2. walk ref/ptr/var layers of a0's type (bitmask 14680064 over the type
 *      kind) remembering a nil-check expression and, in a checked mode,
 *      wrapping r0 in a deref pattern (T..._124);
 *   3. in checked mode, additionally descend object inheritance (kind 17
 *      with a non-nil base) appending a `.Sup`-style access (T..._153);
 *   4. report a global error if the target object lacks a type field;
 *   5. combine a nil check with the genofhelper result (T..._413), or use
 *      the helper alone (T..._418), and store into d0.
 * NOTE(review): all step descriptions are inferred from identifier names
 * and numeric enum casts in generated code -- confirm before editing. */
N_NIMCALL(void, genof_555201_839829468)(Tcproc529021* p0, Tnode292802* x0, Ttype292840* typ0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* dest0;
Ropeobj178006* r0;
Ropeobj178006* nilcheck0;
Ttype292840* t0;
Ttype292840* LOC41;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, x0, (&a0));
dest0 = skiptypes_296099_850551059(typ0, IL64(211106247256320));
r0 = rdloc_538188_839829468((&a0));
nilcheck0 = NIM_NIL;
t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
{
while (1) {
Ttype292840* LOC16;
if (!((14680064 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) goto LA2;
{
if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA5;
nilcheck0 = r0;
}
LA5: ;
{
NIM_BOOL LOC9;
NIM_BOOL LOC11;
TY178507 LOC15;
LOC9 = (NIM_BOOL)0;
LOC9 = !(((*t0).kind == ((Ttypekind292244) 23)));
if (LOC9) goto LA10;
LOC11 = (NIM_BOOL)0;
LOC11 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC11) goto LA12;
LOC11 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA12: ;
LOC9 = !(LOC11);
LA10: ;
if (!LOC9) goto LA13;
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = r0;
r0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC15, 1);
}
LA13: ;
LOC16 = (Ttype292840*)0;
LOC16 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC16, IL64(211106232576256));
} LA2: ;
}
{
NIM_BOOL LOC19;
LOC19 = (NIM_BOOL)0;
LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA20: ;
if (!!(LOC19)) goto LA21;
{
while (1) {
NIM_BOOL LOC25;
TY533289 LOC27;
Ropeobj178006* LOC28;
LOC25 = (NIM_BOOL)0;
LOC25 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC25)) goto LA26;
LOC25 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA26: ;
if (!LOC25) goto LA24;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC28 = (Ropeobj178006*)0;
LOC28 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_153), LOC27, 0);
add_178482_2381377266(&r0, LOC28);
t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA24: ;
}
}
LA21: ;
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = isobjlackingtypefield_533515_839829468(t0);
if (!LOC31) goto LA32;
globalerror_196071_155036129((*x0).info, ((Tmsgkind191002) 4), ((NimStringDesc*) &T839829468_412));
}
LA32: ;
{
TY532811 LOC38;
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = genofhelper_555140_839829468(p0, dest0, r0);
r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_413), LOC38, 2);
}
goto LA34;
LA36: ;
{
TY178507 LOC40;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = genofhelper_555140_839829468(p0, dest0, r0);
r0 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_418), LOC40, 1);
}
LA34: ;
LOC41 = (Ttype292840*)0;
LOC41 = getsystype_338150_3937434831(((Ttypekind292244) 1));
putintodest_550468_839829468(p0, d0, LOC41, r0, a0.s);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Thin wrapper: dispatches an
 * `of` expression node -- son 1 is the value, son 2's type is the target. */
N_NIMCALL(void, genof_555331_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
genof_555201_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (*(*n0).kindU.S6.sons->data[((NI) 2)]).typ, d0);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits C code for allocating a ref object into loc a0. If no size
 * expression is supplied, a default sizeof()-style expression (T..._419)
 * is built from the ref's element type. When the loc is on the GC'd heap
 * (storage 3) and the native GC is in use, it emits an unref call first
 * (cycle-aware T..._420 vs plain T..._255), allocates via T..._421 and
 * assigns raw (T..._123); otherwise it allocates via T..._422 and goes
 * through genassignment. Ends with object (type field) initialization.
 * NOTE(review): semantics inferred from generated identifiers -- confirm. */
N_NIMCALL(void, rawgennew_554741_839829468)(Tcproc529021* p0, Tloc292816* a0, Ropeobj178006* sizeexpr_554745_839829468) {
Ropeobj178006* sizeexpr0;
Ttype292840* reftype0;
Tloc292816 b0;
TY535238 args0;
Ttype292840* bt0;
sizeexpr0 = sizeexpr_554745_839829468;
reftype0 = skiptypes_296099_850551059((*a0).t, IL64(211106242013440));
memset((void*)(&b0), 0, sizeof(b0));
initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*a0).t, ((Tstorageloc292812) 3));
{
TY178507 LOC5;
Ttype292840* LOC6;
/* Precedence note: `!sizeexpr0 == 0` parses as `(!sizeexpr0) == 0`, i.e.
 * "sizeexpr0 is non-NULL" -> skip; the block runs only when sizeexpr0 is
 * NULL and synthesizes the default size expression. Odd-looking but the
 * generated form is behaviorally a plain null test. */
if (!sizeexpr0 == 0) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
LOC5[0] = gettypedesc_535673_839829468((*p0).module, LOC6);
sizeexpr0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_419), LOC5, 1);
}
LA3: ;
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_535673_839829468((*p0).module, reftype0);
args0[1] = gentypeinfo_535941_839829468((*p0).module, reftype0);
args0[2] = sizeexpr0;
{
NIM_BOOL LOC9;
TY532811 LOC21;
LOC9 = (NIM_BOOL)0;
LOC9 = ((*a0).s == ((Tstorageloc292812) 3));
if (!(LOC9)) goto LA10;
LOC9 = usesnativegc_169177_2607990831();
LA10: ;
if (!LOC9) goto LA11;
{
NIM_BOOL LOC15;
TY178507 LOC18;
LOC15 = (NIM_BOOL)0;
LOC15 = canformacycle_320123_3876443242((*a0).t);
if (!LOC15) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC18, 1);
}
goto LA13;
LA16: ;
{
TY178507 LOC20;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_538188_839829468(a0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC20, 1);
}
LA13: ;
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_421), args0, 3);
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdloc_538188_839829468(a0);
LOC21[1] = rdloc_538188_839829468((&b0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC21, 2);
}
goto LA7;
LA11: ;
{
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_422), args0, 3);
genassignment_539264_839829468(p0, a0, (&b0), 0);
}
LA7: ;
bt0 = skiptypes_296099_850551059((*reftype0).sons->data[((NI) 0)], IL64(211106233624832));
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, a0, NIM_FALSE);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits code for `new(x)` / `new(x, size)`: with three sons the second
 * argument supplies an explicit size expression; otherwise rawgennew is
 * called with NIM_NIL and computes the default size itself. */
N_NIMCALL(void, gennew_554782_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
{
NI LOC3;
Tloc292816 se0;
Ropeobj178006* LOC6;
LOC3 = (NI)0;
LOC3 = len_293081_850551059(e0);
if (!(LOC3 == ((NI) 3))) goto LA4;
memset((void*)(&se0), 0, sizeof(se0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&se0));
LOC6 = (Ropeobj178006*)0;
LOC6 = rdloc_538188_839829468((&se0));
rawgennew_554741_839829468(p0, (&a0), LOC6);
}
goto LA1;
LA4: ;
{
rawgennew_554741_839829468(p0, (&a0), NIM_NIL);
}
LA1: ;
gcusage_554439_839829468(e0);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits code for `new(x, finalizer)`: registers the finalizer expression
 * against the ref type's type info in file section 14 (T..._423), then
 * allocates via pattern T..._424, assigns to the destination, and runs
 * object initialization on the element type.
 * NOTE(review): behavior inferred from generated identifiers -- verify. */
N_NIMCALL(void, gennewfinalize_555111_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 f0;
Ttype292840* reftype0;
Ttype292840* bt0;
Ropeobj178006* ti0;
TY532811 LOC1;
TY535238 LOC2;
Ttype292840* LOC3;
Ttype292840* LOC4;
Ttype292840* LOC5;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&f0), 0, sizeof(f0));
reftype0 = (Ttype292840*)0;
bt0 = (Ttype292840*)0;
ti0 = (Ropeobj178006*)0;
reftype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&f0));
initloc_532273_839829468((&b0), ((Tlockind292808) 6), a0.t, ((Tstorageloc292812) 3));
ti0 = gentypeinfo_535941_839829468((*p0).module, reftype0);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = ti0;
LOC1[1] = rdloc_538188_839829468((&f0));
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 14))- 0], ((NimStringDesc*) &T839829468_423), LOC1, 2);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = gettypedesc_535673_839829468((*p0).module, reftype0);
LOC2[1] = ti0;
LOC3 = (Ttype292840*)0;
LOC3 = lastson_295377_850551059(reftype0);
LOC4 = (Ttype292840*)0;
LOC4 = skiptypes_296099_850551059(LOC3, IL64(211106233624832));
LOC2[2] = gettypedesc_535673_839829468((*p0).module, LOC4);
b0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_424), LOC2, 3);
genassignment_539264_839829468(p0, (&a0), (&b0), 0);
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(reftype0);
bt0 = skiptypes_296099_850551059(LOC5, IL64(211106233624832));
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), bt0, (&a0), NIM_FALSE);
gcusage_554439_839829468(e0);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Shared helper for seq allocation: builds args (type desc, type info,
 * length rope) and emits either a raw-assign allocation for GC'd-heap
 * destinations (unref first via T..._420/T..._255, alloc via T..._425,
 * assign via T..._123) or a plain allocation + genassignment (T..._426).
 * Mirrors the storage-class split seen in rawgennew above.
 * NOTE(review): inferred from generated identifiers -- verify. */
N_NIMCALL(void, gennewseqaux_554795_839829468)(Tcproc529021* p0, Tloc292816* dest0, Ropeobj178006* length0) {
Ttype292840* seqtype0;
TY535238 args0;
Tloc292816 call0;
seqtype0 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440));
memset((void*)args0, 0, sizeof(args0));
args0[0] = gettypedesc_535673_839829468((*p0).module, seqtype0);
args0[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
args0[2] = length0;
memset((void*)(&call0), 0, sizeof(call0));
initloc_532273_839829468((&call0), ((Tlockind292808) 6), (*dest0).t, ((Tstorageloc292812) 3));
{
NIM_BOOL LOC3;
TY532811 LOC15;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*dest0).s == ((Tstorageloc292812) 3));
if (!(LOC3)) goto LA4;
LOC3 = usesnativegc_169177_2607990831();
LA4: ;
if (!LOC3) goto LA5;
{
NIM_BOOL LOC9;
TY178507 LOC12;
LOC9 = (NIM_BOOL)0;
LOC9 = canformacycle_320123_3876443242((*dest0).t);
if (!LOC9) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_420), LOC12, 1);
}
goto LA7;
LA10: ;
{
TY178507 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468(dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_255), LOC14, 1);
}
LA7: ;
call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_425), args0, 3);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rdloc_538188_839829468(dest0);
LOC15[1] = rdloc_538188_839829468((&call0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC15, 2);
}
goto LA1;
LA5: ;
{
call0.r = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_426), args0, 3);
genassignment_539264_839829468(p0, dest0, (&call0), 0);
}
LA1: ;
}
/* MACHINE-GENERATED C (Nim compiler C backend). Emits code for
 * `newSeq(dest, len)`: evaluates both arguments and delegates the actual
 * allocation to gennewseqaux with the length expression. */
N_NIMCALL(void, gennewseq_554824_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 b0;
Ropeobj178006* LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
LOC1 = (Ropeobj178006*)0;
LOC1 = rdloc_538188_839829468((&b0));
gennewseqaux_554795_839829468(p0, (&a0), LOC1);
gcusage_554439_839829468(e0);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Emits code for
 * `newSeqOfCap(cap)`: formats type desc, type info and the capacity
 * expression through pattern T..._427 and stores the call result in d0. */
N_NIMCALL(void, gennewseqofcap_554836_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* seqtype0;
Tloc292816 a0;
TY535238 LOC1;
Ropeobj178006* LOC2;
seqtype0 = skiptypes_296099_850551059((*e0).typ, IL64(211106242013440));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = gettypedesc_535673_839829468((*p0).module, seqtype0);
LOC1[1] = gentypeinfo_535941_839829468((*p0).module, seqtype0);
LOC1[2] = rdloc_538188_839829468((&a0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_427), LOC1, 3);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
gcusage_554439_839829468(e0);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits (once) a C typedef for a closure proc type t0 and returns its
 * fresh temp name. Builds the parameter list / return type via
 * genprocparams; for non-imported types it writes either a function-pointer
 * typedef including the calling convention (T..._64) or a closure-struct
 * style typedef (T..._75) into file section 3.
 * NOTE(review): the memset/chckNil/memset triple on check0 below is
 * generator-emitted (nil check of the address, then re-zero before
 * initintset); it looks redundant but is left exactly as generated. */
N_NIMCALL(Ropeobj178006*, getclosuretype_535685_839829468)(Tcgen529027* m0, Ttype292840* t0, Tclosuretypekind535681 kind0) {
Ropeobj178006* result0;
Intset268030 check0;
Ropeobj178006* rettype0;
Ropeobj178006* desc0;
result0 = (Ropeobj178006*)0;
memset((void*)(&check0), 0, sizeof(check0));
chckNil((void*)(&check0));
memset((void*)(&check0), 0, sizeof(check0));
initintset_268885_2627731572((&check0));
result0 = gettempname_533598_839829468(m0);
rettype0 = (Ropeobj178006*)0;
desc0 = (Ropeobj178006*)0;
genprocparams_534115_839829468(m0, t0, &rettype0, &desc0, (&check0), !((kind0 == ((Tclosuretypekind535681) 0))), NIM_FALSE);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedtype_533451_839829468(t0);
if (!!(LOC3)) goto LA4;
{
NIM_BOOL LOC8;
TY535235 LOC12;
LOC8 = (NIM_BOOL)0;
LOC8 = !(((*t0).callconv == ((Tcallingconvention292002) 8)));
if (LOC8) goto LA9;
LOC8 = !((kind0 == ((Tclosuretypekind535681) 2)));
LA9: ;
if (!LOC8) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178277_2381377266(Callingconvtostr_533587_839829468[((*t0).callconv)- 0]);
LOC12[1] = rettype0;
LOC12[2] = result0;
LOC12[3] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_64), LOC12, 4);
}
goto LA6;
LA10: ;
{
TY535238 LOC14;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = result0;
LOC14[1] = rettype0;
LOC14[2] = desc0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 3))- 0], ((NimStringDesc*) &T839829468_75), LOC14, 3);
}
LA6: ;
}
LA4: ;
return result0;
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits code for a cast expression. Three strategies:
 *   1. destination type kind is in bitmask 281475111387152 and the operand
 *      loc lacks flag bit 0: cast through the address (pattern T..._429,
 *      presumably a pointer-punning form for non-addressable values);
 *   2. closure proc type (kind 25, callconv 8): cast to the emitted
 *      closure typedef (T..._430 with getclosuretype);
 *   3. default: plain C cast to the destination type desc (T..._430).
 * NOTE(review): mask/flag meanings inferred from generated code -- verify. */
N_NIMCALL(void, gensomecast_556481_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* etyp0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
etyp0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
TY532811 LOC7;
Ropeobj178006* LOC8;
LOC3 = (NIM_BOOL)0;
LOC3 = ((IL64(281475111387152) &((NU64)1<<((NU)((*etyp0).kind)&63U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = !(((a0.flags &(1U<<((NU)(((Tlocflag292810) 0))&15U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535673_839829468((*p0).module, (*e0).typ);
LOC7[1] = addrloc_538204_839829468((&a0));
LOC8 = (Ropeobj178006*)0;
LOC8 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC7, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC8, a0.s);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC10;
TY532811 LOC14;
Ropeobj178006* LOC15;
LOC10 = (NIM_BOOL)0;
LOC10 = ((*etyp0).kind == ((Ttypekind292244) 25));
if (!(LOC10)) goto LA11;
LOC10 = ((*etyp0).callconv == ((Tcallingconvention292002) 8));
LA11: ;
if (!LOC10) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = getclosuretype_535685_839829468((*p0).module, etyp0, ((Tclosuretypekind535681) 1));
LOC14[1] = rdcharloc_538227_839829468((&a0));
LOC15 = (Ropeobj178006*)0;
LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC14, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC15, a0.s);
}
goto LA1;
LA12: ;
{
TY532811 LOC17;
Ropeobj178006* LOC18;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468((*p0).module, (*e0).typ);
LOC17[1] = rdcharloc_538227_839829468((&a0));
LOC18 = (Ropeobj178006*)0;
LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC17, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC18, a0.s);
}
LA1: ;
}
/* MACHINE-GENERATED C (Nim compiler C backend). Unary-expression emitter
 * variant that reads the operand via rdcharloc (char-adjusted read,
 * judging by the name) before formatting through frmt0 into d0. */
N_NIMCALL(void, unaryexprchar_551222_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdcharloc_538227_839829468((&a0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* MACHINE-GENERATED C (Nim compiler C backend). `ord(x)` magic: delegates
 * to the char-aware unary emitter with the fixed format string T..._301. */
N_NIMCALL(void, genord_556475_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_301));
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Emits code for the len/high magics (op0 == 8 appears to select the
 * `high` variant, judging by the paired format strings). Dispatches on the
 * operand's (skipped) type kind:
 *   - kinds 27/48: openarray-like -> patterns T..._431/432;
 *   - kind 29: cstring -> pulls in <string.h> then T..._433/434;
 *   - kinds 28/24: string/seq -> checked vs unchecked pattern pairs
 *     (T..._435/436 or T..._437/438) depending on command mode / module
 *     flag bit 27, mirroring the checks elsewhere in this file;
 *   - kinds 16/4: array-like -> compile-time constant via lastord/
 *     lengthord folded into a rope;
 *   - anything else: internal error T..._439.
 * An addr node (kind 64) around the operand is unwrapped first.
 * NOTE(review): kind/opcode meanings inferred from numeric casts -- verify. */
N_NIMCALL(void, genarraylen_555415_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tnode292802* a0;
Ttype292840* typ0;
a0 = (*e0).kindU.S6.sons->data[((NI) 1)];
{
if (!((*a0).kind == ((Tnodekind292020) 64))) goto LA3;
a0 = (*a0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
typ0 = skiptypes_296099_850551059((*a0).typ, IL64(211106240964864));
switch ((*typ0).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
{
if (!(op0 == ((Tmagic292524) 8))) goto LA8;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_431));
}
goto LA6;
LA8: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_432));
}
LA6: ;
}
break;
case ((Ttypekind292244) 29):
{
usestringh_532345_839829468((*p0).module);
{
if (!(op0 == ((Tmagic292524) 8))) goto LA14;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_433));
}
goto LA12;
LA14: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_434));
}
LA12: ;
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA21: ;
if (!!(LOC20)) goto LA22;
{
if (!(op0 == ((Tmagic292524) 8))) goto LA26;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_435));
}
goto LA24;
LA26: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_436));
}
LA24: ;
}
goto LA18;
LA22: ;
{
{
if (!(op0 == ((Tmagic292524) 8))) goto LA32;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_437));
}
goto LA30;
LA32: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_438));
}
LA30: ;
}
LA18: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
{
NI64 LOC40;
Ropeobj178006* LOC41;
if (!(op0 == ((Tmagic292524) 8))) goto LA38;
LOC40 = (NI64)0;
LOC40 = lastord_320004_3876443242(typ0);
LOC41 = (Ropeobj178006*)0;
LOC41 = rope_178401_2381377266(LOC40);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC41, ((Tstorageloc292812) 0));
}
goto LA36;
LA38: ;
{
NI64 LOC43;
Ropeobj178006* LOC44;
LOC43 = (NI64)0;
LOC43 = lengthord_320007_3876443242(typ0);
LOC44 = (Ropeobj178006*)0;
LOC44 = rope_178401_2381377266(LOC43);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, ((Tstorageloc292812) 0));
}
LA36: ;
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_439));
}
break;
}
}
/* MACHINE-GENERATED C (Nim compiler C backend). Emits a one-operand
 * statement via frmt0. Requires the destination loc to be unset (kind 0);
 * a non-empty d0 triggers an internal error (T..._442) since a statement
 * produces no value. */
N_NIMCALL(void, unarystmt_550527_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
TY178507 LOC5;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!!(((*d0).k == ((Tlockind292808) 0)))) goto LA3;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_442));
}
LA3: ;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rdloc_538188_839829468((&a0));
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC5, 1);
}
/* MACHINE-GENERATED C (Nim compiler C backend). `setLen` on strings:
 * binary statement with fixed pattern T..._445, plus GC usage tracking. */
N_NIMCALL(void, gensetlengthstr_555632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_445));
gcusage_554439_839829468(e0);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * `setLen` on seqs: picks a pattern (T..._446 vs T..._447) by the same
 * command-mode / module-flag-27 test used elsewhere in this file, then
 * emits it with the seq loc, the new length, and the seq/element type
 * descriptors as the four format arguments. */
N_NIMCALL(void, gensetlengthseq_555500_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* t0;
NimStringDesc* setlenpattern0;
TY535235 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!!(LOC3)) goto LA5;
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_446));
}
goto LA1;
LA5: ;
{
setlenpattern0 = copyString(((NimStringDesc*) &T839829468_447));
}
LA1: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC8[1] = rdloc_538188_839829468((&b0));
LOC8[2] = gettypedesc_535673_839829468((*p0).module, t0);
LOC8[3] = gettypedesc_535673_839829468((*p0).module, (*t0).sons->data[((NI) 0)]);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), setlenpattern0, LOC8, 4);
gcusage_554439_839829468(e0);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Reads a set-element loc
 * and, when the set type's first ordinal is non-zero, rebases the element
 * by that offset via pattern T..._448 (set bit positions are stored
 * relative to firstOrd, judging by the formatting). */
N_NIMCALL(Ropeobj178006*, rdsetelemloc_555662_839829468)(Tloc292816* a0, Ttype292840* settype0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = rdcharloc_538227_839829468(a0);
{
NI64 LOC3;
TY532811 LOC6;
NI64 LOC7;
LOC3 = (NI64)0;
LOC3 = firstord_320001_3876443242(settype0);
if (!!((LOC3 == IL64(0)))) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = result0;
LOC7 = (NI64)0;
LOC7 = firstord_320001_3876443242(settype0);
LOC6[1] = rope_178401_2381377266(LOC7);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_448), LOC6, 2);
}
LA4: ;
return result0;
}
/* MACHINE-GENERATED C (Nim compiler C backend). Statement emitter for
 * incl/excl on sets: operand 1 is the set, operand 2 the element (read
 * through rdsetelemloc so it is rebased by firstOrd when needed). */
N_NIMCALL(void, binarystmtinexcl_555858_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&a0));
LOC1[1] = rdsetelemloc_555662_839829468((&b0), a0.t);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), frmt0, LOC1, 2);
}
/* MACHINE-GENERATED C (Nim compiler C backend). Binary-expression emitter
 * variant that reads both operands via rdcharloc (char-adjusted read)
 * before formatting through frmt0 into d0. Mirrors binaryexpr above. */
N_NIMCALL(void, binaryexprchar_550809_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NimStringDesc* frmt0) {
Tloc292816 a0;
Tloc292816 b0;
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdcharloc_538227_839829468((&a0));
LOC1[1] = rdcharloc_538227_839829468((&b0));
LOC2 = (Ropeobj178006*)0;
LOC2 = ropecg_532407_839829468((*p0).module, frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* MACHINE-GENERATED C (Nim compiler C backend). Do not hand-edit.
 *
 * Predicate: should an `in setLiteral` test be compiled as a chain of
 * comparisons instead of a bitset operation? Requires node kind 39 (set
 * constructor, judging by the internal-error guard T..._463). Returns:
 *   - false when the set fits in a machine word (size <= intsize) and the
 *     node carries flag bit 4 (so the bitset form is cheap);
 *   - true when the element type's kind is in mask 62277025792;
 *   - otherwise true iff the constructor has at most 8 sons.
 * NOTE(review): flag/mask meanings inferred from generated code -- verify. */
N_NIMCALL(NIM_BOOL, fewcmps_555803_839829468)(Tnode292802* s0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
if (!!(((*s0).kind == ((Tnodekind292020) 39)))) goto LA3;
internalerror_196100_155036129((*s0).info, ((NimStringDesc*) &T839829468_463));
}
LA3: ;
{
NIM_BOOL LOC7;
NI64 LOC8;
LOC7 = (NIM_BOOL)0;
LOC8 = (NI64)0;
LOC8 = getsize_320135_3876443242((*s0).typ);
LOC7 = (LOC8 <= ((NI64) (intsize_176641_4151366050)));
if (!(LOC7)) goto LA9;
LOC7 = (((*s0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0);
LA9: ;
if (!LOC7) goto LA10;
result0 = NIM_FALSE;
}
goto LA5;
LA10: ;
{
Ttype292840* LOC13;
LOC13 = (Ttype292840*)0;
LOC13 = elemtype_320394_3876443242((*s0).typ);
if (!((IL64(62277025792) &((NU64)1<<((NU)((*LOC13).kind)&63U)))!=0)) goto LA14;
result0 = NIM_TRUE;
}
goto LA5;
LA14: ;
{
NI LOC17;
LOC17 = (NI)0;
LOC17 = sonslen_295351_850551059(s0);
result0 = (LOC17 <= ((NI) 8));
}
LA5: ;
return result0;
}
/* MACHINE-GENERATED C (Nim compiler C backend). Emits an `in`-set test
 * from pre-evaluated locs: a0 is the set, b0 the element (rebased via
 * rdsetelemloc); the result expression is formatted by frmt0 into d0. */
N_NIMCALL(void, binaryexprin_555837_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0, NimStringDesc* frmt0) {
TY532811 LOC1;
Ropeobj178006* LOC2;
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = rdloc_538188_839829468((&(*a0)));
LOC1[1] = rdsetelemloc_555662_839829468((&(*b0)), (*a0).t);
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(frmt0, LOC1, 2);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC2, ((Tstorageloc292812) 0));
}
/* MACHINE-GENERATED C (Nim compiler C backend). Dispatches an `in`-set
 * test on the byte size of the set type: sizes 1/2/4/8 use word-sized
 * bit-test patterns (T..._467..470); anything larger falls back to the
 * general array-of-bytes pattern (T..._471). */
N_NIMCALL(void, geninexpraux_553496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* a0, Tloc292816* b0, Tloc292816* d0) {
Ttype292840* LOC1;
NI64 LOC2;
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC2 = (NI64)0;
LOC2 = getsize_320135_3876443242(LOC1);
switch (((NI) (LOC2))) {
case ((NI) 1):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_467));
}
break;
case ((NI) 2):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_468));
}
break;
case ((NI) 4):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_469));
}
break;
case ((NI) 8):
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_470));
}
break;
default:
{
binaryexprin_555837_839829468(p0, e0, a0, b0, d0, ((NimStringDesc*) &T839829468_471));
}
break;
}
}
/* Generated C (Nim backend; presumably `genInOp`). Emits code for `x in s`.
   Fast path: when the right operand is a small set literal (fewcmps), the
   test is expanded into a parenthesized chain of range/equality comparisons
   built as a rope in `b0`; otherwise both operands are evaluated and the
   generic geninexpraux path is used. */
N_NIMCALL(void, geninop_556009_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 x0;
Tloc292816 y0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&x0), 0, sizeof(x0));
memset((void*)(&y0), 0, sizeof(y0));
{
NIM_BOOL LOC3;
Tnode292802* ea0;
NI length0;
LOC3 = (NIM_BOOL)0;
/* Take the comparison-chain path only when son 1 is a set literal (kind 39)
   AND fewcmps judges the expansion profitable. */
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 39));
if (!(LOC3)) goto LA4;
LOC3 = fewcmps_555803_839829468((*e0).kindU.S6.sons->data[((NI) 1)]);
LA4: ;
if (!LOC3) goto LA5;
{
/* Unwrap a conversion node (kinds 69/70 -- presumably hidden conversions)
   around the element being tested so it is evaluated only once. */
if (!((*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 70) || (*(*e0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 69))) goto LA9;
ea0 = (*(*e0).kindU.S6.sons->data[((NI) 2)]).kindU.S6.sons->data[((NI) 0)];
}
goto LA7;
LA9: ;
{
ea0 = (*e0).kindU.S6.sons->data[((NI) 2)];
}
LA7: ;
initlocexpr_539283_839829468(p0, ea0, (&a0));
/* b0 is a pure-expression loc whose rope accumulates the comparison chain,
   starting with an opening token (T..._118) and closed with T..._117. */
initloc_532273_839829468((&b0), ((Tlockind292808) 6), (*e0).typ, ((Tstorageloc292812) 0));
b0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_118));
length0 = sonslen_295351_850551059((*e0).kindU.S6.sons->data[((NI) 1)]);
{
NI i_556061_839829468;
NI HEX3Atmp_556412_839829468;
NI res_556415_839829468;
i_556061_839829468 = (NI)0;
HEX3Atmp_556412_839829468 = (NI)0;
HEX3Atmp_556412_839829468 = (NI)(length0 - ((NI) 1));
res_556415_839829468 = ((NI) 0);
{
while (1) {
if (!(res_556415_839829468 <= HEX3Atmp_556412_839829468)) goto LA14;
i_556061_839829468 = res_556415_839829468;
{
TY535238 LOC19;
/* Range element (kind 44 -- presumably a range constructor): emit a
   two-sided bound check via template T..._464 with (a, lo, hi). */
if (!((*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kind == ((Tnodekind292020) 44))) goto LA17;
initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
initlocexpr_539283_839829468(p0, (*(*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdcharloc_538227_839829468((&a0));
LOC19[1] = rdcharloc_538227_839829468((&x0));
LOC19[2] = rdcharloc_538227_839829468((&y0));
addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_464), LOC19, 3);
}
goto LA15;
LA17: ;
{
TY532811 LOC21;
/* Single element: emit an equality check via template T..._465. */
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[i_556061_839829468], (&x0));
memset((void*)LOC21, 0, sizeof(LOC21));
LOC21[0] = rdcharloc_538227_839829468((&a0));
LOC21[1] = rdcharloc_538227_839829468((&x0));
addf_179205_2381377266(&b0.r, ((NimStringDesc*) &T839829468_465), LOC21, 2);
}
LA15: ;
{
/* Join consecutive comparisons with a separator (T..._466 -- presumably
   "||") between all but the last element. */
if (!(i_556061_839829468 < (NI)(length0 - ((NI) 1)))) goto LA24;
add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_466));
}
LA24: ;
res_556415_839829468 += ((NI) 1);
} LA14: ;
}
}
add_178487_2381377266(&b0.r, ((NimStringDesc*) &T839829468_117));
putintodest_550468_839829468(p0, d0, (*e0).typ, b0.r, ((Tstorageloc292812) 0));
}
goto LA1;
LA5: ;
{
/* Generic path: evaluate both operands and dispatch on the set size. */
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
geninexpraux_553496_839829468(p0, e0, (&a0), (&b0), d0);
}
LA1: ;
}
/* Generated C (Nim backend; presumably `genSetOp`). Emits code for a set
   operation selected by the magic `op0`. Dispatches first on the set's byte
   size: 1/2/4/8-byte sets are represented as plain integers and use direct
   bit arithmetic; any other size uses byte-array templates and loops.
   The numeric Tmagic values (39, 40, 41, 131..137, 148) map to specific set
   magics (incl/excl/card/comparisons/union etc.) -- exact mapping should be
   confirmed against the generated Tmagic enum. */
N_NIMCALL(void, gensetop_556419_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 i0;
Ttype292840* settype0;
NI size0;
NI64 LOC1;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&i0), 0, sizeof(i0));
settype0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106240964864));
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242(settype0);
size0 = ((NI) (LOC1));
switch (size0) {
/* ---- small sets: stored in a single integer of 8..64 bits ---- */
case ((NI) 1):
case ((NI) 2):
case ((NI) 4):
case ((NI) 8):
{
switch (op0) {
case ((Tmagic292524) 39):
{
/* Magic 39: statement-style include/exclude; builds the type suffix
   string "NU"/"NI" + bit count, then splices it into the template. */
NimStringDesc* ts0;
NimStringDesc* LOC4;
NimStringDesc* LOC5;
NimStringDesc* LOC6;
LOC4 = (NimStringDesc*)0;
LOC5 = (NimStringDesc*)0;
LOC5 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC4 = rawNewString(LOC5->Sup.len + 2);
appendString(LOC4, ((NimStringDesc*) &T839829468_45));
appendString(LOC4, LOC5);
ts0 = LOC4;
LOC6 = (NimStringDesc*)0;
LOC6 = rawNewString(ts0->Sup.len + ts0->Sup.len + 35);
appendString(LOC6, ((NimStringDesc*) &T839829468_449));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_450));
appendString(LOC6, ts0);
appendString(LOC6, ((NimStringDesc*) &T839829468_451));
binarystmtinexcl_555858_839829468(p0, e0, d0, LOC6);
}
break;
case ((Tmagic292524) 40):
{
/* Magic 40: the complementary incl/excl statement, same template
   construction with different fragments (T..._452..454). */
NimStringDesc* ts0;
NimStringDesc* LOC8;
NimStringDesc* LOC9;
NimStringDesc* LOC10;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nimIntToStr((NI)(size0 * ((NI) 8)));
LOC8 = rawNewString(LOC9->Sup.len + 2);
appendString(LOC8, ((NimStringDesc*) &T839829468_45));
appendString(LOC8, LOC9);
ts0 = LOC8;
LOC10 = (NimStringDesc*)0;
LOC10 = rawNewString(ts0->Sup.len + ts0->Sup.len + 42);
appendString(LOC10, ((NimStringDesc*) &T839829468_452));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_453));
appendString(LOC10, ts0);
appendString(LOC10, ((NimStringDesc*) &T839829468_454));
binarystmtinexcl_555858_839829468(p0, e0, d0, LOC10);
}
break;
case ((Tmagic292524) 41):
{
/* Magic 41 (presumably card): popcount helper; 32-bit helper for sets
   up to 4 bytes, 64-bit helper otherwise. */
{
if (!(size0 <= ((NI) 4))) goto LA14;
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_455));
}
goto LA12;
LA14: ;
{
unaryexprchar_551222_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_456));
}
LA12: ;
}
break;
case ((Tmagic292524) 133):
{
binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_457));
}
break;
case ((Tmagic292524) 132):
{
binaryexprchar_550809_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_458));
}
break;
case ((Tmagic292524) 131):
{
/* Magic 131: equality -- plain integer comparison for small sets. */
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_341));
}
break;
case ((Tmagic292524) 134):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_459));
}
break;
case ((Tmagic292524) 135):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_460));
}
break;
case ((Tmagic292524) 136):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_461));
}
break;
case ((Tmagic292524) 137):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_462));
}
break;
case ((Tmagic292524) 148):
{
/* Magic 148: `in` operator; delegates to geninop. */
geninop_556009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_472));
}
break;
}
}
break;
/* ---- big sets: stored as byte arrays; loop-based templates ---- */
default:
{
switch (op0) {
case ((Tmagic292524) 39):
{
binarystmtinexcl_555858_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_473));
}
break;
case ((Tmagic292524) 40):
{
binarystmtinexcl_555858_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_474));
}
break;
case ((Tmagic292524) 41):
{
/* card of a big set: call a runtime helper with the byte size. */
NimStringDesc* LOC30;
NimStringDesc* LOC31;
LOC30 = (NimStringDesc*)0;
LOC31 = (NimStringDesc*)0;
LOC31 = nimIntToStr(size0);
LOC30 = rawNewString(LOC31->Sup.len + 14);
appendString(LOC30, ((NimStringDesc*) &T839829468_475));
appendString(LOC30, LOC31);
appendChar(LOC30, 41);
unaryexprchar_551222_839829468(p0, e0, d0, LOC30);
}
break;
case ((Tmagic292524) 133):
case ((Tmagic292524) 132):
{
/* Subset comparisons: need a loop counter temp `i0` and a bool temp
   for the destination; template is looked up in lookupopr[op-132]. */
Ttype292840* LOC33;
TY536475 LOC39;
LOC33 = (Ttype292840*)0;
LOC33 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC33, (&i0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
Ttype292840* LOC38;
if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
LOC38 = (Ttype292840*)0;
LOC38 = getsystype_338150_3937434831(((Ttypekind292244) 1));
gettemp_537032_839829468(p0, LOC38, d0, NIM_FALSE);
}
LA36: ;
memset((void*)LOC39, 0, sizeof(LOC39));
LOC39[0] = rdloc_538188_839829468((&i0));
LOC39[1] = rope_178401_2381377266(((NI64) (size0)));
LOC39[2] = rdloc_538188_839829468((&(*d0)));
LOC39[3] = rdloc_538188_839829468((&a0));
LOC39[4] = rdloc_538188_839829468((&b0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), lookupopr_556426_839829468[(op0)- 132], LOC39, 5);
}
break;
case ((Tmagic292524) 131):
{
/* Equality of big sets: memcmp over `size0` bytes; pulls in string.h. */
NimStringDesc* LOC41;
NimStringDesc* LOC42;
usestringh_532345_839829468((*p0).module);
LOC41 = (NimStringDesc*)0;
LOC42 = (NimStringDesc*)0;
LOC42 = nimIntToStr(size0);
LOC41 = rawNewString(LOC42->Sup.len + 21);
appendString(LOC41, ((NimStringDesc*) &T839829468_481));
appendString(LOC41, LOC42);
appendString(LOC41, ((NimStringDesc*) &T839829468_482));
binaryexprchar_550809_839829468(p0, e0, d0, LOC41);
}
break;
case ((Tmagic292524) 134):
case ((Tmagic292524) 135):
case ((Tmagic292524) 136):
case ((Tmagic292524) 137):
{
/* Binary set arithmetic (union/intersection/difference/symdiff --
   confirm mapping): element-wise loop with the operator symbol passed
   as a rope (LOC49[5]) into template T..._483. */
Ttype292840* LOC44;
TY536847 LOC49;
LOC44 = (Ttype292840*)0;
LOC44 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC44, (&i0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA47;
gettemp_537032_839829468(p0, a0.t, d0, NIM_FALSE);
}
LA47: ;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468((&i0));
LOC49[1] = rope_178401_2381377266(((NI64) (size0)));
LOC49[2] = rdloc_538188_839829468((&(*d0)));
LOC49[3] = rdloc_538188_839829468((&a0));
LOC49[4] = rdloc_538188_839829468((&b0));
LOC49[5] = rope_178277_2381377266(lookupopr_556426_839829468[(op0)- 132]);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_483), LOC49, 6);
}
break;
case ((Tmagic292524) 148):
{
geninop_556009_839829468(p0, e0, d0);
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_484));
}
break;
}
}
break;
}
}
/* Generated-code helper: renders the first son of conversion node `n0`
   (a string value) and wraps it with format template T839829468_485,
   producing the rope for a string-to-cstring argument. */
static N_INLINE(Ropeobj178006*, genargstringtocstring_539776_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 argloc;
TY178507 fmtargs;
memset((void*)(&argloc), 0, sizeof(argloc));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&argloc));
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = rdloc_538188_839829468((&argloc));
return HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), fmtargs, 1);
}
/* Generated C (Nim backend; presumably `openArrayLoc`). Produces the rope
   for passing `n0` to an openarray parameter, i.e. "(data, length)" pairs.
   Two major paths: a slice expression (magic 139) passes base + bounds; any
   other expression derives the length from the value's type kind (string,
   seq, array, pointer-to-array, ...). The numeric Ttypekind values are from
   the generated enum -- exact names should be confirmed there. */
N_NIMCALL(Ropeobj178006*, openarrayloc_539665_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 a0;
Tnode292802* q0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
q0 = skipconv_328882_3876443242(n0);
{
Tmagic292524 LOC3;
Tloc292816 b0;
Tloc292816 c0;
Tnode292802* LOC6;
Tnode292802* LOC7;
Tnode292802* LOC8;
NimStringDesc* fmt0;
Ttype292840* LOC9;
TY535238 LOC25;
LOC3 = (Tmagic292524)0;
LOC3 = getmagic_318502_2616423590(q0);
/* Slice path: magic 139 -- evaluate the container and both bounds. */
if (!(LOC3 == ((Tmagic292524) 139))) goto LA4;
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&c0), 0, sizeof(c0));
LOC6 = (Tnode292802*)0;
LOC6 = HEX5BHEX5D_293238_850551059(q0, ((NI) 1));
initlocexpr_539283_839829468(p0, LOC6, (&a0));
LOC7 = (Tnode292802*)0;
LOC7 = HEX5BHEX5D_293238_850551059(q0, ((NI) 2));
initlocexpr_539283_839829468(p0, LOC7, (&b0));
LOC8 = (Tnode292802*)0;
LOC8 = HEX5BHEX5D_293238_850551059(q0, ((NI) 3));
initlocexpr_539283_839829468(p0, LOC8, (&c0));
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059(a0.t, IL64(211106243062016));
switch ((*LOC9).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
fmt0 = copyString(((NimStringDesc*) &T839829468_486));
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
/* Pick the nil-check-free template when the parameter type is kind 23
   and neither the compile-command nor the module flag requests the
   compatibility representation -- presumably the "not nil" string/seq
   layout switch; confirm against the backend flags. */
NIM_BOOL LOC14;
Ttype292840* LOC15;
NIM_BOOL LOC17;
LOC14 = (NIM_BOOL)0;
LOC15 = (Ttype292840*)0;
LOC15 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC14 = ((*LOC15).kind == ((Ttypekind292244) 23));
if (!(LOC14)) goto LA16;
LOC17 = (NIM_BOOL)0;
LOC17 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC17) goto LA18;
LOC17 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA18: ;
LOC14 = !(LOC17);
LA16: ;
if (!LOC14) goto LA19;
fmt0 = copyString(((NimStringDesc*) &T839829468_487));
}
goto LA12;
LA19: ;
{
fmt0 = copyString(((NimStringDesc*) &T839829468_488));
}
LA12: ;
}
break;
default:
{
/* Unsupported container type: report an internal error but still pick
   a fallback template so codegen can continue. */
NimStringDesc* LOC23;
NimStringDesc* LOC24;
LOC23 = (NimStringDesc*)0;
LOC24 = (NimStringDesc*)0;
LOC24 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC23 = rawNewString(LOC24->Sup.len + 14);
appendString(LOC23, ((NimStringDesc*) &T839829468_489));
appendString(LOC23, LOC24);
internalerror_196113_155036129(LOC23);
fmt0 = copyString(((NimStringDesc*) &T839829468_490));
}
break;
}
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468((&a0));
LOC25[1] = rdloc_538188_839829468((&b0));
LOC25[2] = rdloc_538188_839829468((&c0));
result0 = HEX25_178905_2381377266(fmt0, LOC25, 3);
}
goto LA1;
LA4: ;
{
/* Non-slice path: the length comes from the value's own type. */
Ttype292840* LOC27;
initlocexpr_539283_839829468(p0, n0, (&a0));
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
switch ((*LOC27).kind) {
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY178507 LOC29;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468((&a0));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_378), LOC29, 1);
}
break;
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
{
/* Same nil-representation switch as in the slice path above. */
NIM_BOOL LOC33;
Ttype292840* LOC34;
NIM_BOOL LOC36;
TY532811 LOC40;
LOC33 = (NIM_BOOL)0;
LOC34 = (Ttype292840*)0;
LOC34 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC33 = ((*LOC34).kind == ((Ttypekind292244) 23));
if (!(LOC33)) goto LA35;
LOC36 = (NIM_BOOL)0;
LOC36 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC36) goto LA37;
LOC36 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA37: ;
LOC33 = !(LOC36);
LA35: ;
if (!LOC33) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = rdloc_538188_839829468((&a0));
LOC40[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC40, 2);
}
goto LA31;
LA38: ;
{
TY532811 LOC42;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC42[0] = rdloc_538188_839829468((&a0));
LOC42[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_379), LOC42, 2);
}
LA31: ;
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
/* Fixed-size array: the length is a compile-time ordinal. */
TY532811 LOC44;
NI64 LOC45;
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468((&a0));
LOC45 = (NI64)0;
LOC45 = lengthord_320007_3876443242(a0.t);
LOC44[1] = rope_178401_2381377266(LOC45);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC44, 2);
}
break;
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
{
/* Pointer/ref to a container: dispatch again on the pointee kind. */
Ttype292840* LOC47;
LOC47 = (Ttype292840*)0;
LOC47 = lastson_295377_850551059(a0.t);
switch ((*LOC47).kind) {
case ((Ttypekind292244) 28):
case ((Ttypekind292244) 24):
{
TY532811 LOC49;
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468((&a0));
LOC49[1] = lenfield_539305_839829468(p0);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_491), LOC49, 2);
}
break;
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY532811 LOC51;
Ttype292840* LOC52;
NI64 LOC53;
memset((void*)LOC51, 0, sizeof(LOC51));
LOC51[0] = rdloc_538188_839829468((&a0));
LOC52 = (Ttype292840*)0;
LOC52 = lastson_295377_850551059(a0.t);
LOC53 = (NI64)0;
LOC53 = lengthord_320007_3876443242(LOC52);
LOC51[1] = rope_178401_2381377266(LOC53);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_380), LOC51, 2);
}
break;
default:
{
NimStringDesc* LOC55;
NimStringDesc* LOC56;
LOC55 = (NimStringDesc*)0;
LOC56 = (NimStringDesc*)0;
LOC56 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC55 = rawNewString(LOC56->Sup.len + 14);
appendString(LOC55, ((NimStringDesc*) &T839829468_489));
appendString(LOC55, LOC56);
internalerror_196113_155036129(LOC55);
}
break;
}
}
break;
default:
{
NimStringDesc* LOC58;
NimStringDesc* LOC59;
LOC58 = (NimStringDesc*)0;
LOC59 = (NimStringDesc*)0;
LOC59 = typetostring_320017_3876443242(a0.t, ((Tprefereddesc320011) 0));
LOC58 = rawNewString(LOC59->Sup.len + 14);
appendString(LOC58, ((NimStringDesc*) &T839829468_489));
appendString(LOC58, LOC59);
internalerror_196113_155036129(LOC58);
}
break;
}
}
LA1: ;
return result0;
}
/* Generated C (Nim backend; presumably `genArg`). Renders one call argument
   `n_539790_...` for formal parameter `param0` of call `call0`:
   - string-to-cstring conversion nodes (kind 71) get a dedicated template;
   - openarray-like parameter kinds (bitmask test on the skipped type) route
     through openarrayloc, unwrapping one hidden-addr node (kind 64);
   - parameters that the C backend passes by hidden pointer take the address
     of the evaluated loc;
   - a special compatibility path (command/flag-dependent, parameter kind 23,
     argument kind 64) may pass the address or value depending on the callee
     symbol's flags -- presumably the old string/seq representation; confirm;
   - everything else is evaluated single-use and read by value. */
N_NIMCALL(Ropeobj178006*, genarg_539787_839829468)(Tcproc529021* p0, Tnode292802* n_539790_839829468, Tsym292834* param0, Tnode292802* call0) {
Ropeobj178006* result0;
Tloc292816 a0;
result0 = (Ropeobj178006*)0;
memset((void*)(&a0), 0, sizeof(a0));
{
if (!((*n_539790_839829468).kind == ((Tnodekind292020) 71))) goto LA3;
result0 = genargstringtocstring_539776_839829468(p0, n_539790_839829468);
}
goto LA1;
LA3: ;
{
Ttype292840* LOC6;
Tnode292802* n0;
LOC6 = (Ttype292840*)0;
LOC6 = skiptypes_296099_850551059((*param0).typ, IL64(211106240964864));
if (!((IL64(281475110928384) &((NU64)1<<((NU)((*LOC6).kind)&63U)))!=0)) goto LA7;
{
if (!!(((*n_539790_839829468).kind == ((Tnodekind292020) 64)))) goto LA11;
n0 = n_539790_839829468;
}
goto LA9;
LA11: ;
{
n0 = (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)];
}
LA9: ;
result0 = openarrayloc_539665_839829468(p0, n0);
}
goto LA1;
LA7: ;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ccgintroducedptr_533611_839829468(param0);
if (!LOC15) goto LA16;
initlocexpr_539283_839829468(p0, n_539790_839829468, (&a0));
result0 = addrloc_538204_839829468((&a0));
}
goto LA1;
LA16: ;
{
NIM_BOOL LOC19;
NIM_BOOL LOC20;
NIM_BOOL LOC21;
Tnode292802* callee0;
LOC19 = (NIM_BOOL)0;
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC21) goto LA22;
LOC21 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC20 = ((*(*param0).typ).kind == ((Ttypekind292244) 23));
LA23: ;
LOC19 = LOC20;
if (!(LOC19)) goto LA24;
LOC19 = ((*n_539790_839829468).kind == ((Tnodekind292020) 64));
LA24: ;
if (!LOC19) goto LA25;
initlocexprsingleuse_539289_839829468(p0, (*n_539790_839829468).kindU.S6.sons->data[((NI) 0)], (&a0));
callee0 = (*call0).kindU.S6.sons->data[((NI) 0)];
{
/* Address is only taken when the callee is a symbol node (kind 3) whose
   flags/loc.flags match the masked patterns below; otherwise pass by
   value. The exact flag meanings come from the generated enums. */
NIM_BOOL LOC29;
NIM_BOOL LOC30;
LOC29 = (NIM_BOOL)0;
LOC30 = (NIM_BOOL)0;
LOC30 = ((*callee0).kind == ((Tnodekind292020) 3));
if (!(LOC30)) goto LA31;
LOC30 = ((134283296 & (*(*callee0).kindU.S4.sym).flags) == 32);
LA31: ;
LOC29 = LOC30;
if (!(LOC29)) goto LA32;
LOC29 = !(((72 & (*(*callee0).kindU.S4.sym).loc.flags) == 0));
LA32: ;
if (!LOC29) goto LA33;
result0 = addrloc_538204_839829468((&a0));
}
goto LA27;
LA33: ;
{
result0 = rdloc_538188_839829468((&a0));
}
LA27: ;
}
goto LA1;
LA25: ;
{
initlocexprsingleuse_539289_839829468(p0, n_539790_839829468, (&a0));
result0 = rdloc_538188_839829468((&a0));
}
LA1: ;
return result0;
}
/* Render call argument `n0` when no formal parameter symbol is available:
   string-to-cstring conversion nodes (kind 71) use the dedicated template,
   everything else is evaluated once and read back by value. */
N_NIMCALL(Ropeobj178006*, genargnoparam_539938_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tloc292816 argloc;
result0 = (Ropeobj178006*)0;
memset((void*)(&argloc), 0, sizeof(argloc));
if ((*n0).kind == ((Tnodekind292020) 71)) {
result0 = genargstringtocstring_539776_839829468(p0, n0);
} else {
initlocexprsingleuse_539289_839829468(p0, n0, (&argloc));
result0 = rdloc_538188_839829468((&argloc));
}
return result0;
}
/* Thin wrapper: the raw (kind-0) closure-type representation of `t0`,
   delegated to getclosuretype on the current module. */
N_NIMCALL(Ropeobj178006*, getrawproctype_540459_839829468)(Tcproc529021* p0, Ttype292840* t0) {
return getclosuretype_535685_839829468((*p0).module, t0, ((Tclosuretypekind535681) 0));
}
/* Generated C (Nim backend). Returns true when assignment target `le0`
   aliases (is part of) any argument of call `ri0` -- sons 1..len-1, i.e.
   skipping the callee at index 0. Used to decide whether a call result can
   be written directly into `le0` or needs a temporary. */
N_NIMCALL(NIM_BOOL, leftappearsonrightside_539329_839829468)(Tnode292802* le0, Tnode292802* ri0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
/* nil `le0` trivially cannot alias anything. */
if (!!((le0 == NIM_NIL))) goto LA3;
{
NI i_539364_839829468;
NI HEX3Atmp_539376_839829468;
NI LOC6;
NI res_539379_839829468;
i_539364_839829468 = (NI)0;
HEX3Atmp_539376_839829468 = (NI)0;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(ri0);
HEX3Atmp_539376_839829468 = (LOC6 - 1);
res_539379_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* r0;
if (!(res_539379_839829468 <= HEX3Atmp_539376_839829468)) goto LA8;
i_539364_839829468 = res_539379_839829468;
r0 = HEX5BHEX5D_293238_850551059(ri0, i_539364_839829468);
{
/* ispartof returns an analysis verdict; any value other than 0
   (presumably "arNo") counts as possible aliasing. */
Tanalysisresult473003 LOC11;
LOC11 = (Tanalysisresult473003)0;
LOC11 = ispartof_473340_788060399(le0, r0);
if (!!((LOC11 == ((Tanalysisresult473003) 0)))) goto LA12;
result0 = NIM_TRUE;
goto BeforeRet;
}
LA12: ;
res_539379_839829468 += ((NI) 1);
} LA8: ;
}
}
}
LA3: ;
}BeforeRet: ;
return result0;
}
/* True when the callee (son 0 of `call0`) is a symbol node (kind 3) whose
   symbol flag 12 is set -- presumably sfNoInit, meaning the call's
   destination does not need to be reset first (confirm against the
   generated Tsymflag enum). */
static N_INLINE(NIM_BOOL, hasnoinit_539383_839829468)(Tnode292802* call0) {
Tnode292802* callee;
callee = (*call0).kindU.S6.sons->data[((NI) 0)];
if (!((*callee).kind == ((Tnodekind292020) 3))) {
return NIM_FALSE;
}
return (NIM_BOOL)((((*(*callee).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
}
/* Generated C (Nim backend; presumably `resetLoc`). Emits code that resets
   location `loc0` to its zero/nil state before reuse:
   - imported C++ types are skipped entirely (their ctors handle init);
   - simple (non-complex) values: GC'd refs get a nil ref-assignment,
     otherwise a plain zero-store template;
   - complex values: optional memory-tracking hook, then either a runtime
     genericReset-style call with type info (for non-stack storage) or a
     memset-based clear, each followed by object (v-table/type-field)
     re-initialization. */
N_NIMCALL(void, resetloc_538350_839829468)(Tcproc529021* p0, Tloc292816* loc0) {
NIM_BOOL containsgcref0;
Ttype292840* typ0;
{ containsgcref0 = containsgarbagecollectedref_320117_3876443242((*loc0).t);
typ0 = skiptypes_296099_850551059((*loc0).t, IL64(211106242013440));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = isimportedcpptype_533478_839829468(typ0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscomplexvaluetype_538317_839829468(typ0);
if (!!(LOC8)) goto LA9;
{
/* Simple value containing a GC'd ref: assign nil through the write
   barrier (genrefassign). */
Tloc292816 nilloc0;
if (!containsgcref0) goto LA13;
memset((void*)(&nilloc0), 0, sizeof(nilloc0));
initloc_532273_839829468((&nilloc0), ((Tlockind292808) 1), (*loc0).t, ((Tstorageloc292812) 2));
nilloc0.r = rope_178277_2381377266(((NimStringDesc*) &T839829468_174));
genrefassign_538311_839829468(p0, (&(*loc0)), (&nilloc0), 8);
}
goto LA11;
LA13: ;
{
/* Simple non-GC value: zero it with template T..._494. */
TY178507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*loc0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC16, 1);
}
LA11: ;
}
goto LA6;
LA9: ;
{
{
/* Option bit 6 on the proc (presumably memory tracking) emits an extra
   hook call with the location's address first. */
TY178507 LOC22;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 6))&31U)))!=0)) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = addrloc_538204_839829468((&(*loc0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_495), LOC22, 1);
}
LA20: ;
{
/* Non-stack storage (s != 2): call the runtime reset with type info. */
TY532811 LOC27;
if (!!(((*loc0).s == ((Tstorageloc292812) 2)))) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = addrloc_538204_839829468((&(*loc0)));
LOC27[1] = gentypeinfo_535941_839829468((*p0).module, (*loc0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), LOC27, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (&(*loc0)), NIM_TRUE);
}
goto LA23;
LA25: ;
{
/* Stack storage: memset-based clear (template T..._152, needs string.h),
   then re-establish the object's type fields. */
TY532811 LOC29;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = addrloc_538204_839829468((&(*loc0)));
LOC29[1] = rdloc_538188_839829468((&(*loc0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_152), LOC29, 2);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 2), (*loc0).t, (&(*loc0)), NIM_TRUE);
}
LA23: ;
}
LA6: ;
}BeforeRet: ;
}
/* If `r0` is nil, return it unchanged; otherwise append a separator rope
   (format constant T839829468_110 rendered with zero arguments) to it. */
N_NIMCALL(Ropeobj178006*, addcomma_540464_839829468)(Ropeobj178006* r0) {
TY533289 noargs;
Ropeobj178006* sep;
if (r0 == NIM_NIL) {
return r0;
}
memset((void*)noargs, 0, sizeof(noargs));
sep = (Ropeobj178006*)0;
sep = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), noargs, 0);
return HEX26_178418_2381377266(r0, sep);
}
/* Generated C (Nim backend; presumably `genClosureCall`). Emits a call
   through a closure value: evaluates the callee (son 0), builds the
   comma-separated argument list `pl0`, picks a call template depending on
   whether the proc type has flag 14 set (presumably "no environment"), and
   then handles the return value: invalid return types get an extra
   out-parameter (written into `d0` or a temporary depending on aliasing),
   normal returns are assigned from the formatted call expression. */
N_NIMCALL(void, genclosurecall_540452_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
Ttype292840* typ0;
NI length0;
Ropeobj178006* rawproc0;
NimStringDesc* callpattern0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
pl0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
/* Argument loop over sons 1..length-1. */
NI i_540613_839829468;
NI HEX3Atmp_541214_839829468;
NI res_541217_839829468;
i_540613_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)0;
HEX3Atmp_541214_839829468 = (NI)(length0 - ((NI) 1));
res_541217_839829468 = ((NI) 1);
{
while (1) {
if (!(res_541217_839829468 <= HEX3Atmp_541214_839829468)) goto LA3;
i_540613_839829468 = res_541217_839829468;
{
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
LOC6 = sonslen_295327_850551059(typ0);
if (!(i_540613_839829468 < LOC6)) goto LA7;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540613_839829468];
{
/* Skip compile-time-only parameters; otherwise append a separator
   (when pl0 is non-empty) and the rendered argument. */
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
TY533289 LOC18;
Ropeobj178006* LOC19;
if (!!((pl0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
add_178482_2381377266(&pl0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468], (*paramtype0).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
/* Extra (varargs-style) argument beyond the declared parameters. */
Ropeobj178006* LOC28;
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((pl0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
add_178482_2381377266(&pl0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540613_839829468]);
add_178482_2381377266(&pl0, LOC28);
}
LA4: ;
res_541217_839829468 += ((NI) 1);
} LA3: ;
}
}
rawproc0 = getrawproctype_540459_839829468(p0, typ0);
{
/* Template choice: type flag 14 selects T..._492 (presumably the
   nil-env variant), otherwise T..._493. */
if (!(((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 14))&31U)))!=0)) goto LA31;
callpattern0 = copyString(((NimStringDesc*) &T839829468_492));
}
goto LA29;
LA31: ;
{
callpattern0 = copyString(((NimStringDesc*) &T839829468_493));
}
LA29: ;
{
/* Non-void return type (sons[0] != nil). */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA36;
{
NIM_BOOL LOC40;
LOC40 = (NIM_BOOL)0;
LOC40 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC40) goto LA41;
{
/* Invalid-as-C return type: the result becomes a trailing out
   parameter; add a separator if any argument preceded it. */
NI LOC45;
TY533289 LOC48;
Ropeobj178006* LOC49;
LOC45 = (NI)0;
LOC45 = sonslen_295351_850551059(ri0);
if (!(((NI) 1) < LOC45)) goto LA46;
memset((void*)LOC48, 0, sizeof(LOC48));
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC48, 0);
add_178482_2381377266(&pl0, LOC49);
}
LA46: ;
{
/* Safe to write directly into d0 when it is an addressable loc kind
   (bit test on d0->k) or the target does not alias any argument. */
NIM_BOOL LOC52;
NIM_BOOL LOC54;
Ropeobj178006* LOC67;
NimStringDesc* LOC68;
TY535235 LOC69;
LOC52 = (NIM_BOOL)0;
LOC52 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC52) goto LA53;
LOC54 = (NIM_BOOL)0;
LOC54 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC52 = !(LOC54);
LA53: ;
if (!LOC52) goto LA55;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA59;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA57;
LA59: ;
{
/* Reset an existing destination unless the callee is flagged noinit. */
NIM_BOOL LOC62;
NIM_BOOL LOC64;
LOC62 = (NIM_BOOL)0;
LOC62 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC62)) goto LA63;
LOC64 = (NIM_BOOL)0;
LOC64 = hasnoinit_539383_839829468(ri0);
LOC62 = !(LOC64);
LA63: ;
if (!LOC62) goto LA65;
resetloc_538350_839829468(p0, d0);
}
goto LA57;
LA65: ;
LA57: ;
LOC67 = (Ropeobj178006*)0;
LOC67 = addrloc_538204_839829468((&(*d0)));
add_178482_2381377266(&pl0, LOC67);
LOC68 = (NimStringDesc*)0;
LOC68 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC68, callpattern0);
appendString(LOC68, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = op0.r;
LOC69[1] = pl0;
LOC69[2] = addcomma_540464_839829468(pl0);
LOC69[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC68, LOC69, 4);
}
goto LA50;
LA55: ;
{
/* Aliasing possible: call into a fresh temporary, then assign to d0. */
Tloc292816 tmp0;
Ropeobj178006* LOC71;
NimStringDesc* LOC72;
TY535235 LOC73;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC71 = (Ropeobj178006*)0;
LOC71 = addrloc_538204_839829468((&tmp0));
add_178482_2381377266(&pl0, LOC71);
LOC72 = (NimStringDesc*)0;
LOC72 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC72, callpattern0);
appendString(LOC72, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC73, 0, sizeof(LOC73));
LOC73[0] = op0.r;
LOC73[1] = pl0;
LOC73[2] = addcomma_540464_839829468(pl0);
LOC73[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC72, LOC73, 4);
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA50: ;
}
goto LA38;
LA41: ;
{
/* Normal C-representable return: format the call as an expression loc
   (kind 9) and assign it into d0. */
Tloc292816 list0;
TY535235 LOC79;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA77;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA77: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
memset((void*)LOC79, 0, sizeof(LOC79));
LOC79[0] = op0.r;
LOC79[1] = pl0;
LOC79[2] = addcomma_540464_839829468(pl0);
LOC79[3] = rawproc0;
list0.r = HEX25_178905_2381377266(callpattern0, LOC79, 4);
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA38: ;
}
goto LA34;
LA36: ;
{
/* Void call: emit the call statement directly. */
NimStringDesc* LOC81;
TY535235 LOC82;
LOC81 = (NimStringDesc*)0;
LOC81 = rawNewString(callpattern0->Sup.len + 3);
appendString(LOC81, callpattern0);
appendString(LOC81, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC82, 0, sizeof(LOC82));
LOC82[0] = op0.r;
LOC82[1] = pl0;
LOC82[2] = addcomma_540464_839829468(pl0);
LOC82[3] = rawproc0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC81, LOC82, 4);
}
LA34: ;
}
/* Generated C (Nim backend; presumably `genOtherArg` used for imported/
   pattern-based calls). Renders argument `i0` of call `ri0` against proc
   type `typ0`:
   - declared parameters: skip compile-time-only ones (nil result), unwrap a
     hidden-addr node (kind 64) when the formal is kind 23 (presumably var),
     otherwise render as no-param argument;
   - arguments beyond the declared parameters are only legal when type flag 0
     (presumably varargs) is set; otherwise a local error is reported. */
N_NIMCALL(Ropeobj178006*, genotherarg_539277_839829468)(Tcproc529021* p0, Tnode292802* ri0, NI i0, Ttype292840* typ0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
NI LOC3;
Tnode292802* paramtype0;
LOC3 = (NI)0;
LOC3 = sonslen_295327_850551059(typ0);
if (!(i0 < LOC3)) goto LA4;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i0];
{
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!LOC8) goto LA9;
result0 = NIM_NIL;
}
goto LA6;
LA9: ;
{
NIM_BOOL LOC12;
Tnode292802* LOC16;
LOC12 = (NIM_BOOL)0;
LOC12 = ((*(*typ0).sons->data[i0]).kind == ((Ttypekind292244) 23));
if (!(LOC12)) goto LA13;
LOC12 = ((*(*ri0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 64));
LA13: ;
if (!LOC12) goto LA14;
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059((*ri0).kindU.S6.sons->data[i0], ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC16);
}
goto LA6;
LA14: ;
{
result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA6: ;
}
goto LA1;
LA4: ;
{
{
if (!!((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0))) goto LA21;
localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_501));
result0 = NIM_NIL;
}
goto LA19;
LA21: ;
{
result0 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i0]);
}
LA19: ;
}
LA1: ;
return result0;
}
/* Generated C (Nim backend; presumably `skipAddrDeref`). Cancels redundant
   addr/deref pairs around `node0`: addr-of-deref (and deref-of-addr)
   collapse to the underlying expression, with one intervening conversion
   node (kind 66) tolerated. Returns `node0` unchanged if no pair matches.
   Node kinds: 63/64 presumably addr/hidden-addr, 47/65 presumably
   deref/hidden-deref -- confirm against the generated Tnodekind enum. */
N_NIMCALL(Tnode292802*, skipaddrderef_541433_839829468)(Tnode292802* node0) {
Tnode292802* result0;
Tnode292802* n0;
NIM_BOOL isaddr0;
{ result0 = (Tnode292802*)0;
n0 = node0;
isaddr0 = NIM_FALSE;
switch ((*n0).kind) {
case ((Tnodekind292020) 63):
case ((Tnodekind292020) 64):
{
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
isaddr0 = NIM_TRUE;
}
break;
case ((Tnodekind292020) 47):
case ((Tnodekind292020) 65):
{
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
break;
default:
{
/* Neither addr nor deref at the top: nothing to cancel. */
result0 = n0;
goto BeforeRet;
}
break;
}
{
/* Look through a single conversion wrapper. */
if (!((*n0).kind == ((Tnodekind292020) 66))) goto LA6;
n0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
LA6: ;
{
/* addr(deref(x)) -> x */
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isaddr0;
if (!(LOC10)) goto LA11;
LOC10 = ((*n0).kind == ((Tnodekind292020) 47) || (*n0).kind == ((Tnodekind292020) 65));
LA11: ;
if (!LOC10) goto LA12;
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA12: ;
{
/* deref(addr(x)) -> x */
if (!((*n0).kind == ((Tnodekind292020) 63) || (*n0).kind == ((Tnodekind292020) 64))) goto LA15;
result0 = (*n0).kindU.S6.sons->data[((NI) 0)];
}
goto LA8;
LA15: ;
{
/* No cancellation possible: return the original node. */
result0 = node0;
}
LA8: ;
}BeforeRet: ;
return result0;
}
/* Generate the receiver (`this`) argument for a method-style (dot) call
 * pattern.  Nim-compiler-generated; NOTE(review): appears to be the
 * compiled form of `genThisArg` from ccgcalls -- confirm against the Nim
 * sources.
 *
 * Picks argument i0 of the call, strips wrapper nodes, and appends one of
 * two separator constants (T839829468_257 / T839829468_504 -- presumably
 * "." and "->" or similar member-access punctuation; TODO confirm their
 * contents) depending on whether the receiver is pointer-like. */
N_NIMCALL(Ropeobj178006*, genthisarg_541475_839829468)(Tcproc529021* p0, Tnode292802* ri_541478_839829468, NI i0, Ttype292840* typ0) {
Ropeobj178006* result0;
Tnode292802* ri0;
Ttype292840* t0;
result0 = (Ropeobj178006*)0;
{
NI LOC3;
NimStringDesc* LOC6;
LOC3 = (NI)0;
/* The receiver must correspond to an existing formal parameter. */
LOC3 = sonslen_295327_850551059(typ0);
if (!!((i0 < LOC3))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_503);
internalerror_196113_155036129(LOC6);
}
LA4: ;
ri0 = HEX5BHEX5D_293238_850551059(ri_541478_839829468, i0);
{
/* Unwrap any chain of kind-66 wrapper nodes around the receiver. */
while (1) {
if (!((*ri0).kind == ((Tnodekind292020) 66))) goto LA8;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
} LA8: ;
}
t0 = skiptypes_296099_850551059((*typ0).sons->data[i0], 2048);
{
Tnode292802* x0;
/* Case 1: formal parameter of type kind 23 (presumably a var parameter --
 * TODO confirm). */
if (!((*t0).kind == ((Ttypekind292244) 23))) goto LA11;
{
/* Strip a hidden-addr-like node (kind 64) if present. */
if (!((*ri0).kind == ((Tnodekind292020) 64))) goto LA15;
x0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
goto LA13;
LA15: ;
{
x0 = ri0;
}
LA13: ;
{
/* Receiver already has pointer-like type (kind 21): emit it, then the
 * pointer access separator. */
if (!((*(*x0).typ).kind == ((Ttypekind292244) 21))) goto LA20;
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA20: ;
{
NIM_BOOL LOC23;
Tnode292802* LOC25;
Tnode292802* LOC28;
LOC23 = (NIM_BOOL)0;
/* Receiver is a deref (kind 65/47) of a pointer-typed expression: drop
 * the deref and use pointer access on the underlying expression. */
LOC23 = ((*x0).kind == ((Tnodekind292020) 65) || (*x0).kind == ((Tnodekind292020) 47));
if (!(LOC23)) goto LA24;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
LOC23 = ((*(*LOC25).typ).kind == ((Ttypekind292244) 21));
LA24: ;
if (!LOC23) goto LA26;
LOC28 = (Tnode292802*)0;
LOC28 = HEX5BHEX5D_293238_850551059(x0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC28);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
goto LA18;
LA26: ;
{
/* Plain value receiver: emit it with the value access separator. */
result0 = genargnoparam_539938_839829468(p0, x0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA18: ;
}
goto LA9;
LA11: ;
{
/* Case 2: formal parameter itself has pointer-like type (kind 21). */
if (!((*t0).kind == ((Ttypekind292244) 21))) goto LA31;
{
Tnode292802* LOC37;
/* addr-like argument (kind 63/64): strip the addr, use value access. */
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA35;
LOC37 = (Tnode292802*)0;
LOC37 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
result0 = genargnoparam_539938_839829468(p0, LOC37);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
goto LA33;
LA35: ;
{
/* Already a pointer expression: use pointer access. */
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_504));
}
LA33: ;
}
goto LA9;
LA31: ;
{
/* Case 3: any other formal type -- normalise addr/deref wrapping first. */
ri0 = skipaddrderef_541433_839829468(ri0);
{
if (!((*ri0).kind == ((Tnodekind292020) 63) || (*ri0).kind == ((Tnodekind292020) 64))) goto LA42;
ri0 = HEX5BHEX5D_293238_850551059(ri0, ((NI) 0));
}
LA42: ;
result0 = genargnoparam_539938_839829468(p0, ri0);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_257));
}
LA9: ;
return result0;
}
/* Interpret an importcpp-style call pattern string `pat0` and build the
 * resulting C call text.  Nim-compiler-generated; NOTE(review): appears to
 * be the compiled `genPatternCall` from ccgcalls -- confirm.
 *
 * i0 walks the pattern characters, j0 walks the call's argument sons.
 * Recognised pattern characters (by ASCII code in the switch below):
 *   '@' (64)  -> splice all remaining arguments, comma-separated
 *   '#' (35)  -> splice the next single argument; the following character
 *                modifies the behaviour ('+'/'@' dotted call, '.' this-arg,
 *                '[' raw unwrapped arg)
 *   '\'' (39) -> a generic type slot ('0, '1, ... with optional stars)
 *   default   -> copy the literal run of pattern characters through */
N_NIMCALL(Ropeobj178006*, genpatterncall_541699_839829468)(Tcproc529021* p0, Tnode292802* ri_541702_839829468, NimStringDesc* pat0, Ttype292840* typ_541704_839829468) {
Ropeobj178006* result0;
NI i0;
NI j0;
result0 = (Ropeobj178006*)0;
i0 = ((NI) 0);
j0 = ((NI) 1);
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA2;
switch (((NU8)(pat0->data[i0]))) {
case 64:
{
/* '@': emit argument j0 and every later argument, separated by the
 * T839829468_110 format constant (presumably ", " -- TODO confirm). */
{
NI LOC6;
Ropeobj178006* LOC9;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(ri_541702_839829468);
if (!(j0 < LOC6)) goto LA7;
LOC9 = (Ropeobj178006*)0;
LOC9 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC9);
{
/* Counted loop over the remaining arguments j0+1 .. len-1. */
NI k_541728_839829468;
NI HEX3Atmp_541904_839829468;
NI HEX3Atmp_541905_839829468;
NI LOC11;
NI res_541908_839829468;
k_541728_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)0;
HEX3Atmp_541905_839829468 = (NI)0;
HEX3Atmp_541904_839829468 = (NI)(j0 + ((NI) 1));
LOC11 = (NI)0;
LOC11 = len_293081_850551059(ri_541702_839829468);
HEX3Atmp_541905_839829468 = (LOC11 - 1);
res_541908_839829468 = HEX3Atmp_541904_839829468;
{
while (1) {
TY533289 LOC14;
Ropeobj178006* LOC15;
Ropeobj178006* LOC16;
if (!(res_541908_839829468 <= HEX3Atmp_541905_839829468)) goto LA13;
k_541728_839829468 = res_541908_839829468;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC15 = (Ropeobj178006*)0;
LOC15 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC14, 0);
add_178482_2381377266(&result0, LOC15);
LOC16 = (Ropeobj178006*)0;
LOC16 = genotherarg_539277_839829468(p0, ri_541702_839829468, k_541728_839829468, typ_541704_839829468);
add_178482_2381377266(&result0, LOC16);
res_541908_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA7: ;
i0 += ((NI) 1);
}
break;
case 35:
{
/* '#': emit one argument; inspect the following pattern character. */
{
Tnode292802* ri0;
/* "#+" (43) or "#@" (64): argument must be a call-like node; render it
 * as receiver(args...) -- a "dotted call" form. */
if (!(((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(43)) || ((NU8)(pat0->data[(NI)(i0 + ((NI) 1))])) == ((NU8)(64)))) goto LA20;
ri0 = HEX5BHEX5D_293238_850551059(ri_541702_839829468, j0);
{
Ttype292840* typ0;
TY533289 LOC31;
Ropeobj178006* LOC32;
TY533289 LOC46;
Ropeobj178006* LOC47;
/* Accept only call-node kinds (26..32); anything else is an error. */
if (!((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32))) goto LA24;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
Ropeobj178006* LOC30;
/* For "#+" also emit the callee expression itself first. */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(43))) goto LA28;
LOC30 = (Ropeobj178006*)0;
LOC30 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)]);
add_178482_2381377266(&result0, LOC30);
}
LA28: ;
/* Open the argument list (T839829468_118 -- presumably "(" ). */
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC31, 0);
add_178482_2381377266(&result0, LOC32);
{
NI LOC35;
Ropeobj178006* LOC38;
LOC35 = (NI)0;
LOC35 = len_293081_850551059(ri0);
if (!(((NI) 1) < LOC35)) goto LA36;
LOC38 = (Ropeobj178006*)0;
LOC38 = genotherarg_539277_839829468(p0, ri0, ((NI) 1), typ0);
add_178482_2381377266(&result0, LOC38);
}
LA36: ;
{
/* Remaining inner-call arguments, comma separated. */
NI k_541793_839829468;
NI HEX3Atmp_541915_839829468;
NI HEX3Atmp_541916_839829468;
NI LOC40;
NI res_541919_839829468;
k_541793_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)0;
HEX3Atmp_541916_839829468 = (NI)0;
HEX3Atmp_541915_839829468 = (NI)(j0 + ((NI) 1));
LOC40 = (NI)0;
LOC40 = len_293081_850551059(ri0);
HEX3Atmp_541916_839829468 = (LOC40 - 1);
res_541919_839829468 = HEX3Atmp_541915_839829468;
{
while (1) {
TY533289 LOC43;
Ropeobj178006* LOC44;
Ropeobj178006* LOC45;
if (!(res_541919_839829468 <= HEX3Atmp_541916_839829468)) goto LA42;
k_541793_839829468 = res_541919_839829468;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC44 = (Ropeobj178006*)0;
LOC44 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC43, 0);
add_178482_2381377266(&result0, LOC44);
LOC45 = (Ropeobj178006*)0;
LOC45 = genotherarg_539277_839829468(p0, ri0, k_541793_839829468, typ0);
add_178482_2381377266(&result0, LOC45);
res_541919_839829468 += ((NI) 1);
} LA42: ;
}
}
/* Close the argument list (T839829468_117 -- presumably ")" ). */
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (Ropeobj178006*)0;
LOC47 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC46, 0);
add_178482_2381377266(&result0, LOC47);
}
goto LA22;
LA24: ;
{
localerror_196085_155036129((*ri0).info, ((NimStringDesc*) &T839829468_502));
}
LA22: ;
i0 += ((NI) 1);
}
goto LA18;
LA20: ;
{
Ropeobj178006* LOC52;
/* "#." (46): emit the receiver via genthisarg. */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(46))) goto LA50;
LOC52 = (Ropeobj178006*)0;
LOC52 = genthisarg_541475_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC52);
i0 += ((NI) 1);
}
goto LA18;
LA50: ;
{
Tnode292802* arg0;
Ropeobj178006* LOC58;
/* "#[" (91): fully unwrap addr/deref/conversion nodes, emit raw arg. */
if (!((NU8)(pat0->data[(NI)(i0 + ((NI) 1))]) == (NU8)(91))) goto LA54;
arg0 = skipaddrderef_541433_839829468((*ri_541702_839829468).kindU.S6.sons->data[j0]);
{
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 63) || (*arg0).kind == ((Tnodekind292020) 64) || (*arg0).kind == ((Tnodekind292020) 66))) goto LA57;
arg0 = HEX5BHEX5D_293238_850551059(arg0, ((NI) 0));
} LA57: ;
}
LOC58 = (Ropeobj178006*)0;
LOC58 = genargnoparam_539938_839829468(p0, arg0);
add_178482_2381377266(&result0, LOC58);
}
goto LA18;
LA54: ;
{
Ropeobj178006* LOC60;
/* Plain '#': emit the single argument normally. */
LOC60 = (Ropeobj178006*)0;
LOC60 = genotherarg_539277_839829468(p0, ri_541702_839829468, j0, typ_541704_839829468);
add_178482_2381377266(&result0, LOC60);
}
LA18: ;
/* '#' consumes one argument and one pattern character. */
j0 += ((NI) 1);
i0 += ((NI) 1);
}
break;
case 39:
{
/* '\'': generic type slot, e.g. 'n or '*n -- scan the slot index and
 * star count, resolve the type, and splice its C type descriptor. */
NI idx0;
NI stars0;
idx0 = (NI)0;
stars0 = (NI)0;
{
NIM_BOOL LOC64;
Ttype292840* t0;
LOC64 = (NIM_BOOL)0;
LOC64 = scancppgenericslot_534827_839829468(pat0, (&i0), (&idx0), (&stars0));
if (!LOC64) goto LA65;
t0 = resolvestarsincpptype_534891_839829468(typ_541704_839829468, idx0, stars0);
{
TY533289 LOC71;
Ropeobj178006* LOC72;
/* Unresolvable slot: emit the fallback constant T839829468_26
 * (contents not visible here -- TODO confirm, likely "void"). */
if (!(t0 == NIM_NIL)) goto LA69;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_26), LOC71, 0);
add_178482_2381377266(&result0, LOC72);
}
goto LA67;
LA69: ;
{
Ropeobj178006* LOC74;
LOC74 = (Ropeobj178006*)0;
LOC74 = gettypedesc_535673_839829468((*p0).module, t0);
add_178482_2381377266(&result0, LOC74);
}
LA67: ;
}
LA65: ;
}
break;
default:
{
/* Literal text: copy characters verbatim up to the next '@'/'#'/'\''. */
NI start0;
start0 = i0;
{
while (1) {
if (!(i0 < (pat0 ? pat0->Sup.len : 0))) goto LA77;
{
if (!!((((NU8)(pat0->data[i0])) == ((NU8)(64)) || ((NU8)(pat0->data[i0])) == ((NU8)(35)) || ((NU8)(pat0->data[i0])) == ((NU8)(39))))) goto LA80;
i0 += ((NI) 1);
}
goto LA78;
LA80: ;
{
goto LA76;
}
LA78: ;
} LA77: ;
} LA76: ;
{
NimStringDesc* LOC87;
if (!(start0 <= (NI)(i0 - ((NI) 1)))) goto LA85;
LOC87 = (NimStringDesc*)0;
LOC87 = copyStrLast(pat0, start0, (NI)(i0 - ((NI) 1)));
add_178487_2381377266(&result0, LOC87);
}
LA85: ;
}
break;
}
} LA2: ;
}
return result0;
}
/* Finish emitting a call: combine `callee0` with an opening paren and
 * `params0`, then route the result according to the callee's return type.
 * Nim-compiler-generated; NOTE(review): appears to be `fixupCall` from
 * ccgcalls -- confirm.
 *
 * Three outcomes, mirroring the branches below:
 *  - return type needs an out-parameter (isinvalidreturntype): the
 *    destination address is appended as an extra argument and the call is
 *    emitted as a statement (possibly through a temporary);
 *  - normal value return: the call expression is either stored directly
 *    into d0 as a single-use expression or assigned via genassignment;
 *  - void return: the call is emitted as a plain statement. */
N_NIMCALL(void, fixupcall_539410_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0, Ropeobj178006* callee0, Ropeobj178006* params0) {
Ropeobj178006* pl0;
TY533289 LOC1;
Ropeobj178006* LOC2;
Ropeobj178006* LOC3;
Ttype292840* typ0;
/* pl0 = callee0 & "(" & params0 (T839829468_118 presumably "(" ). */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC2 = (Ropeobj178006*)0;
LOC2 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_118), LOC1, 0);
LOC3 = (Ropeobj178006*)0;
LOC3 = HEX26_178418_2381377266(callee0, LOC2);
pl0 = HEX26_178418_2381377266(LOC3, params0);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* Non-nil return type (typ0.sons[0])? */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA6;
{
NIM_BOOL LOC10;
LOC10 = (NIM_BOOL)0;
LOC10 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC10) goto LA11;
{
TY533289 LOC17;
Ropeobj178006* LOC18;
/* There were real parameters: separate the out-param with ", ". */
if (!!((params0 == NIM_NIL))) goto LA15;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC18 = (Ropeobj178006*)0;
LOC18 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC17, 0);
add_178482_2381377266(&pl0, LOC18);
}
LA15: ;
{
NIM_BOOL LOC21;
NIM_BOOL LOC23;
Ropeobj178006* LOC36;
TY533289 LOC37;
Ropeobj178006* LOC38;
LOC21 = (NIM_BOOL)0;
/* Can we write straight into d0?  Yes if d0's kind is in the mask {0,1}
 * (bitset 3) or the left-hand side does not appear on the right. */
LOC21 = ((3 &(1U<<((NU)((*d0).k)&15U)))!=0);
if (LOC21) goto LA22;
LOC23 = (NIM_BOOL)0;
LOC23 = leftappearsonrightside_539329_839829468(le0, ri0);
LOC21 = !(LOC23);
LA22: ;
if (!LOC21) goto LA24;
{
/* Unallocated destination: materialise a temporary in d0. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA28;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
goto LA26;
LA28: ;
{
NIM_BOOL LOC31;
NIM_BOOL LOC33;
LOC31 = (NIM_BOOL)0;
/* Destination exists but may alias: reset it first unless the callee
 * carries the no-init marker. */
LOC31 = !(((66 &(1U<<((NU)((*d0).k)&15U)))!=0));
if (!(LOC31)) goto LA32;
LOC33 = (NIM_BOOL)0;
LOC33 = hasnoinit_539383_839829468(ri0);
LOC31 = !(LOC33);
LA32: ;
if (!LOC31) goto LA34;
resetloc_538350_839829468(p0, d0);
}
goto LA26;
LA34: ;
LA26: ;
/* Append &dest and the statement terminator, then emit. */
LOC36 = (Ropeobj178006*)0;
LOC36 = addrloc_538204_839829468((&(*d0)));
add_178482_2381377266(&pl0, LOC36);
memset((void*)LOC37, 0, sizeof(LOC37));
LOC38 = (Ropeobj178006*)0;
LOC38 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC37, 0);
add_178482_2381377266(&pl0, LOC38);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA19;
LA24: ;
{
/* Aliasing possible: call into a fresh temporary, then assign it to d0. */
Tloc292816 tmp0;
Ropeobj178006* LOC40;
TY533289 LOC41;
Ropeobj178006* LOC42;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC40 = (Ropeobj178006*)0;
LOC40 = addrloc_538204_839829468((&tmp0));
add_178482_2381377266(&pl0, LOC40);
memset((void*)LOC41, 0, sizeof(LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC41, 0);
add_178482_2381377266(&pl0, LOC42);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA19: ;
}
goto LA8;
LA11: ;
{
TY533289 LOC44;
Ropeobj178006* LOC45;
/* Value return: close the call expression (T839829468_117, presumably
 * ")" ). */
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_117), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
{
NIM_BOOL LOC48;
NIM_BOOL LOC49;
LOC48 = (NIM_BOOL)0;
LOC49 = (NIM_BOOL)0;
/* Single-use optimisation: in command mode 2 or when the module carries
 * sym-flag 27, and d0 has loc-flag 8, store the call expression itself
 * into d0 instead of assigning through a temporary. */
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
LOC48 = LOC49;
if (!(LOC48)) goto LA51;
LOC48 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA51: ;
if (!LOC48) goto LA52;
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA46;
LA52: ;
{
/* General case: wrap the expression in a loc and assign it to d0. */
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA57;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA57: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA46: ;
}
LA8: ;
}
goto LA4;
LA6: ;
{
TY533289 LOC60;
Ropeobj178006* LOC61;
/* Void return: terminate the statement and emit it. */
memset((void*)LOC60, 0, sizeof(LOC60));
LOC61 = (Ropeobj178006*)0;
LOC61 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_505), LOC60, 0);
add_178482_2381377266(&pl0, LOC61);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA4: ;
}
/* Generate a call to an importcpp-style (infix/method) routine.
 * Nim-compiler-generated; NOTE(review): appears to be `genInfixCall` from
 * ccgcalls -- confirm against the Nim sources.
 *
 * The callee's stored pattern string `pat0` drives code generation:
 *  - if the pattern contains the marker set T839829468_500 (pattern
 *    placeholders), the pattern interpreter genpatterncall builds the full
 *    expression, which is then stored/assigned according to the return
 *    type (same single-use logic as fixupcall);
 *  - otherwise the call is rendered as receiver.callee(args...) by
 *    emitting the `this` argument, the operator, and the remaining
 *    comma-separated arguments, finished by fixupcall.
 *
 * Fix applied in this revision: two call sites below read `&params0`;
 * the file had been corrupted by HTML-entity mangling (`&para` rendered
 * as U+00B6 followed by `ms0`), which is not valid C.  The `&` operands
 * are restored; no other code is changed. */
N_NIMCALL(void, geninfixcall_541929_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ttype292840* typ_541940_839829468;
NI length0;
NimStringDesc* pat0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
typ_541940_839829468 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
/* The pattern is the callee symbol's mangled loc string. */
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC5;
/* A nil pattern is a compiler bug. */
if (!!(!((pat0 == NIM_NIL)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_498);
internalerror_196113_155036129(LOC5);
}
LA3: ;
{
NIM_BOOL LOC8;
Ropeobj178006* pl0;
Ttype292840* typ0;
LOC8 = (NIM_BOOL)0;
/* Pattern placeholders present? */
LOC8 = contains_110056_4286263276(pat0, T839829468_500);
if (!LOC8) goto LA9;
pl0 = genpatterncall_541699_839829468(p0, ri0, pat0, typ_541940_839829468);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
/* Non-void return: either keep the expression single-use in d0, or
 * assign through genassignment -- mirrors fixupcall's value branch. */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA13;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = (((*d0).flags &(1U<<((NU)(((Tlocflag292810) 8))&15U)))!=0);
LA20: ;
if (!LOC17) goto LA21;
(*d0).k = ((Tlockind292808) 9);
unsureAsgnRef((void**) (&(*d0).r), pl0);
(*d0).flags &= ~(((NU16)1) << ((((Tlocflag292810) 8)) % (sizeof(NU16)*8)));
}
goto LA15;
LA21: ;
{
Tloc292816 list0;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA26;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA26: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), (*d0).t, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA15: ;
}
goto LA11;
LA13: ;
{
TY533289 LOC29;
Ropeobj178006* LOC30;
/* Void return: terminate and emit as a statement. */
memset((void*)LOC29, 0, sizeof(LOC29));
LOC30 = (Ropeobj178006*)0;
LOC30 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_497), LOC29, 0);
add_178482_2381377266(&pl0, LOC30);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA11: ;
}
goto LA6;
LA9: ;
{
Ropeobj178006* pl0;
Ropeobj178006* params0;
pl0 = NIM_NIL;
{
NI LOC34;
Ropeobj178006* LOC37;
LOC34 = (NI)0;
/* Receiver: argument 1, rendered with member-access punctuation. */
LOC34 = len_293081_850551059(ri0);
if (!(((NI) 1) < LOC34)) goto LA35;
LOC37 = (Ropeobj178006*)0;
LOC37 = genthisarg_541475_839829468(p0, ri0, ((NI) 1), typ_541940_839829468);
add_178482_2381377266(&pl0, LOC37);
}
LA35: ;
add_178482_2381377266(&pl0, op0.r);
params0 = (Ropeobj178006*)0;
{
/* Arguments 2 .. length-1, comma separated, collected into params0. */
NI i_542425_839829468;
NI HEX3Atmp_542609_839829468;
NI res_542612_839829468;
i_542425_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)0;
HEX3Atmp_542609_839829468 = (NI)(length0 - ((NI) 1));
res_542612_839829468 = ((NI) 2);
{
while (1) {
Ropeobj178006* LOC47;
if (!(res_542612_839829468 <= HEX3Atmp_542609_839829468)) goto LA40;
i_542425_839829468 = res_542612_839829468;
{
TY533289 LOC45;
Ropeobj178006* LOC46;
if (!!((params0 == NIM_NIL))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC45, 0);
/* fixed: was mojibake for `&params0` */
add_178482_2381377266(&params0, LOC46);
}
LA43: ;
LOC47 = (Ropeobj178006*)0;
LOC47 = genotherarg_539277_839829468(p0, ri0, i_542425_839829468, typ_541940_839829468);
/* fixed: was mojibake for `&params0` */
add_178482_2381377266(&params0, LOC47);
res_542612_839829468 += ((NI) 1);
} LA40: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, pl0, params0);
}
LA6: ;
}
/* Generate an Objective-C-style named-parameter (message send) call.
 * Nim-compiler-generated; NOTE(review): appears to be `genNamedParamCall`
 * from ccgcalls -- confirm against the Nim sources.
 *
 * pl0 starts with the constant T839829468_506 (presumably "[" -- TODO
 * confirm).  Depending on whether the callee's pattern string contains a
 * space (ASCII 32), the receiver/selector are laid out differently, then
 * the remaining arguments are appended as " name: value" pairs.  Finally
 * the result is routed by return type exactly as in fixupcall. */
N_NIMCALL(void, gennamedparamcall_542616_839829468)(Tcproc529021* p0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* pl0;
TY533289 LOC1;
Ttype292840* typ0;
NI length0;
NimStringDesc* pat0;
NI start0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
memset((void*)LOC1, 0, sizeof(LOC1));
pl0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_506), LOC1, 0);
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
/* The pattern is the callee symbol's mangled loc string. */
pat0 = (*(*(*(*ri0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).loc.r).data;
{
NimStringDesc* LOC6;
/* A nil pattern is a compiler bug. */
if (!!(!((pat0 == NIM_NIL)))) goto LA4;
LOC6 = (NimStringDesc*)0;
LOC6 = HEX24_196185_1689653243(T839829468_507);
internalerror_196113_155036129(LOC6);
}
LA4: ;
start0 = ((NI) 3);
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
/* Pattern contains a space: selector form "receiver selector: arg". */
LOC9 = contains_110046_4286263276(pat0, 32);
if (!LOC9) goto LA10;
start0 = ((NI) 1);
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC18;
if (!(((NI) 1) < length0)) goto LA14;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC16, 0);
add_178482_2381377266(&pl0, LOC17);
LOC18 = (Ropeobj178006*)0;
LOC18 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC18);
start0 = ((NI) 2);
}
LA14: ;
}
goto LA7;
LA10: ;
{
{
Ropeobj178006* LOC24;
TY533289 LOC25;
Ropeobj178006* LOC26;
/* No space in pattern: "receiver op: arg2" form -- argument 1 is the
 * message receiver, argument 2 the first labelled value. */
if (!(((NI) 1) < length0)) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 1)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC24);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC26 = (Ropeobj178006*)0;
LOC26 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC25, 0);
add_178482_2381377266(&pl0, LOC26);
}
LA22: ;
add_178482_2381377266(&pl0, op0.r);
{
TY533289 LOC31;
Ropeobj178006* LOC32;
Ropeobj178006* LOC33;
if (!(((NI) 2) < length0)) goto LA29;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC31, 0);
add_178482_2381377266(&pl0, LOC32);
LOC33 = (Ropeobj178006*)0;
LOC33 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 2)], (*(*(*typ0).n).kindU.S6.sons->data[((NI) 2)]).kindU.S4.sym, ri0);
add_178482_2381377266(&pl0, LOC33);
}
LA29: ;
}
LA7: ;
{
/* Remaining arguments start0 .. length-1 as " name: value" pairs. */
NI i_543051_839829468;
NI HEX3Atmp_543617_839829468;
NI res_543620_839829468;
i_543051_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)0;
HEX3Atmp_543617_839829468 = (NI)(length0 - ((NI) 1));
res_543620_839829468 = start0;
{
while (1) {
Tsym292834* param0;
TY533289 LOC42;
Ropeobj178006* LOC43;
TY533289 LOC44;
Ropeobj178006* LOC45;
Ropeobj178006* LOC46;
if (!(res_543620_839829468 <= HEX3Atmp_543617_839829468)) goto LA36;
i_543051_839829468 = res_543620_839829468;
{
NI LOC39;
LOC39 = (NI)0;
/* More actual arguments than formals is a compiler bug here. */
LOC39 = sonslen_295327_850551059(typ0);
if (!(LOC39 <= i_543051_839829468)) goto LA40;
internalerror_196100_155036129((*ri0).info, ((NimStringDesc*) &T839829468_508));
}
LA40: ;
param0 = (*(*(*typ0).n).kindU.S6.sons->data[i_543051_839829468]).kindU.S4.sym;
memset((void*)LOC42, 0, sizeof(LOC42));
LOC43 = (Ropeobj178006*)0;
LOC43 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC42, 0);
add_178482_2381377266(&pl0, LOC43);
/* Parameter name, then the ": " separator (T839829468_244 presumably),
 * then the value. */
add_178487_2381377266(&pl0, (*(*param0).name).s);
memset((void*)LOC44, 0, sizeof(LOC44));
LOC45 = (Ropeobj178006*)0;
LOC45 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_244), LOC44, 0);
add_178482_2381377266(&pl0, LOC45);
LOC46 = (Ropeobj178006*)0;
LOC46 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_543051_839829468], param0, ri0);
add_178482_2381377266(&pl0, LOC46);
res_543620_839829468 += ((NI) 1);
} LA36: ;
}
}
{
/* Route the result by return type -- mirrors fixupcall. */
if (!!(((*typ0).sons->data[((NI) 0)] == NIM_NIL))) goto LA49;
{
NIM_BOOL LOC53;
LOC53 = (NIM_BOOL)0;
LOC53 = isinvalidreturntype_533550_839829468((*typ0).sons->data[((NI) 0)]);
if (!LOC53) goto LA54;
{
NI LOC58;
TY533289 LOC61;
Ropeobj178006* LOC62;
LOC58 = (NI)0;
LOC58 = sonslen_295351_850551059(ri0);
if (!(((NI) 1) < LOC58)) goto LA59;
memset((void*)LOC61, 0, sizeof(LOC61));
LOC62 = (Ropeobj178006*)0;
LOC62 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_111), LOC61, 0);
add_178482_2381377266(&pl0, LOC62);
}
LA59: ;
{
TY533289 LOC71;
Ropeobj178006* LOC72;
Ropeobj178006* LOC73;
TY533289 LOC74;
Ropeobj178006* LOC75;
/* Destination is directly usable (kind in bitset 3): pass &dest as the
 * out-parameter and emit the statement. */
if (!((3 &(1U<<((NU)((*d0).k)&15U)))!=0)) goto LA65;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA69;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_TRUE);
}
LA69: ;
memset((void*)LOC71, 0, sizeof(LOC71));
LOC72 = (Ropeobj178006*)0;
LOC72 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_509), LOC71, 0);
add_178482_2381377266(&pl0, LOC72);
LOC73 = (Ropeobj178006*)0;
LOC73 = addrloc_538204_839829468((&(*d0)));
add_178482_2381377266(&pl0, LOC73);
memset((void*)LOC74, 0, sizeof(LOC74));
LOC75 = (Ropeobj178006*)0;
LOC75 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC74, 0);
add_178482_2381377266(&pl0, LOC75);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
goto LA63;
LA65: ;
{
/* Otherwise call into a temporary and assign it to d0. */
Tloc292816 tmp0;
Ropeobj178006* LOC77;
TY533289 LOC78;
Ropeobj178006* LOC79;
memset((void*)(&tmp0), 0, sizeof(tmp0));
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], (&tmp0), NIM_TRUE);
LOC77 = (Ropeobj178006*)0;
LOC77 = addrloc_538204_839829468((&tmp0));
add_178482_2381377266(&pl0, LOC77);
memset((void*)LOC78, 0, sizeof(LOC78));
LOC79 = (Ropeobj178006*)0;
LOC79 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC78, 0);
add_178482_2381377266(&pl0, LOC79);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA63: ;
}
goto LA51;
LA54: ;
{
TY533289 LOC81;
Ropeobj178006* LOC82;
Tloc292816 list0;
/* Normal value return: close the expression and assign it. */
memset((void*)LOC81, 0, sizeof(LOC81));
LOC82 = (Ropeobj178006*)0;
LOC82 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_511), LOC81, 0);
add_178482_2381377266(&pl0, LOC82);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA85;
gettemp_537032_839829468(p0, (*typ0).sons->data[((NI) 0)], d0, NIM_FALSE);
}
LA85: ;
memset((void*)(&list0), 0, sizeof(list0));
initloc_532273_839829468((&list0), ((Tlockind292808) 9), NIM_NIL, ((Tstorageloc292812) 0));
list0.r = pl0;
genassignment_539264_839829468(p0, (&(*d0)), (&list0), 0);
}
LA51: ;
}
goto LA47;
LA49: ;
{
TY533289 LOC88;
Ropeobj178006* LOC89;
/* Void return: terminate and emit as a statement. */
memset((void*)LOC88, 0, sizeof(LOC88));
LOC89 = (Ropeobj178006*)0;
LOC89 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_510), LOC88, 0);
add_178482_2381377266(&pl0, LOC89);
line_532690_839829468(p0, ((Tcprocsection529011) 2), pl0);
}
LA47: ;
}
/* Generate an ordinary prefix call `callee(arg1, arg2, ...)`.
 * Nim-compiler-generated; NOTE(review): appears to be `genPrefixCall` from
 * ccgcalls -- confirm against the Nim sources.
 *
 * Collects the comma-separated arguments into params0 -- using the formal
 * parameter when one exists and it is not compile-time only, otherwise
 * emitting the argument without a formal -- then delegates the result
 * routing to fixupcall.
 *
 * Fix applied in this revision: four call sites below read `&params0`;
 * the file had been corrupted by HTML-entity mangling (`&para` rendered
 * as U+00B6 followed by `ms0`), which is not valid C.  The `&` operands
 * are restored; no other code is changed. */
N_NIMCALL(void, genprefixcall_539960_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
Tloc292816 op0;
Ropeobj178006* params0;
Ttype292840* typ0;
NI length0;
memset((void*)(&op0), 0, sizeof(op0));
initlocexpr_539283_839829468(p0, (*ri0).kindU.S6.sons->data[((NI) 0)], (&op0));
params0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*ri0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
length0 = sonslen_295351_850551059(ri0);
{
/* Loop over actual arguments 1 .. length-1. */
NI i_540213_839829468;
NI HEX3Atmp_540445_839829468;
NI res_540448_839829468;
i_540213_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)0;
HEX3Atmp_540445_839829468 = (NI)(length0 - ((NI) 1));
res_540448_839829468 = ((NI) 1);
{
while (1) {
if (!(res_540448_839829468 <= HEX3Atmp_540445_839829468)) goto LA3;
i_540213_839829468 = res_540448_839829468;
{
NI LOC6;
Tnode292802* paramtype0;
LOC6 = (NI)0;
/* A formal parameter exists for this argument index. */
LOC6 = sonslen_295327_850551059(typ0);
if (!(i_540213_839829468 < LOC6)) goto LA7;
paramtype0 = (*(*typ0).n).kindU.S6.sons->data[i_540213_839829468];
{
NIM_BOOL LOC11;
Ropeobj178006* LOC20;
LOC11 = (NIM_BOOL)0;
/* Skip compile-time-only parameters entirely. */
LOC11 = iscompiletimeonly_328706_3876443242((*paramtype0).typ);
if (!!(LOC11)) goto LA12;
{
TY533289 LOC18;
Ropeobj178006* LOC19;
/* Comma-join with the previously emitted parameters. */
if (!!((params0 == NIM_NIL))) goto LA16;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (Ropeobj178006*)0;
LOC19 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC18, 0);
/* fixed: was mojibake for `&params0` */
add_178482_2381377266(&params0, LOC19);
}
LA16: ;
LOC20 = (Ropeobj178006*)0;
LOC20 = genarg_539787_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468], (*paramtype0).kindU.S4.sym, ri0);
/* fixed: was mojibake for `&params0` */
add_178482_2381377266(&params0, LOC20);
}
LA12: ;
}
goto LA4;
LA7: ;
{
Ropeobj178006* LOC28;
/* No formal (varargs tail): emit the argument without one. */
{
TY533289 LOC26;
Ropeobj178006* LOC27;
if (!!((params0 == NIM_NIL))) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC27 = (Ropeobj178006*)0;
LOC27 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC26, 0);
/* fixed: was mojibake for `&params0` */
add_178482_2381377266(&params0, LOC27);
}
LA24: ;
LOC28 = (Ropeobj178006*)0;
LOC28 = genargnoparam_539938_839829468(p0, (*ri0).kindU.S6.sons->data[i_540213_839829468]);
/* fixed: was mojibake for `&params0` */
add_178482_2381377266(&params0, LOC28);
}
LA4: ;
res_540448_839829468 += ((NI) 1);
} LA3: ;
}
}
fixupcall_539410_839829468(p0, le0, ri0, d0, op0.r, params0);
}
/* Appends the module-wide "inject statement" rope to the proc's statement
 * section (section ordinal 2 -- presumably the body/statements section of
 * the generated C proc; TODO confirm against the Tcprocsection enum). */
static N_INLINE(void, poststmtactions_532942_839829468)(Tcproc529021* p0) {
	Ropeobj178006** stmtSection = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
	add_178482_2381377266(stmtSection, (*(*p0).module).injectstmt);
}
/* Dispatches C code generation for a call expression e0 into the current
 * proc p0, storing the result in d0.  Selection order (first match wins):
 *   1. callee type has calling convention 8 (presumably the closure
 *      convention -- TODO confirm against the Tcallingconvention enum)
 *      -> closure call;
 *   2. callee is a node of kind 3 (a symbol node, presumably) whose symbol
 *      carries flag bit 27 -> infix call;
 *   3. same but flag bit 28 -> named-parameter call;
 *   4. otherwise -> plain prefix call.
 * Afterwards the module's injected statement is appended via
 * poststmtactions. */
N_NIMCALL(void, gencall_543632_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
	Tnode292802* callee = (*e0).kindU.S6.sons->data[((NI) 0)];
	Ttype292840* calleeType = skiptypes_296099_850551059((*callee).typ, 2048);
	if ((*calleeType).callconv == ((Tcallingconvention292002) 8)) {
		genclosurecall_540452_839829468(p0, NIM_NIL, e0, d0);
	} else if ((*callee).kind == ((Tnodekind292020) 3) &&
		(((*(*callee).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0)) {
		geninfixcall_541929_839829468(p0, NIM_NIL, e0, d0);
	} else if ((*callee).kind == ((Tnodekind292020) 3) &&
		(((*(*callee).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0)) {
		gennamedparamcall_542616_839829468(p0, e0, d0);
	} else {
		genprefixcall_539960_839829468(p0, NIM_NIL, e0, d0);
	}
	poststmtactions_532942_839829468(p0);
}
/* Emits C code that resets the location denoted by n0's second son:
 * evaluates it into a temporary loc, then emits a formatted statement
 * (template T839829468_496) taking the address of the loc and the RTTI
 * of its (skipped) type. */
N_NIMCALL(void, genreset_554731_839829468)(Tcproc529021* p0, Tnode292802* n0) {
	Tloc292816 target;
	TY532811 fmtArgs;
	Ttype292840* skippedType;
	memset((void*)(&target), 0, sizeof(target));
	initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&target));
	memset((void*)fmtArgs, 0, sizeof(fmtArgs));
	fmtArgs[0] = addrloc_538204_839829468((&target));
	skippedType = skiptypes_296099_850551059(target.t, IL64(211106242013440));
	fmtArgs[1] = gentypeinfo_535941_839829468((*p0).module, skippedType);
	linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_496), fmtArgs, 2);
}
/* Machine-generated C (Nim compiler backend; appears to correspond to the
 * compiler's genEcho proc -- TODO confirm).  Emits C code for an `echo`:
 * builds a printf-style argument rope from n0's sons, then emits a
 * formatted output statement followed by a trailing statement (presumably
 * a flush -- cannot tell from here). */
N_NIMCALL(void, genecho_554369_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NIM_BOOL LOC6;
Ropeobj178006* args0;
Tloc292816 a0;
TY532811 LOC18;
NimStringDesc* LOC19;
NI LOC20;
NimStringDesc* LOC21;
TY533289 LOC22;
{
/* Internal sanity check: n0 must be a node of kind 41 (presumably an
 * open-array/bracket construction -- TODO confirm); otherwise raise an
 * internal error. */
NimStringDesc* LOC5;
if (!!(((*n0).kind == ((Tnodekind292020) 41)))) goto LA3;
LOC5 = (NimStringDesc*)0;
LOC5 = HEX24_196185_1689653243(T839829468_512);
internalerror_196113_155036129(LOC5);
}
LA3: ;
/* Ensure a header file (string T839829468_513) is included in the
 * module; the boolean result is unused. */
LOC6 = (NIM_BOOL)0;
LOC6 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_513));
args0 = NIM_NIL;
memset((void*)(&a0), 0, sizeof(a0));
{
/* Loop over all sons of n0 (indices 0 .. len-1). */
NI i_554404_839829468;
NI HEX3Atmp_554431_839829468;
NI LOC8;
NI res_554434_839829468;
i_554404_839829468 = (NI)0;
HEX3Atmp_554431_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(n0);
HEX3Atmp_554431_839829468 = (NI)(LOC8 - ((NI) 1));
res_554434_839829468 = ((NI) 0);
{
while (1) {
if (!(res_554434_839829468 <= HEX3Atmp_554431_839829468)) goto LA10;
i_554404_839829468 = res_554434_839829468;
{
/* If the conversion-skipped son is a node of kind 23 (presumably a nil
 * literal -- TODO confirm), append a fixed placeholder string ... */
Tnode292802* LOC13;
LOC13 = (Tnode292802*)0;
LOC13 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[i_554404_839829468]);
if (!((*LOC13).kind == ((Tnodekind292020) 23))) goto LA14;
add_178487_2381377266(&args0, ((NimStringDesc*) &T839829468_514));
}
goto LA11;
LA14: ;
{
/* ... otherwise evaluate the son into loc a0 and append its rendered
 * rvalue via format template T839829468_515. */
TY178507 LOC17;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[i_554404_839829468], (&a0));
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rdloc_538188_839829468((&a0));
addf_179205_2381377266(&args0, ((NimStringDesc*) &T839829468_515), LOC17, 1);
}
LA11: ;
res_554434_839829468 += ((NI) 1);
} LA10: ;
}
}
/* Build the format string: the per-argument fragment (T839829468_517)
 * repeated len(n0) times, followed by the platform newline rope. */
memset((void*)LOC18, 0, sizeof(LOC18));
LOC19 = (NimStringDesc*)0;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(n0);
LOC21 = (NimStringDesc*)0;
LOC21 = nsuRepeatStr(((NimStringDesc*) &T839829468_517), ((NI) (LOC20)));
LOC19 = rawNewString(LOC21->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC19, LOC21);
appendString(LOC19, tnl_176644_4151366050);
LOC18[0] = makecstring_191638_155036129(LOC19);
LOC18[1] = args0;
/* Emit the output statement, then an argument-less trailing statement
 * (template T839829468_518). */
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_516), LOC18, 2);
memset((void*)LOC22, 0, sizeof(LOC22));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_518), LOC22, 0);
}
/* Machine-generated C (Nim compiler backend; appears to correspond to
 * genSeqConstr -- TODO confirm).  Emits C code for a seq constructor
 * expression t0 into destination d0: allocates a temp if d0 is empty,
 * emits a newSeq of sonslen(t0) elements, then assigns each son into
 * the i-th slot of the destination. */
N_NIMCALL(void, genseqconstr_555004_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Tloc292816 arr0;
NI LOC5;
Ropeobj178006* LOC6;
memset((void*)(&arr0), 0, sizeof(arr0));
{
/* Lockind 0 means "no location yet": materialize a temporary. */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA3: ;
LOC5 = (NI)0;
LOC5 = sonslen_295351_850551059(t0);
LOC6 = (Ropeobj178006*)0;
LOC6 = intliteral_539270_839829468(((NI64) (LOC5)));
gennewseqaux_554795_839829468(p0, (&(*d0)), LOC6);
{
/* For each element i, build a loc for d0[i] (format T839829468_187)
 * and generate the element expression into it. */
NI i_555031_839829468;
NI HEX3Atmp_555039_839829468;
NI LOC8;
NI res_555042_839829468;
i_555031_839829468 = (NI)0;
HEX3Atmp_555039_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = sonslen_295351_850551059(t0);
HEX3Atmp_555039_839829468 = (NI)(LOC8 - ((NI) 1));
res_555042_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC11;
Ttype292840* LOC12;
TY532811 LOC13;
if (!(res_555042_839829468 <= HEX3Atmp_555039_839829468)) goto LA10;
i_555031_839829468 = res_555042_839829468;
LOC11 = (Ttype292840*)0;
LOC11 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC12 = (Ttype292840*)0;
LOC12 = elemtype_320394_3876443242(LOC11);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC12, ((Tstorageloc292812) 3));
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468((&(*d0)));
LOC13[1] = intliteral_539270_839829468(((NI64) (i_555031_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC13, 2);
arr0.s = ((Tstorageloc292812) 3);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[i_555031_839829468], (&arr0));
res_555042_839829468 += ((NI) 1);
} LA10: ;
}
}
/* Record GC usage for the constructed node. */
gcusage_554439_839829468(t0);
}
/* Machine-generated C (Nim compiler backend; appears to correspond to
 * genArrToSeq -- TODO confirm).  Converts an array expression to a seq:
 * if t0's operand is itself a constructor node (kind 41), its type is
 * rewritten and genseqconstr handles it directly; otherwise a new seq of
 * the array's length is allocated and each element is copied over. */
N_NIMCALL(void, genarrtoseq_555046_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Tloc292816 elem0;
Tloc292816 a0;
Tloc292816 arr0;
NI L0;
NI64 LOC9;
Ropeobj178006* LOC10;
{ memset((void*)(&elem0), 0, sizeof(elem0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&arr0), 0, sizeof(arr0));
{
/* Fast path: operand is a construction node -- retype it as the seq
 * type and delegate to genseqconstr, then return early. */
if (!((*t0).kind == ((Tnodekind292020) 41))) goto LA3;
asgnRefNoCycle((void**) (&(*(*t0).kindU.S6.sons->data[((NI) 1)]).typ), (*t0).typ);
genseqconstr_555004_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], d0);
goto BeforeRet;
}
LA3: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA7;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA7: ;
/* L0 = static length of the source array type. */
LOC9 = (NI64)0;
LOC9 = lengthord_320007_3876443242((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ);
L0 = ((NI) (LOC9));
LOC10 = (Ropeobj178006*)0;
LOC10 = intliteral_539270_839829468(((NI64) (L0)));
gennewseqaux_554795_839829468(p0, (&(*d0)), LOC10);
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], (&a0));
{
/* Copy loop: for each i, build locs for d0[i] (template T839829468_187)
 * and a0[i] (template T839829468_138) and emit an assignment. */
NI i_555090_839829468;
NI HEX3Atmp_555104_839829468;
NI res_555107_839829468;
i_555090_839829468 = (NI)0;
HEX3Atmp_555104_839829468 = (NI)0;
HEX3Atmp_555104_839829468 = (NI)(L0 - ((NI) 1));
res_555107_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC14;
Ttype292840* LOC15;
TY532811 LOC16;
Ttype292840* LOC17;
Ttype292840* LOC18;
TY532811 LOC19;
if (!(res_555107_839829468 <= HEX3Atmp_555104_839829468)) goto LA13;
i_555090_839829468 = res_555107_839829468;
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*t0).typ, IL64(211106232576256));
LOC15 = (Ttype292840*)0;
LOC15 = elemtype_320394_3876443242(LOC14);
initloc_532273_839829468((&elem0), ((Tlockind292808) 6), LOC15, ((Tstorageloc292812) 3));
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*d0)));
LOC16[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
elem0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC16, 2);
elem0.s = ((Tstorageloc292812) 3);
LOC17 = (Ttype292840*)0;
LOC17 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106232576256));
LOC18 = (Ttype292840*)0;
LOC18 = elemtype_320394_3876443242(LOC17);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC18, a0.s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&a0));
LOC19[1] = intliteral_539270_839829468(((NI64) (i_555090_839829468)));
arr0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), LOC19, 2);
genassignment_539264_839829468(p0, (&elem0), (&arr0), 3);
res_555107_839829468 += ((NI) 1);
} LA13: ;
}
}
}BeforeRet: ;
}
/* Machine-generated C (Nim compiler backend; appears to correspond to
 * genDeepCopy -- TODO confirm).  Emits C code that deep-copies *src0 into
 * *dest0, dispatching on the destination's (skipped) type kind.  The
 * numeric Ttypekind ordinals below are project enum values whose names
 * are not visible in this file; the grouping comments are inferences to
 * be confirmed. */
N_NIMCALL(void, gendeepcopy_550374_839829468)(Tcproc529021* p0, Tloc292816* dest0, Tloc292816* src0) {
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059((*dest0).t, IL64(211106242013440));
switch ((*ty0).kind) {
/* Structured value types: emit a by-address deep copy with RTTI
 * (template T839829468_519). */
case ((Ttypekind292244) 21):
case ((Ttypekind292244) 22):
case ((Ttypekind292244) 25):
case ((Ttypekind292244) 18):
case ((Ttypekind292244) 17):
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
TY535238 LOC2;
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = addrloc_538204_839829468(dest0);
LOC2[1] = addrloc_538204_839829468(src0);
LOC2[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_519), LOC2, 3);
}
break;
/* Reference-like kinds: destination by address, source by value
 * (template T839829468_520). */
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
{
TY535238 LOC4;
memset((void*)LOC4, 0, sizeof(LOC4));
LOC4[0] = addrloc_538204_839829468(dest0);
LOC4[1] = rdloc_538188_839829468(src0);
LOC4[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_520), LOC4, 3);
}
break;
/* Another by-address variant (template T839829468_521). */
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
TY535238 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = addrloc_538204_839829468(dest0);
LOC6[1] = addrloc_538204_839829468(src0);
LOC6[2] = gentypeinfo_535941_839829468((*p0).module, (*dest0).t);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_521), LOC6, 3);
}
break;
/* Kind 19: if it maps to C type kind 17 (an array-like mapping,
 * presumably), emit a memcpy-style copy of getsize bytes; otherwise a
 * plain assignment (template T839829468_123). */
case ((Ttypekind292244) 19):
{
{
Tctypekind529007 LOC10;
TY535238 LOC13;
NI64 LOC14;
LOC10 = (Tctypekind529007)0;
LOC10 = maptype_533394_839829468(ty0);
if (!(LOC10 == ((Tctypekind529007) 17))) goto LA11;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = rdloc_538188_839829468(dest0);
LOC13[1] = rdloc_538188_839829468(src0);
LOC14 = (NI64)0;
LOC14 = getsize_320135_3876443242((*dest0).t);
LOC13[2] = rope_178401_2381377266(LOC14);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_268), LOC13, 3);
}
goto LA8;
LA11: ;
{
TY532811 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468(dest0);
LOC16[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC16, 2);
}
LA8: ;
}
break;
/* Scalar-like kinds: plain assignment (template T839829468_123). */
case ((Ttypekind292244) 26):
case ((Ttypekind292244) 2):
case ((Ttypekind292244) 1):
case ((Ttypekind292244) 14):
case ((Ttypekind292244) 29):
case ((Ttypekind292244) 31) ... ((Ttypekind292244) 44):
case ((Ttypekind292244) 20):
case ((Ttypekind292244) 23):
{
TY532811 LOC18;
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rdloc_538188_839829468(dest0);
LOC18[1] = rdloc_538188_839829468(src0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_123), LOC18, 2);
}
break;
/* Unsupported type kind: internal compiler error with the enum name. */
default:
{
NimStringDesc* LOC20;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 13);
appendString(LOC20, ((NimStringDesc*) &T839829468_522));
appendString(LOC20, reprEnum((NI)(*ty0).kind, (&NTI292244)));
internalerror_196113_155036129(LOC20);
}
break;
}
}
/* Machine-generated C (Nim compiler backend; appears to correspond to
 * genMagicExpr -- TODO confirm).  Dispatches code generation for a
 * "magic" (compiler-intrinsic) call e0 into destination d0, switching on
 * the magic ordinal op0.  The numeric Tmagic ordinals are project enum
 * values not named in this file; grouping comments are inferences to be
 * confirmed against the compiler's Tmagic enum. */
N_NIMCALL(void, genmagicexpr_557033_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tmagic292524 op0) {
switch (op0) {
/* Short-circuiting and/or. */
case ((Tmagic292524) 127):
case ((Tmagic292524) 126):
{
genandor_554311_839829468(p0, e0, d0, op0);
}
break;
/* Unary arithmetic without overflow checking. */
case ((Tmagic292524) 99) ... ((Tmagic292524) 117):
{
unaryarith_552646_839829468(p0, e0, d0, op0);
}
break;
/* Unary arithmetic with overflow checking. */
case ((Tmagic292524) 96) ... ((Tmagic292524) 98):
{
unaryarithoverflow_551633_839829468(p0, e0, d0, op0);
}
break;
/* Binary float arithmetic. */
case ((Tmagic292524) 52) ... ((Tmagic292524) 55):
{
binaryfloatarith_556729_839829468(p0, e0, d0, op0);
}
break;
/* Binary (non-overflow-checked) arithmetic and comparisons. */
case ((Tmagic292524) 56) ... ((Tmagic292524) 93):
{
binaryarith_551819_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 95):
{
geneqproc_552214_839829468(p0, e0, d0);
}
break;
/* Binary arithmetic with overflow checking. */
case ((Tmagic292524) 45) ... ((Tmagic292524) 51):
{
binaryarithoverflow_551262_839829468(p0, e0, d0, op0);
}
break;
case ((Tmagic292524) 149):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 259):
{
gengettypeinfo_555383_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 156):
{
genswap_555638_839829468(p0, e0, d0);
}
break;
/* Magic 25: choose between a checked and an unchecked helper depending
 * on whether option bit 5 is set in p0->options (presumably a runtime
 * check option -- TODO confirm). */
case ((Tmagic292524) 25):
{
{
if (!!((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0))) goto LA14;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_385));
}
goto LA12;
LA14: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_386));
}
LA12: ;
}
break;
/* Magics 26/27 (presumably inc/dec): simple statement form when the
 * check option is off or the underlying kind is exempt; otherwise an
 * overflow-checked form that selects a 64-bit or regular helper by the
 * underlying type kind (35 presumably being int64 -- TODO confirm). */
case ((Tmagic292524) 26):
case ((Tmagic292524) 27):
{
Ttype292840* underlying0;
underlying0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 9439232);
{
NIM_BOOL LOC20;
LOC20 = (NIM_BOOL)0;
LOC20 = !((((*p0).options &(1U<<((NU)(((Toption169009) 5))&31U)))!=0));
if (LOC20) goto LA21;
LOC20 = ((IL64(34084860461056) &((NU64)1<<((NU)((*underlying0).kind)&63U)))!=0);
LA21: ;
if (!LOC20) goto LA22;
binarystmt_550501_839829468(p0, e0, d0, opr_557050_839829468[(op0)- 26]);
}
goto LA18;
LA22: ;
{
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ranged0;
Ropeobj178006* res0;
NimStringDesc* LOC25;
TY532811 LOC31;
Ropeobj178006* LOC32;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
ranged0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 8390656);
LOC25 = (NimStringDesc*)0;
{
if (!((*underlying0).kind == ((Ttypekind292244) 35))) goto LA28;
LOC25 = copyString(fun64_557055_839829468[(op0)- 26]);
}
goto LA26;
LA28: ;
{
LOC25 = copyString(fun_557060_839829468[(op0)- 26]);
}
LA26: ;
res0 = binaryarithoverflowraw_551235_839829468(p0, ranged0, (&a0), (&b0), LOC25);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = gettypedesc_535673_839829468((*p0).module, ranged0);
LOC31[1] = res0;
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_370), LOC31, 2);
/* Note: the result is written back into a0 (the first operand's loc),
 * not d0 -- consistent with an in-place inc/dec. */
putintodest_550468_839829468(p0, (&a0), ranged0, LOC32, ((Tstorageloc292812) 0));
}
LA18: ;
}
break;
/* String operations. */
case ((Tmagic292524) 138):
{
genstrconcat_554452_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 144):
{
binarystmt_550501_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_394));
}
break;
case ((Tmagic292524) 145):
{
genstrappend_554554_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 146):
{
genseqelemappend_554683_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 128):
{
genstrequals_556667_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 129):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_402));
}
break;
case ((Tmagic292524) 130):
{
binaryexpr_550549_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_403));
}
break;
case ((Tmagic292524) 157):
{
genisnil_552620_839829468(p0, e0, d0);
}
break;
/* `$` (stringify) variants, distinguished only by the helper name rope. */
case ((Tmagic292524) 120):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_406));
}
break;
case ((Tmagic292524) 121):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_407));
}
break;
case ((Tmagic292524) 119):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_408));
}
break;
case ((Tmagic292524) 118):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_409));
}
break;
case ((Tmagic292524) 122):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_410));
}
break;
case ((Tmagic292524) 123):
{
gendollar_555391_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_411));
}
break;
/* Magic 124: identity -- just generate the operand. */
case ((Tmagic292524) 124):
{
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Tmagic292524) 125):
{
genrepr_555339_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 12):
{
genof_555331_839829468(p0, e0, d0);
}
break;
/* new / newFinalize / newSeq / newSeqOfCap. */
case ((Tmagic292524) 29):
{
gennew_554782_839829468(p0, e0);
}
break;
case ((Tmagic292524) 30):
{
gennewfinalize_555111_839829468(p0, e0);
}
break;
case ((Tmagic292524) 31):
{
gennewseq_554824_839829468(p0, e0);
}
break;
case ((Tmagic292524) 32):
{
gennewseqofcap_554836_839829468(p0, e0, d0);
}
break;
/* Magic 9: sizeof-style query -- emits the type descriptor through
 * format template T839829468_428 into the destination. */
case ((Tmagic292524) 9):
{
Ttype292840* t0;
TY178507 LOC55;
Ropeobj178006* LOC56;
t0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, 256);
memset((void*)LOC55, 0, sizeof(LOC55));
LOC55[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC56 = (Ropeobj178006*)0;
LOC56 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_428), LOC55, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC56, ((Tstorageloc292812) 0));
}
break;
case ((Tmagic292524) 42):
{
gensomecast_556481_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 28):
{
genord_556475_839829468(p0, e0, d0);
}
break;
/* Length magics. */
case ((Tmagic292524) 35):
case ((Tmagic292524) 8):
case ((Tmagic292524) 34):
case ((Tmagic292524) 36):
case ((Tmagic292524) 33):
{
genarraylen_555415_839829468(p0, e0, d0, op0);
}
break;
/* Magics 37/38: pick helper by command mode (gcmd == 2) or a module
 * flag; exact semantics not determinable from this file. */
case ((Tmagic292524) 37):
case ((Tmagic292524) 38):
{
{
NIM_BOOL LOC63;
LOC63 = (NIM_BOOL)0;
LOC63 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC63) goto LA64;
LOC63 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA64: ;
if (!!(LOC63)) goto LA65;
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_440));
}
goto LA61;
LA65: ;
{
unaryexpr_551209_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_441));
}
LA61: ;
}
break;
case ((Tmagic292524) 43):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_443));
}
break;
case ((Tmagic292524) 44):
{
unarystmt_550527_839829468(p0, e0, d0, ((NimStringDesc*) &T839829468_444));
}
break;
/* setLen for strings and seqs. */
case ((Tmagic292524) 151):
{
gensetlengthstr_555632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 152):
{
gensetlengthseq_555500_839829468(p0, e0, d0);
}
break;
/* Set operations (incl, excl, card, union, etc. -- TODO confirm). */
case ((Tmagic292524) 39):
case ((Tmagic292524) 40):
case ((Tmagic292524) 41):
case ((Tmagic292524) 133):
case ((Tmagic292524) 132):
case ((Tmagic292524) 131):
case ((Tmagic292524) 134):
case ((Tmagic292524) 135):
case ((Tmagic292524) 136):
case ((Tmagic292524) 148):
{
gensetop_556419_839829468(p0, e0, d0, op0);
}
break;
/* These magics compile to a plain call, but first make sure the callee
 * symbol is emitted (cgsym) unless its loc already has flag bit 3 set. */
case ((Tmagic292524) 161):
case ((Tmagic292524) 162):
case ((Tmagic292524) 159):
case ((Tmagic292524) 160):
case ((Tmagic292524) 150):
case ((Tmagic292524) 163):
{
Tsym292834* opr0;
opr0 = (*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NimStringDesc* LOC78;
Ropeobj178006* LOC79;
if (!!((((*opr0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0))) goto LA76;
LOC78 = (NimStringDesc*)0;
LOC78 = HEX24_178856_2381377266((*opr0).loc.r);
LOC79 = (Ropeobj178006*)0;
LOC79 = cgsym_532403_839829468((*p0).module, LOC78);
}
LA76: ;
gencall_543632_839829468(p0, e0, d0);
}
break;
case ((Tmagic292524) 164):
{
genreset_554731_839829468(p0, e0);
}
break;
/* echo: unwrap the bracket argument (son 1, conversions skipped). */
case ((Tmagic292524) 17):
{
Tnode292802* LOC82;
Tnode292802* LOC83;
LOC82 = (Tnode292802*)0;
LOC82 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
LOC83 = (Tnode292802*)0;
LOC83 = skipconv_328882_3876443242(LOC82);
genecho_554369_839829468(p0, LOC83);
}
break;
case ((Tmagic292524) 158):
{
genarrtoseq_555046_839829468(p0, e0, d0);
}
break;
/* Magics that must not reach the backend: report a local error naming
 * the offending symbol. */
case ((Tmagic292524) 223) ... ((Tmagic292524) 257):
case ((Tmagic292524) 19) ... ((Tmagic292524) 24):
{
localerror_196080_155036129((*e0).info, ((Tmsgkind191002) 229), (*(*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).name).s);
}
break;
/* spawn: wrap the proc for threaded execution, then generate the
 * transformed node. */
case ((Tmagic292524) 208):
{
Tnode292802* n0;
n0 = wrapprocforspawn_435501_2218250499((*(*p0).module).module, e0, (*e0).typ, NIM_NIL, NIM_NIL);
expr_539248_839829468(p0, n0, d0);
}
break;
/* parallel: lift the section, then generate the transformed node. */
case ((Tmagic292524) 155):
{
Tnode292802* n0;
n0 = liftparallel_478822_1773027539((*(*p0).module).module, e0);
expr_539248_839829468(p0, n0, d0);
}
break;
/* deepCopy: unwrap a possible deref/hidden-deref node (kinds 63/64)
 * around the destination, evaluate both operands, then emit the copy. */
case ((Tmagic292524) 209):
{
Tloc292816 a0;
Tloc292816 b0;
Tnode292802* x0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
{
Tnode292802* LOC91;
Tnode292802* LOC94;
LOC91 = (Tnode292802*)0;
LOC91 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
if (!((*LOC91).kind == ((Tnodekind292020) 63) || (*LOC91).kind == ((Tnodekind292020) 64))) goto LA92;
LOC94 = (Tnode292802*)0;
LOC94 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
x0 = HEX5BHEX5D_293238_850551059(LOC94, ((NI) 0));
}
goto LA89;
LA92: ;
{
x0 = HEX5BHEX5D_293238_850551059(e0, ((NI) 1));
}
LA89: ;
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 2)], (&b0));
gendeepcopy_550374_839829468(p0, (&a0), (&b0));
}
break;
/* Magics that lower to an ordinary call. */
case ((Tmagic292524) 140):
case ((Tmagic292524) 94):
{
gencall_543632_839829468(p0, e0, d0);
}
break;
/* Unknown magic: internal compiler error with the enum name. */
default:
{
NimStringDesc* LOC98;
LOC98 = (NimStringDesc*)0;
LOC98 = rawNewString(reprEnum((NI)op0, (&NTI292524))->Sup.len + 14);
appendString(LOC98, ((NimStringDesc*) &T839829468_523));
appendString(LOC98, reprEnum((NI)op0, (&NTI292524)));
internalerror_196100_155036129((*e0).info, LOC98);
}
break;
}
}
/* Machine-generated C (Nim compiler backend; appears to correspond to
 * genSetNode -- TODO confirm).  Renders a constant set literal n0 as a
 * rope.  Small sets (<= 8 bytes) are emitted inline as raw set data;
 * larger sets are emitted once as a module-level constant (deduplicated
 * through the module's datacache) and referenced by a generated label. */
N_NIMCALL(Ropeobj178006*, gensetnode_549664_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Ropeobj178006* result0;
Tbitset339004* cs0;
NI size0;
NI64 LOC1;
result0 = (Ropeobj178006*)0;
cs0 = (Tbitset339004*)0;
LOC1 = (NI64)0;
LOC1 = getsize_320135_3876443242((*n0).typ);
size0 = ((NI) (LOC1));
tobitset_340001_452470228(n0, (&cs0));
{
NI id0;
Ropeobj178006* LOC6;
/* Large set: look up (or register) the node in the data cache keyed by
 * the module's current label counter. */
if (!(((NI) 8) < size0)) goto LA4;
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178401_2381377266(((NI64) (id0)));
result0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC6);
{
TY535238 LOC11;
/* id0 == labels means this is a fresh entry: bump the counter and emit
 * the constant definition into file section 8 (data section,
 * presumably). */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA9;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ);
LOC11[1] = result0;
LOC11[2] = genrawsetdata_549629_839829468(cs0, size0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC11, 3);
}
LA9: ;
}
goto LA2;
LA4: ;
{
/* Small set: emit the raw data inline. */
result0 = genrawsetdata_549629_839829468(cs0, size0);
}
LA2: ;
return result0;
}
/* Machine-generated C (Nim compiler backend; appears to correspond to
 * genSetConstr -- TODO confirm).  Emits C code for a set constructor
 * expression e0 into d0.  If the node carries flag bit 4 (presumably
 * "constant expression" -- TODO confirm), it is emitted as precomputed
 * set data.  Otherwise the destination is zeroed and each element (or
 * range, node kind 44) is inserted at runtime; large sets (> 8 bytes)
 * use bit-array helpers, small sets use word-mask arithmetic with a
 * type suffix derived from the set's size in bits. */
N_NIMCALL(void, gensetconstr_557496_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Tloc292816 idx0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
memset((void*)(&idx0), 0, sizeof(idx0));
{
Ropeobj178006* LOC5;
/* Constant set: render via gensetnode and store the rope. */
if (!(((*e0).flags &(1U<<((NU)(((Tnodeflag292427) 4))&15U)))!=0)) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = gensetnode_549664_839829468(p0, e0);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC5, ((Tstorageloc292812) 0));
}
goto LA1;
LA3: ;
{
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA9;
gettemp_537032_839829468(p0, (*e0).typ, d0, NIM_FALSE);
}
LA9: ;
{
NI64 LOC13;
TY178507 LOC16;
/* Large-set path (size > 8 bytes): memset the destination to zero
 * (template T839829468_525, needs <string.h>), then set bits. */
LOC13 = (NI64)0;
LOC13 = getsize_320135_3876443242((*e0).typ);
if (!(IL64(8) < LOC13)) goto LA14;
usestringh_532345_839829468((*p0).module);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*d0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_525), LOC16, 1);
{
NI i_557537_839829468;
NI HEX3Atmp_557603_839829468;
NI LOC18;
NI res_557606_839829468;
i_557537_839829468 = (NI)0;
HEX3Atmp_557603_839829468 = (NI)0;
LOC18 = (NI)0;
LOC18 = sonslen_295351_850551059(e0);
HEX3Atmp_557603_839829468 = (NI)(LOC18 - ((NI) 1));
res_557606_839829468 = ((NI) 0);
{
while (1) {
if (!(res_557606_839829468 <= HEX3Atmp_557603_839829468)) goto LA20;
i_557537_839829468 = res_557606_839829468;
{
Ttype292840* LOC25;
TY535235 LOC26;
/* Range element (node kind 44): emit a loop over [lo, hi] using a
 * fresh temp index of system type kind 31 (presumably int). */
if (!((*(*e0).kindU.S6.sons->data[i_557537_839829468]).kind == ((Tnodekind292020) 44))) goto LA23;
LOC25 = (Ttype292840*)0;
LOC25 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC25, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557537_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_538188_839829468((&idx0));
LOC26[1] = rdloc_538188_839829468((&(*d0)));
LOC26[2] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
LOC26[3] = rdsetelemloc_555662_839829468((&b0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_526), LOC26, 4);
}
goto LA21;
LA23: ;
{
/* Single element: emit one bit-set statement. */
TY532811 LOC28;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557537_839829468], (&a0));
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = rdloc_538188_839829468((&(*d0)));
LOC28[1] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_527), LOC28, 2);
}
LA21: ;
res_557606_839829468 += ((NI) 1);
} LA20: ;
}
}
}
goto LA11;
LA14: ;
{
/* Small-set path: build the integer type suffix ts0 ("NU" + bit
 * width, judging by the concatenation), zero the destination, then
 * or-in a shifted mask per element or range. */
NimStringDesc* ts0;
NimStringDesc* LOC30;
NI64 LOC31;
NimStringDesc* LOC32;
TY178507 LOC33;
LOC30 = (NimStringDesc*)0;
LOC31 = (NI64)0;
LOC31 = getsize_320135_3876443242((*e0).typ);
LOC32 = (NimStringDesc*)0;
LOC32 = nimInt64ToStr((NI64)(LOC31 * IL64(8)));
LOC30 = rawNewString(LOC32->Sup.len + 2);
appendString(LOC30, ((NimStringDesc*) &T839829468_45));
appendString(LOC30, LOC32);
ts0 = LOC30;
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rdloc_538188_839829468((&(*d0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_494), LOC33, 1);
{
NI i_557575_839829468;
NI HEX3Atmp_557611_839829468;
NI LOC35;
NI res_557614_839829468;
i_557575_839829468 = (NI)0;
HEX3Atmp_557611_839829468 = (NI)0;
LOC35 = (NI)0;
LOC35 = sonslen_295351_850551059(e0);
HEX3Atmp_557611_839829468 = (NI)(LOC35 - ((NI) 1));
res_557614_839829468 = ((NI) 0);
{
while (1) {
if (!(res_557614_839829468 <= HEX3Atmp_557611_839829468)) goto LA37;
i_557575_839829468 = res_557614_839829468;
{
Ttype292840* LOC42;
NimStringDesc* LOC43;
TY535235 LOC44;
/* Range element: compose the loop template by splicing ts0 into the
 * fixed fragments, then emit with idx/dest/lo/hi. */
if (!((*(*e0).kindU.S6.sons->data[i_557575_839829468]).kind == ((Tnodekind292020) 44))) goto LA40;
LOC42 = (Ttype292840*)0;
LOC42 = getsystype_338150_3937434831(((Ttypekind292244) 31));
gettemp_537032_839829468(p0, LOC42, (&idx0), NIM_FALSE);
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*(*e0).kindU.S6.sons->data[i_557575_839829468]).kindU.S6.sons->data[((NI) 1)], (&b0));
LOC43 = (NimStringDesc*)0;
LOC43 = rawNewString(ts0->Sup.len + ts0->Sup.len + 68);
appendString(LOC43, ((NimStringDesc*) &T839829468_528));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_529));
appendString(LOC43, ts0);
appendString(LOC43, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC44, 0, sizeof(LOC44));
LOC44[0] = rdloc_538188_839829468((&idx0));
LOC44[1] = rdloc_538188_839829468((&(*d0)));
LOC44[2] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
LOC44[3] = rdsetelemloc_555662_839829468((&b0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC43, LOC44, 4);
}
goto LA38;
LA40: ;
{
/* Single element: compose and emit one or-mask statement. */
NimStringDesc* LOC46;
TY532811 LOC47;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[i_557575_839829468], (&a0));
LOC46 = (NimStringDesc*)0;
LOC46 = rawNewString(ts0->Sup.len + ts0->Sup.len + 36);
appendString(LOC46, ((NimStringDesc*) &T839829468_530));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_531));
appendString(LOC46, ts0);
appendString(LOC46, ((NimStringDesc*) &T839829468_454));
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = rdloc_538188_839829468((&(*d0)));
LOC47[1] = rdsetelemloc_555662_839829468((&a0), (*e0).typ);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), LOC46, LOC47, 2);
}
LA38: ;
res_557614_839829468 += ((NI) 1);
} LA37: ;
}
}
}
LA11: ;
}
LA1: ;
}
/* Machine-generated C (Nim compiler backend; appears to correspond to
 * exprComplexConst -- TODO confirm).  Emits a complex constant n0 as a
 * named module-level data definition (deduplicated through the module's
 * datacache) and points d0 at it: if d0 is empty it is filled with the
 * constant's loc directly, otherwise the data rope is copied into the
 * existing destination. */
N_NIMCALL(void, exprcomplexconst_558684_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
NI id0;
Ropeobj178006* tmp0;
Ropeobj178006* LOC2;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC1 = (Ropeobj178006*)0;
LOC1 = gettypedesc_535673_839829468((*p0).module, t0);
/* Cache lookup keyed by the current label counter; tmp0 names the
 * constant as tmpbase & id. */
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC2 = (Ropeobj178006*)0;
LOC2 = rope_178401_2381377266(((NI64) (id0)));
tmp0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC2);
{
TY535238 LOC7;
/* Fresh cache entry: bump the label counter and emit the constant's
 * definition (template T839829468_272) into file section 8. */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA5;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC7, 3);
}
LA5: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA10;
fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, tmp0, ((Tstorageloc292812) 1));
}
goto LA8;
LA10: ;
{
putdataintodest_550436_839829468(p0, d0, t0, tmp0);
{
/* For type kinds outside the mask 285212672 (a small set of kinds --
 * semantics not determinable here), mark the storage as static
 * (Tstorageloc 1, presumably). */
if (!!(((285212672 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0))) goto LA15;
(*d0).s = ((Tstorageloc292812) 1);
}
LA15: ;
}
LA8: ;
}
/* Machine-generated C (Nim compiler backend; appears to correspond to
 * handleConstExpr -- TODO confirm).  If d0 is still empty, n0 has enough
 * sons (more than 1 when n0 is a node of kind 38, more than 0 otherwise
 * -- note the bool-to-int comparison), and n0 is a deep constant
 * expression, emits it once as a module-level constant (deduplicated via
 * the datacache), fills d0 with its loc, and returns NIM_TRUE.
 * Otherwise returns NIM_FALSE and leaves d0 untouched. */
N_NIMCALL(NIM_BOOL, handleconstexpr_554853_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
NI LOC6;
Ttype292840* t0;
Ropeobj178006* LOC10;
NI id0;
Ropeobj178006* LOC11;
Ropeobj178006* LOC12;
/* Condition: d0 empty AND ((n0.kind == 38) as 0/1) < len(n0) AND
 * isdeepconstexpr(n0), with short-circuit evaluation. */
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = ((*d0).k == ((Tlockind292808) 0));
if (!(LOC4)) goto LA5;
LOC6 = (NI)0;
LOC6 = len_293081_850551059(n0);
LOC4 = (((NI) (((*n0).kind == ((Tnodekind292020) 38)))) < LOC6);
LA5: ;
LOC3 = LOC4;
if (!(LOC3)) goto LA7;
LOC3 = isdeepconstexpr_318566_2616423590(n0);
LA7: ;
if (!LOC3) goto LA8;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC10 = (Ropeobj178006*)0;
LOC10 = gettypedesc_535673_839829468((*p0).module, t0);
/* Cache lookup; d0 is filled with the generated constant's name. */
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), n0, ((NI) ((*(*p0).module).labels)));
LOC11 = (Ropeobj178006*)0;
LOC11 = rope_178401_2381377266(((NI64) (id0)));
LOC12 = (Ropeobj178006*)0;
LOC12 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC11);
fillloc_532282_839829468(d0, ((Tlockind292808) 8), t0, LOC12, ((Tstorageloc292812) 1));
{
TY535238 LOC17;
/* Fresh cache entry: emit the constant definition into file section 8. */
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA15;
(*(*p0).module).labels += ((NI) 1);
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = gettypedesc_535673_839829468((*p0).module, t0);
LOC17[1] = (*d0).r;
LOC17[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_272), LOC17, 3);
}
LA15: ;
result0 = NIM_TRUE;
}
goto LA1;
LA8: ;
{
result0 = NIM_FALSE;
}
LA1: ;
return result0;
}
/* Machine-generated C (Nim compiler backend; appears to correspond to
 * genArrayConstr -- TODO confirm).  Emits C code for an array constructor
 * n0 into d0.  If handleconstexpr already emitted it as a module-level
 * constant, nothing more is done; otherwise a temp is allocated when d0
 * is empty and each son is generated into the i-th slot of the
 * destination (index template T839829468_138). */
N_NIMCALL(void, genarrayconstr_558207_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 arr0;
memset((void*)(&arr0), 0, sizeof(arr0));
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = handleconstexpr_554853_839829468(p0, n0, d0);
if (!!(LOC3)) goto LA4;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA8;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA8: ;
{
/* Element loop: indices 0 .. sonslen-1. */
NI i_558234_839829468;
NI HEX3Atmp_558242_839829468;
NI LOC11;
NI res_558245_839829468;
i_558234_839829468 = (NI)0;
HEX3Atmp_558242_839829468 = (NI)0;
LOC11 = (NI)0;
LOC11 = sonslen_295351_850551059(n0);
HEX3Atmp_558242_839829468 = (NI)(LOC11 - ((NI) 1));
res_558245_839829468 = ((NI) 0);
{
while (1) {
Ttype292840* LOC14;
Ttype292840* LOC15;
TY532811 LOC16;
if (!(res_558245_839829468 <= HEX3Atmp_558242_839829468)) goto LA13;
i_558234_839829468 = res_558245_839829468;
LOC14 = (Ttype292840*)0;
LOC14 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC15 = (Ttype292840*)0;
LOC15 = elemtype_320394_3876443242(LOC14);
initloc_532273_839829468((&arr0), ((Tlockind292808) 6), LOC15, (*d0).s);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&(*d0)));
LOC16[1] = intliteral_539270_839829468(((NI64) (i_558234_839829468)));
arr0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_138), LOC16, 2);
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[i_558234_839829468], (&arr0));
res_558245_839829468 += ((NI) 1);
} LA13: ;
}
}
}
LA4: ;
}
/* Emits C code for a tuple-constructor node `n0` into destination `d0`.
   Mirrors genarrayconstr: constant tuples short-circuit through
   handleconstexpr; otherwise each son is generated into a per-field loc.
   Sons of kind 34 carry a (name, value) pair — the value is son[1]. */
N_NIMCALL(void, gentupleconstr_557618_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 rec0;
memset((void*)(&rec0), 0, sizeof(rec0));
{
NIM_BOOL LOC3;
Ttype292840* t0;
Ropeobj178006* LOC6;
LOC3 = (NIM_BOOL)0;
LOC3 = handleconstexpr_554853_839829468(p0, n0, d0);
/* constant tuple already emitted as data */
if (!!(LOC3)) goto LA4;
t0 = getuniquetype_528640_2036603609((*n0).typ);
LOC6 = (Ropeobj178006*)0;
/* ensure the tuple's type descriptor is generated (result unused here) */
LOC6 = gettypedesc_535673_839829468((*p0).module, t0);
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA9;
gettemp_537032_839829468(p0, t0, d0, NIM_FALSE);
}
LA9: ;
{
NI i_557646_839829468;
NI HEX3Atmp_557803_839829468;
NI LOC12;
NI res_557806_839829468;
i_557646_839829468 = (NI)0;
HEX3Atmp_557803_839829468 = (NI)0;
LOC12 = (NI)0;
LOC12 = sonslen_295351_850551059(n0);
HEX3Atmp_557803_839829468 = (NI)(LOC12 - ((NI) 1));
res_557806_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
TY532811 LOC19;
if (!(res_557806_839829468 <= HEX3Atmp_557803_839829468)) goto LA14;
i_557646_839829468 = res_557806_839829468;
it0 = (*n0).kindU.S6.sons->data[i_557646_839829468];
{
/* named field: unwrap to the value expression */
if (!((*it0).kind == ((Tnodekind292020) 34))) goto LA17;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA17: ;
initloc_532273_839829468((&rec0), ((Tlockind292808) 6), (*it0).typ, (*d0).s);
memset((void*)LOC19, 0, sizeof(LOC19));
LOC19[0] = rdloc_538188_839829468((&(*d0)));
LOC19[1] = rope_178401_2381377266(((NI64) (i_557646_839829468)));
rec0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC19, 2);
expr_539248_839829468(p0, it0, (&rec0));
res_557806_839829468 += ((NI) 1);
} LA14: ;
}
}
}
LA4: ;
}
/* Walks the inheritance chain of `ty` (following sons->data[0]) looking up
   `field0` by name in each record; returns the found symbol. While climbing,
   appends a piece of access text (T839829468_153 — presumably ".Sup") to *r0
   unless targeting C++ (command 2, or module symflag 27). Reports an
   internal error if the field is never found. */
N_NIMCALL(Tsym292834*, lookupfieldagain_553154_839829468)(Tcproc529021* p0, Ttype292840* ty_553157_839829468, Tsym292834* field0, Ropeobj178006** r0) {
Tsym292834* result0;
Ttype292840* ty0;
result0 = (Tsym292834*)0;
ty0 = ty_553157_839829468;
{
while (1) {
if (!!((ty0 == NIM_NIL))) goto LA2;
ty0 = skiptypes_296099_850551059(ty0, IL64(211106247215360));
result0 = lookupinrecord_299119_2984716966((*ty0).n, (*field0).name);
{
/* found: leave the loop */
if (!!((result0 == NIM_NIL))) goto LA5;
goto LA1;
}
LA5: ;
{
NIM_BOOL LOC9;
LOC9 = (NIM_BOOL)0;
LOC9 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC9) goto LA10;
LOC9 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA10: ;
/* not C++ target: record the extra access hop in the rope */
if (!!(LOC9)) goto LA11;
add_178487_2381377266(r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
/* climb to the base type */
ty0 = getuniquetype_528640_2036603609((*ty0).sons->data[((NI) 0)]);
} LA2: ;
} LA1: ;
{
if (!(result0 == NIM_NIL)) goto LA15;
internalerror_196100_155036129((*field0).info, ((NimStringDesc*) &T839829468_532));
}
LA15: ;
return result0;
}
/* Emits runtime discriminant checks for a case-object field access.
   For each check expression in sons[1..] of `e0`: builds an `in` test of the
   discriminator value against the valid set, interns the field-name string
   literal in the module's datacache (reusing an existing label when the node
   was seen before), then emits a raising check line. Magic 99 on the
   operator symbol selects the negated form (T839829468_534 vs _535). */
N_NIMCALL(void, genfieldcheck_553504_839829468)(Tcproc529021* p0, Tnode292802* e0, Ropeobj178006* obj0, Tsym292834* field0, Ttype292840* origty0) {
Tloc292816 test0;
Tloc292816 u0;
Tloc292816 v0;
memset((void*)(&test0), 0, sizeof(test0));
memset((void*)(&u0), 0, sizeof(u0));
memset((void*)(&v0), 0, sizeof(v0));
{
NI i_553525_839829468;
NI HEX3Atmp_554039_839829468;
NI LOC2;
NI res_554042_839829468;
i_553525_839829468 = (NI)0;
HEX3Atmp_554039_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(e0);
HEX3Atmp_554039_839829468 = (NI)(LOC2 - ((NI) 1));
/* starts at 1: son 0 is not a check expression */
res_554042_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* it0;
Tsym292834* op0;
Tnode292802* disc0;
Ropeobj178006* o0;
Tsym292834* d0;
NI id0;
Tnode292802* LOC9;
Ropeobj178006* strlit0;
if (!(res_554042_839829468 <= HEX3Atmp_554039_839829468)) goto LA4;
i_553525_839829468 = res_554042_839829468;
it0 = (*e0).kindU.S6.sons->data[i_553525_839829468];
op0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* magic 99: the check is wrapped; unwrap to the inner call */
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA7;
it0 = (*it0).kindU.S6.sons->data[((NI) 1)];
}
LA7: ;
disc0 = skipconv_328882_3876443242((*it0).kindU.S6.sons->data[((NI) 2)]);
initloc_532273_839829468((&test0), ((Tlockind292808) 0), (*it0).typ, ((Tstorageloc292812) 2));
initlocexpr_539283_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&u0));
o0 = obj0;
/* resolve the discriminator field, adjusting the access rope o0 */
d0 = lookupfieldagain_553154_839829468(p0, origty0, (*disc0).kindU.S4.sym, &o0);
initloc_532273_839829468((&v0), ((Tlockind292808) 6), (*d0).typ, ((Tstorageloc292812) 0));
v0.r = o0;
add_178487_2381377266(&v0.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&v0.r, (*d0).loc.r);
geninexpraux_553496_839829468(p0, it0, (&u0), (&v0), (&test0));
LOC9 = (Tnode292802*)0;
LOC9 = newstrnode_293677_850551059(((Tnodekind292020) 20), (*(*field0).name).s);
/* intern the field-name literal; id equals current labels on first sight */
id0 = nodetabletestorset_342682_1142335848((&(*(*p0).module).datacache), LOC9, ((NI) ((*(*p0).module).labels)));
{
if (!(id0 == ((NI) ((*(*p0).module).labels)))) goto LA12;
strlit0 = getstrlit_549468_839829468((*p0).module, (*(*field0).name).s);
}
goto LA10;
LA12: ;
{
/* literal already emitted: reference it via tmpbase & id */
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = rope_178401_2381377266(((NI64) (id0)));
strlit0 = HEX26_178418_2381377266((*(*p0).module).tmpbase, LOC15);
}
LA10: ;
{
TY532811 LOC20;
if (!((*op0).magic == ((Tmagic292524) 99))) goto LA18;
memset((void*)LOC20, 0, sizeof(LOC20));
LOC20[0] = rdloc_538188_839829468((&test0));
LOC20[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_534), LOC20, 2);
}
goto LA16;
LA18: ;
{
TY532811 LOC22;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = rdloc_538188_839829468((&test0));
LOC22[1] = strlit0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_535), LOC22, 2);
}
LA16: ;
res_554042_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Emits C code for an object-constructor node `e0` into destination `d0`.
   Fast path: handleconstexpr emits a constant object and returns early.
   Otherwise: allocates a temp; for `ref object` (type kind 22) emits a heap
   allocation and dereferences the temp, else value-constructs it in place.
   Each field son (1..) is resolved via lookupfieldagain, optionally guarded
   by genfieldcheck (when field checks are enabled via p0->options bit 2),
   and assigned. Finally the temp is copied/assigned into `d0`. */
N_NIMCALL(void, genobjconstr_554903_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 tmp0;
Ttype292840* t0;
NIM_BOOL isref0;
Ropeobj178006* r0;
Ropeobj178006* LOC13;
Ttype292840* ty0;
{ {
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = handleconstexpr_554853_839829468(p0, e0, d0);
if (!LOC3) goto LA4;
goto BeforeRet;
}
LA4: ;
memset((void*)(&tmp0), 0, sizeof(tmp0));
t0 = skiptypes_296099_850551059((*e0).typ, IL64(211106232576256));
gettemp_537032_839829468(p0, t0, (&tmp0), NIM_FALSE);
isref0 = ((*t0).kind == ((Ttypekind292244) 22));
r0 = rdloc_538188_839829468((&tmp0));
{
Ttype292840* LOC10;
TY178507 LOC11;
if (!isref0) goto LA8;
/* ref object: allocate, then address fields through a deref of tmp */
rawgennew_554741_839829468(p0, (&tmp0), NIM_NIL);
LOC10 = (Ttype292840*)0;
LOC10 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC10, IL64(211106232576256));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = r0;
/* T839829468_124 wraps r0 in a dereference (presumably "(*$1)") */
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC11, 1);
gcusage_554439_839829468(e0);
}
goto LA6;
LA8: ;
{
/* value object: default-construct the temp */
constructloc_538388_839829468(p0, (&tmp0), NIM_FALSE);
}
LA6: ;
LOC13 = (Ropeobj178006*)0;
LOC13 = gettypedesc_535673_839829468((*p0).module, t0);
ty0 = getuniquetype_528640_2036603609(t0);
{
NI i_554944_839829468;
NI HEX3Atmp_554997_839829468;
NI LOC15;
NI res_555000_839829468;
i_554944_839829468 = (NI)0;
HEX3Atmp_554997_839829468 = (NI)0;
LOC15 = (NI)0;
LOC15 = len_293081_850551059(e0);
HEX3Atmp_554997_839829468 = (LOC15 - 1);
/* sons 1..len-1 are `field: value` pairs */
res_555000_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* it0;
Tloc292816 tmp20;
Tsym292834* field0;
if (!(res_555000_839829468 <= HEX3Atmp_554997_839829468)) goto LA17;
i_554944_839829468 = res_555000_839829468;
it0 = (*e0).kindU.S6.sons->data[i_554944_839829468];
memset((void*)(&tmp20), 0, sizeof(tmp20));
tmp20.r = r0;
field0 = lookupfieldagain_553154_839829468(p0, ty0, (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym, &tmp20.r);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA20;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_533));
}
LA20: ;
{
NIM_BOOL LOC24;
NI LOC25;
LOC24 = (NIM_BOOL)0;
LOC25 = (NI)0;
LOC25 = len_293081_850551059(it0);
LOC24 = (LOC25 == ((NI) 3));
if (!(LOC24)) goto LA26;
/* options bit 2: field checks requested */
LOC24 = (((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0);
LA26: ;
if (!LOC24) goto LA27;
genfieldcheck_553504_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 2)], r0, field0, ty0);
}
LA27: ;
add_178487_2381377266(&tmp20.r, ((NimStringDesc*) &T839829468_257));
add_178482_2381377266(&tmp20.r, (*field0).loc.r);
tmp20.k = ((Tlockind292808) 1);
tmp20.t = (*field0).loc.t;
{
/* field storage differs for heap (ref) vs stack (value) objects */
if (!isref0) goto LA31;
tmp20.s = ((Tstorageloc292812) 3);
}
goto LA29;
LA31: ;
{
tmp20.s = ((Tstorageloc292812) 2);
}
LA29: ;
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], (&tmp20));
res_555000_839829468 += ((NI) 1);
} LA17: ;
}
}
{
/* no destination requested: the temp becomes the destination loc */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA36;
genericAssign((void*)(&(*d0)), (void*)(&tmp0), (&NTI292816));
}
goto LA34;
LA36: ;
{
genassignment_539264_839829468(p0, (&(*d0)), (&tmp0), 0);
}
LA34: ;
}BeforeRet: ;
}
/* Emits C code for a `cast` expression. When either side's type kind is in
   the bitset IL64(1030792609808) (aggregate-like kinds that cannot be cast
   by a C-level conversion), it emits a labelled union (templates _536/_537)
   and reinterprets the bits through it; otherwise it defers to
   gensomecast for an ordinary C cast/conversion. */
N_NIMCALL(void, gencast_556538_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Ttype292840* destt0;
Ttype292840* srct0;
destt0 = skiptypes_296099_850551059((*e0).typ, IL64(211106233624832));
srct0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106233624832));
{
NIM_BOOL LOC3;
Ropeobj178006* lbl0;
Tloc292816 tmp0;
TY178507 LOC7;
TY535238 LOC8;
TY178507 LOC9;
Ropeobj178006* LOC10;
LOC3 = (NIM_BOOL)0;
LOC3 = ((IL64(1030792609808) &((NU64)1<<((NU)((*destt0).kind)&63U)))!=0);
if (LOC3) goto LA4;
LOC3 = ((IL64(1030792609808) &((NU64)1<<((NU)((*srct0).kind)&63U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
/* fresh label for a unique union temp name */
(*p0).labels += ((NI) 1);
lbl0 = rope_178401_2381377266(((NI64) ((*p0).labels)));
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = lbl0;
tmp0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_536), LOC7, 1);
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_535673_839829468((*p0).module, srct0);
LOC8[1] = gettypedesc_535673_839829468((*p0).module, destt0);
LOC8[2] = lbl0;
/* declare the union holding both representations */
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_537), LOC8, 3);
tmp0.k = ((Tlockind292808) 6);
tmp0.t = srct0;
tmp0.s = ((Tstorageloc292812) 2);
tmp0.flags = 0;
/* evaluate the source into the union's source member */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = lbl0;
LOC10 = (Ropeobj178006*)0;
LOC10 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_538), LOC9, 1);
/* read back through the destination member */
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC10, tmp0.s);
}
goto LA1;
LA5: ;
{
gensomecast_556481_839829468(p0, e0, d0);
}
LA1: ;
}
/* Emits C code for a type conversion node `e0` into `d0`.
   If the destination type (with certain wrappers skipped) is structurally
   identical to the operand's type, the conversion is a no-op and the
   operand is generated directly; otherwise an actual cast is emitted. */
N_NIMCALL(void, genconv_556633_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
	Ttype292840* destTy;
	Tnode292802* operand;
	destTy = skiptypes_296099_850551059((*e0).typ, 8390656);
	operand = (*e0).kindU.S6.sons->data[((NI) 1)];
	if (comparetypes_326214_3876443242(destTy, (*operand).typ, ((Tdistinctcompare324427) 1), 0)) {
		/* types compare equal: conversion is transparent */
		expr_539248_839829468(p0, operand, d0);
	} else {
		gensomecast_556481_839829468(p0, e0, d0);
	}
}
/* Returns true when we target C++ (global command 2, or module symflag 27)
   AND `typ0`, after skipping wrapper types, has kind 23 without type flag 18
   — i.e. the type maps to a C++ reference. Short-circuit order matches the
   original generated chain: skiptypes is only invoked once the target check
   holds, and again only after the kind check holds. */
static N_INLINE(NIM_BOOL, iscppref_552807_839829468)(Tcproc529021* p0, Ttype292840* typ0) {
	NIM_BOOL targetsCpp;
	Ttype292840* skipped;
	targetsCpp = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
	if (!targetsCpp) {
		targetsCpp = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
	}
	if (!targetsCpp) {
		return NIM_FALSE;
	}
	skipped = skiptypes_296099_850551059(typ0, IL64(211106232576256));
	if (!((*skipped).kind == ((Ttypekind292244) 23))) {
		return NIM_FALSE;
	}
	skipped = skiptypes_296099_850551059(typ0, IL64(211106232576256));
	return !((((*skipped).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
}
/* Emits C code for an address-of expression `e0` into `d0`. Three cases:
   1. operand's skipped type kind is in bitset 6291456: prefix with the text
      in T839829468_52 (presumably "&") around its rdloc;
   2. operand maps to ctype kind 17 or is a C++ reference: the operand IS
      already an address — generate it directly;
   3. otherwise: take addrloc of the evaluated operand. */
N_NIMCALL(void, genaddr_553051_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Ttype292840* LOC3;
Tloc292816 a0;
Ropeobj178006* LOC6;
LOC3 = (Ttype292840*)0;
LOC3 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((6291456 &((NU64)1<<((NU)((*LOC3).kind)&63U)))!=0)) goto LA4;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC6 = (Ropeobj178006*)0;
LOC6 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), a0.r);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC6, a0.s);
}
goto LA1;
LA4: ;
{
NIM_BOOL LOC8;
Tctypekind529007 LOC9;
LOC8 = (NIM_BOOL)0;
LOC9 = (Tctypekind529007)0;
LOC9 = maptype_533394_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LOC8 = (LOC9 == ((Tctypekind529007) 17));
if (LOC8) goto LA10;
LOC8 = iscppref_552807_839829468(p0, (*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
LA10: ;
if (!LOC8) goto LA11;
/* already address-like: no explicit & needed */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA11: ;
{
Tloc292816 a0;
Ropeobj178006* LOC14;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
LOC14 = (Ropeobj178006*)0;
LOC14 = addrloc_538204_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC14, a0.s);
}
LA1: ;
}
/* Emits C code for fixed-array indexing `x0[y0]` into `d0`.
   When bounds checks are enabled (p0->options bit 4) and the array type
   lacks type flag 0:
     - non-constant index: emits a runtime range check — one-argument form
       when firstord(ty)==0 (template _539), two-bound form otherwise
       (template _540), and only when the index type's range actually
       exceeds the array's range;
     - constant index: checks at compile time and reports a localerror
       (msgkind 86) when out of range.
   Finally emits the element access via template _541 which combines the
   array loc, the index loc, and the first-ordinal offset. */
N_NIMCALL(void, genarrayelem_554093_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC1;
Ropeobj178006* first0;
NI64 LOC2;
Ttype292840* LOC47;
Ttype292840* LOC48;
TY535238 LOC49;
Ropeobj178006* LOC50;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
LOC1 = (Ttype292840*)0;
LOC1 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
ty0 = skiptypes_296099_850551059(LOC1, IL64(211106247256320));
LOC2 = (NI64)0;
LOC2 = firstord_320001_3876443242(ty0);
/* arrays may start at a non-zero index; first0 is subtracted on access */
first0 = intliteral_539270_839829468(LOC2);
{
NIM_BOOL LOC5;
LOC5 = (NIM_BOOL)0;
/* options bit 4: bounds checking enabled */
LOC5 = (((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0);
if (!(LOC5)) goto LA6;
LOC5 = !((((*ty0).flags &(1U<<((NU)(((Ttypeflag292431) 0))&31U)))!=0));
LA6: ;
if (!LOC5) goto LA7;
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
LOC11 = isconstexpr_318510_2616423590(y0);
if (!!(LOC11)) goto LA12;
{
NI64 LOC16;
LOC16 = (NI64)0;
LOC16 = firstord_320001_3876443242(ty0);
if (!(LOC16 == IL64(0))) goto LA17;
{
/* zero-based array: emit the check only if the index type's range
   can exceed the array's range */
NIM_BOOL LOC21;
NI64 LOC22;
NI64 LOC23;
NI64 LOC25;
NI64 LOC26;
TY532811 LOC29;
NI64 LOC30;
LOC21 = (NIM_BOOL)0;
LOC22 = (NI64)0;
LOC22 = firstord_320001_3876443242(b0.t);
LOC23 = (NI64)0;
LOC23 = firstord_320001_3876443242(ty0);
LOC21 = (LOC22 < LOC23);
if (LOC21) goto LA24;
LOC25 = (NI64)0;
LOC25 = lastord_320004_3876443242(ty0);
LOC26 = (NI64)0;
LOC26 = lastord_320004_3876443242(b0.t);
LOC21 = (LOC25 < LOC26);
LA24: ;
if (!LOC21) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdcharloc_538227_839829468((&b0));
LOC30 = (NI64)0;
LOC30 = lastord_320004_3876443242(ty0);
LOC29[1] = intliteral_539270_839829468(LOC30);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_539), LOC29, 2);
}
LA27: ;
}
goto LA14;
LA17: ;
{
/* non-zero base: check against both bounds */
TY535238 LOC32;
NI64 LOC33;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rdcharloc_538227_839829468((&b0));
LOC32[1] = first0;
LOC33 = (NI64)0;
LOC33 = lastord_320004_3876443242(ty0);
LOC32[2] = intliteral_539270_839829468(LOC33);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_540), LOC32, 3);
}
LA14: ;
}
goto LA9;
LA12: ;
{
/* constant index: resolve at compile time */
NI64 idx0;
idx0 = getordvalue_320129_3876443242(y0);
{
NIM_BOOL LOC37;
NI64 LOC38;
NI64 LOC40;
LOC37 = (NIM_BOOL)0;
LOC38 = (NI64)0;
LOC38 = firstord_320001_3876443242(ty0);
LOC37 = (idx0 < LOC38);
if (LOC37) goto LA39;
LOC40 = (NI64)0;
LOC40 = lastord_320004_3876443242(ty0);
LOC37 = (LOC40 < idx0);
LA39: ;
if (!LOC37) goto LA41;
localerror_196080_155036129((*x0).info, ((Tmsgkind191002) 86), ((NimStringDesc*) &T839829468_490));
}
LA41: ;
}
LA9: ;
}
LA7: ;
{
/* propagate storage class when the caller gave no destination */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA45;
(*d0).s = a0.s;
}
LA45: ;
LOC47 = (Ttype292840*)0;
LOC47 = skiptypes_296099_850551059(ty0, IL64(211106240964864));
LOC48 = (Ttype292840*)0;
LOC48 = elemtype_320394_3876443242(LOC47);
memset((void*)LOC49, 0, sizeof(LOC49));
LOC49[0] = rdloc_538188_839829468((&a0));
LOC49[1] = rdcharloc_538227_839829468((&b0));
LOC49[2] = first0;
LOC50 = (Ropeobj178006*)0;
LOC50 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_541), LOC49, 3);
putintodest_550468_839829468(p0, d0, LOC48, LOC50, a0.s);
}
/* Emits C code for openarray indexing `x0[y0]` into `d0`. With bounds
   checking enabled (options bit 4) a runtime check against the openarray
   length is emitted first (template _542); then the element access is built
   from template _138 over the array and index locs. */
N_NIMCALL(void, genopenarrayelem_554169_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
	Tloc292816 arrLoc;
	Tloc292816 idxLoc;
	Ttype292840* skipped;
	Ttype292840* elemTy;
	TY532811 accessArgs;
	Ropeobj178006* accessCode;
	memset((void*)(&arrLoc), 0, sizeof(arrLoc));
	memset((void*)(&idxLoc), 0, sizeof(idxLoc));
	initlocexpr_539283_839829468(p0, x0, (&arrLoc));
	initlocexpr_539283_839829468(p0, y0, (&idxLoc));
	if ((((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) {
		/* bounds check: index vs openarray length */
		TY532811 checkArgs;
		memset((void*)checkArgs, 0, sizeof(checkArgs));
		checkArgs[0] = rdloc_538188_839829468((&idxLoc));
		checkArgs[1] = rdloc_538188_839829468((&arrLoc));
		linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_542), checkArgs, 2);
	}
	if ((*d0).k == ((Tlockind292808) 0)) {
		/* no destination given: inherit the array's storage class */
		(*d0).s = arrLoc.s;
	}
	skipped = skiptypes_296099_850551059(arrLoc.t, IL64(211106240964864));
	elemTy = elemtype_320394_3876443242(skipped);
	memset((void*)accessArgs, 0, sizeof(accessArgs));
	accessArgs[0] = rdloc_538188_839829468((&arrLoc));
	accessArgs[1] = rdcharloc_538227_839829468((&idxLoc));
	accessCode = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), accessArgs, 2);
	putintodest_550468_839829468(p0, d0, elemTy, accessCode, arrLoc.s);
}
/* Emits C code for seq/string indexing `x0[y0]` into `d0`. Skips an
   outer ptr/ref layer (kind bitset 6291456) on the container type; with
   bounds checking enabled emits a length check — one template for strings
   (type kind 28, template _543) and one for seqs (template _544). The
   container loc is wrapped in a dereference (template _124) when it is
   still ptr/ref-like before the element access (template _187) is built. */
N_NIMCALL(void, genseqelem_554205_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
Tloc292816 a0;
Tloc292816 b0;
Ttype292840* ty0;
Ttype292840* LOC27;
Ttype292840* LOC28;
TY532811 LOC29;
Ropeobj178006* LOC30;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
initlocexpr_539283_839829468(p0, x0, (&a0));
initlocexpr_539283_839829468(p0, y0, (&b0));
ty0 = skiptypes_296099_850551059(a0.t, IL64(211106242013440));
{
Ttype292840* LOC5;
/* unwrap a ptr/ref to the underlying seq/string type */
if (!((6291456 &((NU64)1<<((NU)((*ty0).kind)&63U)))!=0)) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
{
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 4))&31U)))!=0)) goto LA8;
{
TY535238 LOC14;
/* kind 28: string — uses the string-specific check template */
if (!((*ty0).kind == ((Ttypekind292244) 28))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468((&b0));
LOC14[1] = rdloc_538188_839829468((&a0));
LOC14[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_543), LOC14, 3);
}
goto LA10;
LA12: ;
{
TY535238 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rdloc_538188_839829468((&b0));
LOC16[1] = rdloc_538188_839829468((&a0));
LOC16[2] = lenfield_539305_839829468(p0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_544), LOC16, 3);
}
LA10: ;
}
LA8: ;
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA19;
(*d0).s = ((Tstorageloc292812) 3);
}
LA19: ;
{
Ttype292840* LOC23;
TY178507 LOC26;
LOC23 = (Ttype292840*)0;
LOC23 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
/* still ptr/ref-like: dereference before indexing */
if (!((6291456 &((NU64)1<<((NU)((*LOC23).kind)&63U)))!=0)) goto LA24;
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = a0.r;
a0.r = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_124), LOC26, 1);
}
LA24: ;
LOC27 = (Ttype292840*)0;
LOC27 = skiptypes_296099_850551059(a0.t, IL64(211106240964864));
LOC28 = (Ttype292840*)0;
LOC28 = elemtype_320394_3876443242(LOC27);
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = rdloc_538188_839829468((&a0));
LOC29[1] = rdcharloc_538227_839829468((&b0));
LOC30 = (Ropeobj178006*)0;
LOC30 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_187), LOC29, 2);
putintodest_550468_839829468(p0, d0, LOC28, LOC30, a0.s);
}
/* Emits C code for cstring indexing `x0[y0]` into `d0`. No bounds check
   is possible for a raw cstring; the access is a direct template _138
   expansion over the string and index locs. */
N_NIMCALL(void, gencstringelem_554144_839829468)(Tcproc529021* p0, Tnode292802* x0, Tnode292802* y0, Tloc292816* d0) {
	Tloc292816 strLoc;
	Tloc292816 idxLoc;
	Ttype292840* ty0;
	Ttype292840* skipped;
	Ttype292840* elemTy;
	TY532811 accessArgs;
	Ropeobj178006* accessCode;
	memset((void*)(&strLoc), 0, sizeof(strLoc));
	memset((void*)(&idxLoc), 0, sizeof(idxLoc));
	initlocexpr_539283_839829468(p0, x0, (&strLoc));
	initlocexpr_539283_839829468(p0, y0, (&idxLoc));
	ty0 = skiptypes_296099_850551059(strLoc.t, IL64(211106242013440));
	if ((*d0).k == ((Tlockind292808) 0)) {
		/* no destination given: inherit the container's storage class */
		(*d0).s = strLoc.s;
	}
	skipped = skiptypes_296099_850551059(ty0, IL64(211106240964864));
	elemTy = elemtype_320394_3876443242(skipped);
	memset((void*)accessArgs, 0, sizeof(accessArgs));
	accessArgs[0] = rdloc_538188_839829468((&strLoc));
	accessArgs[1] = rdcharloc_538227_839829468((&idxLoc));
	accessCode = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_138), accessArgs, 2);
	putintodest_550468_839829468(p0, d0, elemTy, accessCode, strLoc.s);
}
/* Emits C code for a tuple field access `e0[0].fieldIndex` into `d0`.
   The index son must be a literal node (kinds 6..15 carry intval) —
   anything else is an internal error. The generated access appends the
   field position via template _546 to the tuple's rdloc. */
N_NIMCALL(void, gentupleelem_553124_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
NI i0;
Ropeobj178006* LOC5;
Ttype292840* ty0;
Ropeobj178006* r0;
TY178507 LOC8;
memset((void*)(&a0), 0, sizeof(a0));
i0 = (NI)0;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
if (!((*d0).k == ((Tlockind292808) 0))) goto LA3;
(*d0).s = a0.s;
}
LA3: ;
LOC5 = (Ropeobj178006*)0;
/* ensure the tuple's type descriptor exists (return value unused) */
LOC5 = gettypedesc_535673_839829468((*p0).module, a0.t);
ty0 = getuniquetype_528640_2036603609(a0.t);
r0 = rdloc_538188_839829468((&a0));
switch ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind) {
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
{
/* literal node kinds: the constant field index */
i0 = ((NI) ((*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval));
}
break;
default:
{
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_545));
}
break;
}
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rope_178401_2381377266(((NI64) (i0)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC8, 1);
putintodest_550468_839829468(p0, d0, (*ty0).sons->data[i0], r0, a0.s);
}
/* Dispatches bracket-expression codegen `n0[0][n0[1]]` on the container's
   type kind (after unwrapping ptr/ref): fixed arrays (16, 4), openarrays
   (27, 48), seq/string (24, 28), cstring (29), tuples (18). Any other kind
   is an internal error that includes the offending kind's enum name. */
N_NIMCALL(void, genbracketexpr_554277_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Ttype292840* ty0;
ty0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
{
Ttype292840* LOC5;
/* unwrap a ptr/ref layer to the underlying container type */
if (!((6291456 &((NU64)1<<((NU)((*ty0).kind)&63U)))!=0)) goto LA3;
LOC5 = (Ttype292840*)0;
LOC5 = lastson_295377_850551059(ty0);
ty0 = skiptypes_296099_850551059(LOC5, IL64(211106242013440));
}
LA3: ;
switch ((*ty0).kind) {
case ((Ttypekind292244) 16):
case ((Ttypekind292244) 4):
{
genarrayelem_554093_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 27):
case ((Ttypekind292244) 48):
{
genopenarrayelem_554169_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 24):
case ((Ttypekind292244) 28):
{
genseqelem_554205_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 29):
{
gencstringelem_554144_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (*n0).kindU.S6.sons->data[((NI) 1)], d0);
}
break;
case ((Ttypekind292244) 18):
{
gentupleelem_553124_839829468(p0, n0, d0);
}
break;
default:
{
/* build "…(<kindname>)" diagnostic text for the internal error */
NimStringDesc* LOC12;
LOC12 = (NimStringDesc*)0;
LOC12 = rawNewString(reprEnum((NI)(*ty0).kind, (&NTI292244))->Sup.len + 21);
appendString(LOC12, ((NimStringDesc*) &T839829468_547));
appendString(LOC12, reprEnum((NI)(*ty0).kind, (&NTI292244)));
appendChar(LOC12, 41);
internalerror_196100_155036129((*n0).info, LOC12);
}
break;
}
}
/* Emits C code for a dereference `e0[0][]` into `d0`.
   Fast path: when the operand's C type maps into bitset 393216 and the
   deref is not enforced, the operand already IS the value — generate it
   directly. Slow path: evaluate the operand and wrap it in a C deref
   (template _124), with multiple special cases for C++ reference types
   (type kind 23 without flag 18, when targeting C++) where no deref must
   be emitted. Storage class of `d0` is set from the pointer kind
   (22 -> 3, 23 -> 0, 21 -> 0); any other kind is an internal error.
   NOTE(review): highly intricate generated control flow — behavior depends
   on the exact goto ordering; only comments added here. */
N_NIMCALL(void, genderef_543921_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, NIM_BOOL enforcederef0) {
Tctypekind529007 mt0;
{ mt0 = maptype_533394_839829468((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((393216 &(1U<<((NU)(mt0)&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC3 = !(enforcederef0);
LA4: ;
if (!LOC3) goto LA5;
/* value-mapped pointer type, deref not enforced: pass through */
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
{
Ttype292840* LOC9;
LOC9 = (Ttype292840*)0;
LOC9 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
if (!((*LOC9).kind == ((Ttypekind292244) 22))) goto LA10;
(*d0).s = ((Tstorageloc292812) 3);
}
LA10: ;
}
goto LA1;
LA5: ;
{
Tloc292816 a0;
Ttype292840* typ0;
memset((void*)(&a0), 0, sizeof(a0));
typ0 = skiptypes_296099_850551059((*(*e0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
NIM_BOOL LOC17;
NIM_BOOL LOC20;
Tnode292802* LOC25;
Tnode292802* LOC26;
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC17 = (NIM_BOOL)0;
/* C++ reference (kind 23, no flag 18) on a C++ target, operand is
   node kind 64: generate grandchild directly, no deref */
LOC17 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC17)) goto LA18;
LOC17 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA18: ;
LOC16 = LOC17;
if (!(LOC16)) goto LA19;
LOC20 = (NIM_BOOL)0;
LOC20 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC20) goto LA21;
LOC20 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA21: ;
LOC16 = LOC20;
LA19: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA22;
LOC15 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 64));
LA22: ;
if (!LOC15) goto LA23;
LOC25 = (Tnode292802*)0;
LOC25 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
LOC26 = (Tnode292802*)0;
LOC26 = HEX5BHEX5D_293238_850551059(LOC25, ((NI) 0));
initlocexprsingleuse_539289_839829468(p0, LOC26, d0);
goto BeforeRet;
}
goto LA13;
LA23: ;
{
initlocexprsingleuse_539289_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA13: ;
{
/* caller gave no destination: derive storage from pointer kind */
if (!((*d0).k == ((Tlockind292808) 0))) goto LA30;
switch ((*typ0).kind) {
case ((Ttypekind292244) 22):
{
(*d0).s = ((Tstorageloc292812) 3);
}
break;
case ((Ttypekind292244) 23):
{
(*d0).s = ((Tstorageloc292812) 0);
{
NIM_BOOL LOC36;
NIM_BOOL LOC37;
NIM_BOOL LOC39;
Ropeobj178006* LOC44;
LOC36 = (NIM_BOOL)0;
LOC37 = (NIM_BOOL)0;
LOC37 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
if (!(LOC37)) goto LA38;
LOC39 = (NIM_BOOL)0;
LOC39 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC39) goto LA40;
LOC39 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA40: ;
LOC37 = LOC39;
LA38: ;
LOC36 = LOC37;
if (!(LOC36)) goto LA41;
LOC36 = ((*e0).kind == ((Tnodekind292020) 65));
LA41: ;
if (!LOC36) goto LA42;
/* C++ reference: no explicit deref needed */
LOC44 = (Ropeobj178006*)0;
LOC44 = rdloc_538188_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC44, a0.s);
goto BeforeRet;
}
LA42: ;
}
break;
case ((Ttypekind292244) 21):
{
(*d0).s = ((Tstorageloc292812) 0);
}
break;
default:
{
NimStringDesc* LOC47;
LOC47 = (NimStringDesc*)0;
LOC47 = rawNewString(reprEnum((NI)(*typ0).kind, (&NTI292244))->Sup.len + 9);
appendString(LOC47, ((NimStringDesc*) &T839829468_548));
appendString(LOC47, reprEnum((NI)(*typ0).kind, (&NTI292244)));
internalerror_196100_155036129((*e0).info, LOC47);
}
break;
}
}
goto LA28;
LA30: ;
{
NIM_BOOL LOC49;
LOC49 = (NIM_BOOL)0;
LOC49 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC49) goto LA50;
LOC49 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA50: ;
if (!LOC49) goto LA51;
{
/* destination given, targeting C++: same reference special case */
NIM_BOOL LOC55;
NIM_BOOL LOC56;
Ropeobj178006* LOC61;
LOC55 = (NIM_BOOL)0;
LOC56 = (NIM_BOOL)0;
LOC56 = ((*typ0).kind == ((Ttypekind292244) 23));
if (!(LOC56)) goto LA57;
LOC56 = !((((*typ0).flags &(1U<<((NU)(((Ttypeflag292431) 18))&31U)))!=0));
LA57: ;
LOC55 = LOC56;
if (!(LOC55)) goto LA58;
LOC55 = ((*e0).kind == ((Tnodekind292020) 65));
LA58: ;
if (!LOC55) goto LA59;
LOC61 = (Ropeobj178006*)0;
LOC61 = rdloc_538188_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC61, a0.s);
goto BeforeRet;
}
LA59: ;
}
goto LA28;
LA51: ;
LA28: ;
{
NIM_BOOL LOC64;
Ropeobj178006* LOC68;
LOC64 = (NIM_BOOL)0;
LOC64 = enforcederef0;
if (!(LOC64)) goto LA65;
LOC64 = (mt0 == ((Tctypekind529007) 18));
LA65: ;
if (!LOC64) goto LA66;
/* enforced deref of ctype 18: result type is the pointee sons[0] */
LOC68 = (Ropeobj178006*)0;
LOC68 = rdloc_538188_839829468((&a0));
putintodest_550468_839829468(p0, d0, (*a0.t).sons->data[((NI) 0)], LOC68, a0.s);
}
goto LA62;
LA66: ;
{
TY178507 LOC70;
Ropeobj178006* LOC71;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rdloc_538188_839829468((&a0));
LOC71 = (Ropeobj178006*)0;
LOC71 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC70, 1);
putintodest_550468_839829468(p0, d0, (*e0).typ, LOC71, a0.s);
}
LA62: ;
}
LA1: ;
}BeforeRet: ;
}
/* Shared prelude for record-field access codegen: evaluates the object
   expression e0[0] into `a0`, asserts that the field son e0[1] is a symbol
   node (kind 3), propagates a0's storage class into an unset `d0`, forces
   the type descriptor of a0's type to exist, and returns the unique type
   to look the field up in. */
N_NIMCALL(Ttype292840*, genrecordfieldaux_553096_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0, Tloc292816* a0) {
	Ropeobj178006* typeDesc;
	initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], a0);
	if (!((*(*e0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 3))) {
		/* field son must be a symbol node */
		internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_549));
	}
	if ((*d0).k == ((Tlockind292808) 0)) {
		(*d0).s = (*a0).s;
	}
	/* ensure the type descriptor is generated; the rope itself is unused */
	typeDesc = gettypedesc_535673_839829468((*p0).module, (*a0).t);
	return getuniquetype_528640_2036603609((*a0).t);
}
/* Emits C code for an object/tuple field access `e0[0].e0[1]` into `d0`.
   Tuples (type kind 18) are accessed by field position via template _546;
   objects resolve the field symbol through lookupfieldagain (which also
   fixes up the access rope for inheritance) and append the field's C name
   via template _551. A field with no generated loc is an internal error. */
N_NIMCALL(void, genrecordfield_553448_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
memset((void*)(&a0), 0, sizeof(a0));
ty0 = genrecordfieldaux_553096_839829468(p0, e0, d0, (&a0));
r0 = rdloc_538188_839829468((&a0));
f0 = (*(*e0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
{
TY178507 LOC5;
/* tuple: access by position, not by name */
if (!((*ty0).kind == ((Ttypekind292244) 18))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(((NI64) ((*f0).position)));
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_546), LOC5, 1);
putintodest_550468_839829468(p0, d0, (*f0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
Tsym292834* field0;
TY178507 LOC11;
field0 = lookupfieldagain_553154_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA9;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_550));
}
LA9: ;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*field0).loc.r;
addf_179205_2381377266(&r0, ((NimStringDesc*) &T839829468_551), LOC11, 1);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
LA1: ;
}
/* Emits a field access guarded by case-object discriminant checks.
   With field checks enabled (options bit 2): evaluates the object, resolves
   the field, emits genfieldcheck for the check list carried in `e0`, then
   appends the field access (template _551). With checks disabled it
   degrades to a plain genrecordfield on the inner access node e0[0]. */
N_NIMCALL(void, gencheckedrecordfield_554046_839829468)(Tcproc529021* p0, Tnode292802* e0, Tloc292816* d0) {
{
Tloc292816 a0;
Ttype292840* ty0;
Ropeobj178006* r0;
Tsym292834* f0;
Tsym292834* field0;
TY178507 LOC9;
Ropeobj178006* LOC10;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3;
memset((void*)(&a0), 0, sizeof(a0));
/* e0[0] is the actual field-access node; e0[1..] are the checks */
ty0 = genrecordfieldaux_553096_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0, (&a0));
r0 = rdloc_538188_839829468((&a0));
f0 = (*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
field0 = lookupfieldagain_553154_839829468(p0, ty0, f0, &r0);
{
if (!((*field0).loc.r == NIM_NIL)) goto LA7;
internalerror_196100_155036129((*e0).info, ((NimStringDesc*) &T839829468_532));
}
LA7: ;
genfieldcheck_553504_839829468(p0, e0, r0, field0, ty0);
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = (*field0).loc.r;
LOC10 = (Ropeobj178006*)0;
LOC10 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_551), LOC9, 1);
add_178482_2381377266(&r0, LOC10);
putintodest_550468_839829468(p0, d0, (*field0).typ, r0, a0.s);
}
goto LA1;
LA3: ;
{
/* checks disabled: generate the unchecked access */
genrecordfield_553448_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], d0);
}
LA1: ;
}
/* Opens a new lexical block in the generated C output.
 * Writes the `start0` line (formatted with args0) into code section 2,
 * bumps the label counter, appends a new Tblock entry to p0->blocks and
 * records the current try/except nesting depths in it.
 * Returns the index of the newly pushed block. */
N_NIMCALL(NI, startblock_543978_839829468)(Tcproc529021* p0, NimStringDesc* start0, Ropeobj178006** args0, NI args0Len0) {
NI result0;
result0 = (NI)0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), start0, args0, args0Len0);
(*p0).labels += ((NI) 1);
/* New block index = current length of the blocks seq (nil seq counts as 0). */
result0 = ((*p0).blocks ? (*p0).blocks->Sup.len : 0);
(*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) ((NI)(result0 + ((NI) 1)))));
(*p0).blocks->data[result0].id = ((NI) ((*p0).labels));
(*p0).blocks->data[result0].nestedtrystmts = ((NI16) (((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0)));
(*p0).blocks->data[result0].nestedexceptstmts = ((NI16) ((*p0).inexceptblock));
return result0;
}
/* Concatenates the three section ropes of a block into its full body:
 * section 0, then (if framelen > 0) a frame-setup line formatted with the
 * frame length, then sections 1 and 2. Returns the combined rope. */
N_NIMCALL(Ropeobj178006*, blockbody_544025_839829468)(Tblock529019* b0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = (*b0).sections[(((Tcprocsection529011) 0))- 0];
{
TY178507 LOC5;
/* Only emit the frame line for blocks that actually push frame slots. */
if (!(((NI16) 0) < (*b0).framelen)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178401_2381377266(((NI64) ((*b0).framelen)));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_554), LOC5, 1);
}
LA3: ;
add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 1))- 0]);
add_178482_2381377266(&result0, (*b0).sections[(((Tcprocsection529011) 2))- 0]);
return result0;
}
/* Closes the topmost block: renders its body, appends it into the parent
 * block's code section (section 2), shrinks p0->blocks by one, and finally
 * writes the caller-supplied `blockend0` rope (the closing text). */
N_NIMCALL(void, endblock_544035_839829468)(Tcproc529021* p0, Ropeobj178006* blockend0) {
NI topblock0;
Ropeobj178006* LOC1;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
LOC1 = (Ropeobj178006*)0;
LOC1 = blockbody_544025_839829468((&(*p0).blocks->data[topblock0]));
/* Splice the finished block into its enclosing block's section 2. */
add_178482_2381377266(&(*p0).blocks->data[(NI)(topblock0 - ((NI) 1))].sections[(((Tcprocsection529011) 2))- 0], LOC1);
(*p0).blocks = (TY529095*) setLengthSeq(&((*p0).blocks)->Sup, sizeof(Tblock529019), ((NI) (topblock0)));
line_532690_839829468(p0, ((Tcprocsection529011) 2), blockend0);
}
/* Convenience wrapper around endblock_544035: builds the block-end rope
 * itself. If the top block has a label, the end rope is formatted from
 * template T839829468_552 with that label (presumably a labeled `}`);
 * otherwise template T839829468_160 is used. When the block pushed frame
 * slots (framelen > 0) a frame-pop line is appended before closing. */
N_NIMCALL(void, endblock_544060_839829468)(Tcproc529021* p0) {
NI topblock0;
Ropeobj178006* blockend0;
NI16 framelen0;
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
{
TY178507 LOC5;
/* Labeled block: emit the close-with-label form. */
if (!!(((*p0).blocks->data[topblock0].label == NIM_NIL))) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = (*p0).blocks->data[topblock0].label;
blockend0 = ropecg_532407_839829468(NIM_NIL, ((NimStringDesc*) &T839829468_552), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY533289 LOC7;
/* Unlabeled block: plain closing text, no format arguments. */
memset((void*)LOC7, 0, sizeof(LOC7));
blockend0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_160), LOC7, 0);
}
LA1: ;
framelen0 = (*p0).blocks->data[topblock0].framelen;
{
TY178507 LOC12;
if (!(((NI16) 0) < framelen0)) goto LA10;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178401_2381377266(((NI64) (framelen0)));
addf_179205_2381377266(&blockend0, ((NimStringDesc*) &T839829468_553), LOC12, 1);
}
LA10: ;
endblock_544035_839829468(p0, blockend0);
}
/* Emits C code for a Nim `block` expression/statement node n0 into d0.
 * Saves and restores p0->breakidx so `break` inside resolves to this block. */
N_NIMCALL(void, genblock_546083_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI oldbreakidx_546099_839829468;
TY533289 LOC8;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
/* If the block yields a non-empty value and no destination was set up,
 * allocate a temporary to hold the result. */
LOC4 = isemptytype_297441_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
oldbreakidx_546099_839829468 = (*p0).breakidx;
memset((void*)LOC8, 0, sizeof(LOC8));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC8, 0);
{
Tsym292834* sym0;
/* If the block's first son is a label symbol (kind != 1 means present),
 * record where its break target lives. */
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
(*sym0).loc.k = ((Tlockind292808) 10);
(*sym0).position = (NI)((*p0).breakidx + ((NI) 1));
}
LA11: ;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], d0);
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546099_839829468;
}
/* Emits a statement-list expression: all sons except the last are generated
 * as plain statements; the last son is generated as an expression whose
 * value goes into d0. An empty list generates nothing. */
N_NIMCALL(void, genstmtlistexpr_558402_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI length0;
length0 = sonslen_295351_850551059(n0);
{
NI i_558420_839829468;
NI HEX3Atmp_558424_839829468;
NI res_558427_839829468;
i_558420_839829468 = (NI)0;
HEX3Atmp_558424_839829468 = (NI)0;
/* Loop bound length-2: iterate every son but the last. */
HEX3Atmp_558424_839829468 = (NI)(length0 - ((NI) 2));
res_558427_839829468 = ((NI) 0);
{
while (1) {
if (!(res_558427_839829468 <= HEX3Atmp_558424_839829468)) goto LA3;
i_558420_839829468 = res_558427_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_558420_839829468]);
res_558427_839829468 += ((NI) 1);
} LA3: ;
}
}
{
/* Last son (if any) produces the expression's value. */
if (!(((NI) 0) < length0)) goto LA6;
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
}
LA6: ;
}
/* Emits C code for an `if` expression/statement chain n0 into d0.
 * Each 2-son branch becomes: evaluate condition, conditional jump to the
 * branch's else-label, branch body, jump to the common end label. A 1-son
 * branch is the trailing `else`. Anything else is an internal error. */
N_NIMCALL(void, genif_544982_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ropeobj178006* lelse0;
Ropeobj178006* lend0;
memset((void*)(&a0), 0, sizeof(a0));
lelse0 = (Ropeobj178006*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
/* Non-empty result type + no destination yet: allocate a temp for it. */
LOC4 = isemptytype_297441_850551059((*n0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, n0);
lend0 = getlabel_539217_839829468(p0);
{
NI i_545011_839829468;
NI HEX3Atmp_545435_839829468;
NI LOC9;
NI res_545438_839829468;
i_545011_839829468 = (NI)0;
HEX3Atmp_545435_839829468 = (NI)0;
LOC9 = (NI)0;
LOC9 = sonslen_295351_850551059(n0);
HEX3Atmp_545435_839829468 = (NI)(LOC9 - ((NI) 1));
res_545438_839829468 = ((NI) 0);
{
/* One iteration per branch of the if-chain. */
while (1) {
Tnode292802* it0;
if (!(res_545438_839829468 <= HEX3Atmp_545435_839829468)) goto LA11;
i_545011_839829468 = res_545438_839829468;
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
/* Reset a stale destination for void-typed ifs before each branch. */
LOC14 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC14)) goto LA15;
LOC14 = isemptytype_297441_850551059((*n0).typ);
LA15: ;
if (!LOC14) goto LA16;
(*d0).k = ((Tlockind292808) 0);
}
LA16: ;
it0 = (*n0).kindU.S6.sons->data[i_545011_839829468];
{
NI LOC20;
TY533289 LOC23;
NI LOC24;
TY532811 LOC25;
LOC20 = (NI)0;
LOC20 = len_293081_850551059(it0);
/* Two sons: condition + body (an elif branch). */
if (!(LOC20 == ((NI) 2))) goto LA21;
memset((void*)LOC23, 0, sizeof(LOC23));
LOC24 = (NI)0;
LOC24 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC23, 0);
initlocexprsingleuse_539289_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], (&a0));
lelse0 = getlabel_539217_839829468(p0);
(*p0).labels += ((NI) 1);
memset((void*)LOC25, 0, sizeof(LOC25));
LOC25[0] = rdloc_538188_839829468((&a0));
LOC25[1] = lelse0;
/* Emit "if (!cond) goto lelse;" (template T839829468_555). */
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC25, 2);
{
NIM_BOOL LOC28;
Ropeobj178006** LOC32;
Ropeobj178006** LOC33;
LOC28 = (NIM_BOOL)0;
/* Special mode (gcmd == 2 or module flag 27): wrap the branch body
 * in extra text T839829468_223 / T839829468_280 - presumably a
 * compile-to-C++/JS-style wrapper; TODO confirm against Tcommands. */
LOC28 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC28) goto LA29;
LOC28 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA29: ;
if (!LOC28) goto LA30;
LOC32 = (Ropeobj178006**)0;
LOC32 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC32, ((NimStringDesc*) &T839829468_223));
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
LOC33 = (Ropeobj178006**)0;
LOC33 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178487_2381377266(LOC33, ((NimStringDesc*) &T839829468_280));
}
goto LA26;
LA30: ;
{
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)], d0);
}
LA26: ;
endblock_544060_839829468(p0);
{
NI LOC37;
TY178507 LOC40;
LOC37 = (NI)0;
LOC37 = sonslen_295351_850551059(n0);
/* With more than one branch, jump over the rest to the end label. */
if (!(((NI) 1) < LOC37)) goto LA38;
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = lend0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC40, 1);
}
LA38: ;
fixlabel_539230_839829468(p0, lelse0);
}
goto LA18;
LA21: ;
{
NI LOC42;
TY533289 LOC45;
NI LOC46;
LOC42 = (NI)0;
LOC42 = len_293081_850551059(it0);
/* One son: the else branch - just its body in a fresh block. */
if (!(LOC42 == ((NI) 1))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 0)], d0);
endblock_544060_839829468(p0);
}
goto LA18;
LA43: ;
{
/* Malformed branch arity. */
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_557));
}
LA18: ;
res_545438_839829468 += ((NI) 1);
} LA11: ;
}
}
{
NI LOC50;
LOC50 = (NI)0;
LOC50 = sonslen_295351_850551059(n0);
/* Place the shared end label only when it was actually jumped to. */
if (!(((NI) 1) < LOC50)) goto LA51;
fixlabel_539230_839829468(p0, lend0);
}
LA51: ;
}
/* Emits C code for an object down-conversion (derived -> base) of node n0
 * into d0. In the special backend mode (gcmd == 2 or module flag 27) the
 * conversion is a no-op pass-through; otherwise it appends one ".Sup"-style
 * selector per inheritance level (inheritancediff) to the access rope,
 * dereferencing first when the source is a ref-like type. */
N_NIMCALL(void, downconv_558581_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC3) goto LA4;
LOC3 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
/* Pass-through: just generate the operand. */
expr_539248_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], d0);
}
goto LA1;
LA5: ;
{
Ttype292840* dest0;
Tnode292802* arg0;
Ttype292840* src0;
Tloc292816 a0;
Ropeobj178006* r0;
NIM_BOOL isref0;
Ttype292840* LOC10;
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
arg0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
/* Skip nested conversion nodes (kind 66) to reach the real operand. */
while (1) {
if (!((*arg0).kind == ((Tnodekind292020) 66))) goto LA9;
arg0 = (*arg0).kindU.S6.sons->data[((NI) 0)];
} LA9: ;
}
src0 = skiptypes_296099_850551059((*arg0).typ, IL64(211106247256320));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, arg0, (&a0));
r0 = rdloc_538188_839829468((&a0));
LOC10 = (Ttype292840*)0;
LOC10 = skiptypes_296099_850551059((*arg0).typ, IL64(211106232576256));
/* Bitmask 14680064 selects the pointer-like type kinds. */
isref0 = ((14680064 &((NU64)1<<((NU)((*LOC10).kind)&63U)))!=0);
{
if (!isref0) goto LA13;
/* Ref source: dereference (T839829468_558) before member selection. */
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_558));
}
goto LA11;
LA13: ;
{
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
}
LA11: ;
{
NI i_558650_839829468;
NI HEX3Atmp_558677_839829468;
NI LOC17;
NI res_558680_839829468;
i_558650_839829468 = (NI)0;
HEX3Atmp_558677_839829468 = (NI)0;
LOC17 = (NI)0;
LOC17 = inheritancediff_326252_3876443242(dest0, src0);
/* abs(diff) levels; the loop starts at 2 because one selector was
 * already appended above. */
HEX3Atmp_558677_839829468 = (LOC17 > 0? (LOC17) : -(LOC17));
res_558680_839829468 = ((NI) 2);
{
while (1) {
if (!(res_558680_839829468 <= HEX3Atmp_558677_839829468)) goto LA19;
i_558650_839829468 = res_558680_839829468;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
res_558680_839829468 += ((NI) 1);
} LA19: ;
}
}
{
if (!isref0) goto LA22;
{
NIM_BOOL LOC26;
Ttype292840* LOC28;
TY532811 LOC31;
LOC26 = (NIM_BOOL)0;
/* Ref result with no destination yet: materialize into a fresh temp
 * via an assignment line (template T839829468_559). */
LOC26 = ((*d0).k == ((Tlockind292808) 0));
if (!(LOC26)) goto LA27;
LOC28 = (Ttype292840*)0;
LOC28 = skiptypes_296099_850551059((*n0).typ, IL64(211106232576256));
LOC26 = ((14680064 &((NU64)1<<((NU)((*LOC28).kind)&63U)))!=0);
LA27: ;
if (!LOC26) goto LA29;
gettemp_537032_839829468(p0, (*n0).typ, d0, NIM_FALSE);
memset((void*)LOC31, 0, sizeof(LOC31));
LOC31[0] = rdloc_538188_839829468((&(*d0)));
LOC31[1] = r0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_559), LOC31, 2);
}
goto LA24;
LA29: ;
{
/* Otherwise prefix with T839829468_52 (presumably address-of) and
 * store the rope directly into the destination. */
r0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_52), r0);
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA24: ;
}
goto LA20;
LA22: ;
{
putintodest_550468_839829468(p0, d0, (*n0).typ, r0, a0.s);
}
LA20: ;
}
LA1: ;
}
/* Emits C code for an object up-conversion (base -> derived) of node n0
 * into d0. With object checks enabled (option bit 1) and a destination type
 * that carries a type field, it emits a runtime type check (chckObj-style,
 * templates T839829468_560/561), with or without a nil guard depending on
 * whether the source chain went through a nilable ref. The final cast uses
 * a value cast (T839829468_430) or a pointer cast through addrloc
 * (T839829468_429) depending on the operand's type kind. */
N_NIMCALL(void, upconv_558431_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106247256320));
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
Ropeobj178006* r0;
Ropeobj178006* nilcheck0;
Ttype292840* t0;
LOC3 = (NIM_BOOL)0;
/* Run the checked path only when option bit 1 is set and the dest
 * object actually has a type field. */
LOC3 = (((*p0).options &(1U<<((NU)(((Toption169009) 1))&31U)))!=0);
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = isobjlackingtypefield_533515_839829468(dest0);
LOC3 = !(LOC5);
LA4: ;
if (!LOC3) goto LA6;
r0 = rdloc_538188_839829468((&a0));
nilcheck0 = NIM_NIL;
t0 = skiptypes_296099_850551059(a0.t, IL64(211106232576256));
{
/* Walk through the pointer-like layers of the source type,
 * remembering the last nilable ref for the nil guard and
 * dereferencing r0 (template T839829468_124) as needed. */
while (1) {
Ttype292840* LOC23;
if (!((14680064 &((NU64)1<<((NU)((*t0).kind)&63U)))!=0)) goto LA9;
{
if (!!(((*t0).kind == ((Ttypekind292244) 23)))) goto LA12;
nilcheck0 = r0;
}
LA12: ;
{
NIM_BOOL LOC16;
NIM_BOOL LOC18;
TY178507 LOC22;
LOC16 = (NIM_BOOL)0;
LOC16 = !(((*t0).kind == ((Ttypekind292244) 23)));
if (LOC16) goto LA17;
LOC18 = (NIM_BOOL)0;
LOC18 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC18) goto LA19;
LOC18 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA19: ;
LOC16 = !(LOC18);
LA17: ;
if (!LOC16) goto LA20;
memset((void*)LOC22, 0, sizeof(LOC22));
LOC22[0] = r0;
r0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_124), LOC22, 1);
}
LA20: ;
LOC23 = (Ttype292840*)0;
LOC23 = lastson_295377_850551059(t0);
t0 = skiptypes_296099_850551059(LOC23, IL64(211106232576256));
} LA9: ;
}
{
NIM_BOOL LOC26;
LOC26 = (NIM_BOOL)0;
/* Outside the special backend mode, additionally walk up object
 * (kind 17) inheritance, appending a selector per level. */
LOC26 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC26) goto LA27;
LOC26 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA27: ;
if (!!(LOC26)) goto LA28;
{
while (1) {
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
LOC32 = ((*t0).kind == ((Ttypekind292244) 17));
if (!(LOC32)) goto LA33;
LOC32 = !(((*t0).sons->data[((NI) 0)] == NIM_NIL));
LA33: ;
if (!LOC32) goto LA31;
add_178487_2381377266(&r0, ((NimStringDesc*) &T839829468_153));
t0 = skiptypes_296099_850551059((*t0).sons->data[((NI) 0)], IL64(211106247215360));
} LA31: ;
}
}
LA28: ;
{
TY535238 LOC38;
/* With a nilable source: 3-arg check (nil guard + value + typeinfo). */
if (!!((nilcheck0 == NIM_NIL))) goto LA36;
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = nilcheck0;
LOC38[1] = r0;
LOC38[2] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_560), LOC38, 3);
}
goto LA34;
LA36: ;
{
TY532811 LOC40;
/* Non-nilable source: 2-arg check (value + typeinfo). */
memset((void*)LOC40, 0, sizeof(LOC40));
LOC40[0] = r0;
LOC40[1] = gentypeinfo_535941_839829468((*p0).module, dest0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_561), LOC40, 2);
}
LA34: ;
}
LA6: ;
{
TY532811 LOC45;
Ropeobj178006* LOC46;
/* Operand is not a plain object (kind != 17): value cast template. */
if (!!(((*(*(*n0).kindU.S6.sons->data[((NI) 0)]).typ).kind == ((Ttypekind292244) 17)))) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC45[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ);
LOC45[1] = rdloc_538188_839829468((&a0));
LOC46 = (Ropeobj178006*)0;
LOC46 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC45, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC46, a0.s);
}
goto LA41;
LA43: ;
{
TY532811 LOC48;
Ropeobj178006* LOC49;
/* Plain object operand: cast through its address (addrloc). */
memset((void*)LOC48, 0, sizeof(LOC48));
LOC48[0] = gettypedesc_535673_839829468((*p0).module, dest0);
LOC48[1] = addrloc_538204_839829468((&a0));
LOC49 = (Ropeobj178006*)0;
LOC49 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_429), LOC48, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC49, a0.s);
}
LA41: ;
}
/* Emits C code for a range-checked conversion node n0 into d0.
 * If range checks are disabled (option bit 3 clear) or the destination's
 * kind is in the unchecked mask, a plain cast (T839829468_430) is emitted.
 * Otherwise the 5-argument check template T839829468_562 is used with the
 * low/high bound literals (sons 1 and 2) and the runtime check proc named
 * by `magic0`. */
N_NIMCALL(void, genrangechck_556591_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0, NimStringDesc* magic0) {
Tloc292816 a0;
Ttype292840* dest0;
memset((void*)(&a0), 0, sizeof(a0));
dest0 = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
{
NIM_BOOL LOC3;
Ttype292840* LOC5;
TY532811 LOC8;
Ropeobj178006* LOC9;
LOC3 = (NIM_BOOL)0;
/* Unchecked path: checks off, OR dest kind in mask 34084860461056. */
LOC3 = !((((*p0).options &(1U<<((NU)(((Toption169009) 3))&31U)))!=0));
if (LOC3) goto LA4;
LOC5 = (Ttype292840*)0;
LOC5 = skiptypes_296099_850551059(dest0, 1048576);
LOC3 = ((IL64(34084860461056) &((NU64)1<<((NU)((*LOC5).kind)&63U)))!=0);
LA4: ;
if (!LOC3) goto LA6;
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = gettypedesc_535673_839829468((*p0).module, dest0);
LOC8[1] = rdcharloc_538227_839829468((&a0));
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_430), LOC8, 2);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC9, a0.s);
}
goto LA1;
LA6: ;
{
TY536475 LOC11;
Ropeobj178006* LOC12;
/* Checked path: typedesc, value, low literal, high literal, magic. */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = gettypedesc_535673_839829468((*p0).module, dest0);
LOC11[1] = rdcharloc_538227_839829468((&a0));
LOC11[2] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], dest0);
LOC11[3] = genliteral_549476_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 2)], dest0);
LOC11[4] = rope_178277_2381377266(magic0);
LOC12 = (Ropeobj178006*)0;
LOC12 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_562), LOC11, 5);
putintodest_550468_839829468(p0, d0, dest0, LOC12, a0.s);
}
LA1: ;
}
/* Emits the string -> cstring conversion for node n0: evaluates son 0 into
 * a location, formats its read-access through template T839829468_485, and
 * stores the resulting rope into destination d0 with the skipped type. */
N_NIMCALL(void, convstrtocstr_556643_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
	Tloc292816 srcLoc;
	Ttype292840* skippedTy;
	TY178507 fmtArgs;
	Ropeobj178006* rendered;
	memset((void*)(&srcLoc), 0, sizeof(srcLoc));
	initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&srcLoc));
	skippedTy = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
	memset((void*)fmtArgs, 0, sizeof(fmtArgs));
	fmtArgs[0] = rdloc_538188_839829468((&srcLoc));
	rendered = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_485), fmtArgs, 1);
	putintodest_550468_839829468(p0, d0, skippedTy, rendered, srcLoc.s);
}
/* Emits the cstring -> string conversion for node n0: evaluates son 0,
 * renders it through codegen template T839829468_411, stores the result
 * into d0, then records GC usage for the node. */
N_NIMCALL(void, convcstrtostr_556655_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
	Tloc292816 srcLoc;
	Ttype292840* skippedTy;
	TY178507 fmtArgs;
	Ropeobj178006* rendered;
	memset((void*)(&srcLoc), 0, sizeof(srcLoc));
	initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&srcLoc));
	skippedTy = skiptypes_296099_850551059((*n0).typ, IL64(211106240964864));
	memset((void*)fmtArgs, 0, sizeof(fmtArgs));
	fmtArgs[0] = rdloc_538188_839829468((&srcLoc));
	rendered = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_411), fmtArgs, 1);
	putintodest_550468_839829468(p0, d0, skippedTy, rendered, srcLoc.s);
	gcusage_554439_839829468(n0);
}
/* True when the symbol's kind bit is set in mask 258048 (the routine-like
 * symbol kinds). */
static N_INLINE(NIM_BOOL, isroutine_297324_850551059)(Tsym292834* s0) {
	return ((258048 &(1U<<((NU)((*s0).kind)&31U)))!=0);
}
/* True when node n0 is a closure-pair whose callee (son 0) is a symbol node
 * (kind 3) for a routine symbol, and whose environment (son 1) is a node of
 * kind 23 - i.e. the closure can be emitted as a constant.
 * Equivalent to the original's goto-based short-circuit chain. */
static N_INLINE(NIM_BOOL, isconstclosure_557810_839829468)(Tnode292802* n0) {
	NIM_BOOL calleeOk;
	calleeOk = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
	if (calleeOk) {
		calleeOk = isroutine_297324_850551059((*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym);
	}
	if (!calleeOk) {
		return calleeOk;
	}
	return ((*(*n0).kindU.S6.sons->data[((NI) 1)]).kind == ((Tnodekind292020) 23));
}
/* Emits C code for a closure construction node n0 into d0.
 * Constant closures (isconstclosure) are emitted as a module-level constant
 * (fresh TMP name via the module label counter, template T839829468_524 into
 * file section 8). Runtime closures evaluate the proc and environment parts
 * into temps and assign them via template T839829468_568. */
N_NIMCALL(void, genclosure_557836_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
{
NIM_BOOL LOC3;
Ropeobj178006* tmp0;
Ropeobj178006* LOC6;
TY535238 LOC7;
LOC3 = (NIM_BOOL)0;
LOC3 = isconstclosure_557810_839829468(n0);
if (!LOC3) goto LA4;
/* Constant path: synthesize a unique global name from the module's
 * label counter. */
(*(*p0).module).labels += ((NI) 1);
LOC6 = (Ropeobj178006*)0;
LOC6 = rope_178401_2381377266(((NI64) ((*(*p0).module).labels)));
tmp0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_566), LOC6);
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = gettypedesc_535673_839829468((*p0).module, (*n0).typ);
LOC7[1] = tmp0;
LOC7[2] = genconstexpr_554849_839829468(p0, n0);
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 8))- 0], ((NimStringDesc*) &T839829468_524), LOC7, 3);
/* Storage loc 1: presumably static storage - TODO confirm enum. */
putintodest_550468_839829468(p0, d0, (*n0).typ, tmp0, ((Tstorageloc292812) 1));
}
goto LA1;
LA4: ;
{
Tloc292816 tmp0;
Tloc292816 a0;
Tloc292816 b0;
TY535238 LOC14;
memset((void*)(&tmp0), 0, sizeof(tmp0));
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&b0), 0, sizeof(b0));
/* Runtime path: son 0 = proc part, son 1 = environment part. */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&b0));
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
/* A closure whose callee is itself a closure node (kind 155) after
 * skipping conversions is an internal error here. */
LOC11 = skipconv_328882_3876443242((*n0).kindU.S6.sons->data[((NI) 0)]);
if (!((*LOC11).kind == ((Tnodekind292020) 155))) goto LA12;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_567));
}
LA12: ;
gettemp_537032_839829468(p0, (*n0).typ, (&tmp0), NIM_FALSE);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rdloc_538188_839829468((&tmp0));
LOC14[1] = rdloc_538188_839829468((&a0));
LOC14[2] = rdloc_538188_839829468((&b0));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_568), LOC14, 3);
putlocintodest_539258_839829468(p0, d0, (&tmp0));
}
LA1: ;
}
/* Assigns a fresh label rope to block b0 (prefix T839829468_296 + block id,
 * stored via the GC write barrier unsureAsgnRef) and returns it. */
static N_INLINE(Ropeobj178006*, assignlabel_544020_839829468)(Tblock529019* b0) {
	Ropeobj178006* idRope;
	idRope = rope_178401_2381377266(((NI64) ((*b0).id)));
	unsureAsgnRef((void**) (&(*b0).label), HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), idRope));
	return (*b0).label;
}
/* Emits a computed-goto dispatch for a loop body n0 that contains a case
 * statement (node kind 97). Validates the case (must end in an `of` branch,
 * ordinal range <= 10000, first ordinal == 0), builds a static array of
 * label addresses, splits the statements around the case into head (taila)
 * and tail (tailb) ropes, then emits each `of` branch as a labeled block
 * ending in a re-dispatch. Errors are reported via localerror and bail out
 * through BeforeRet. */
N_NIMCALL(void, gencomputedgoto_545744_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI casepos0;
NI arraysize0;
NI id0;
Ropeobj178006* tmp0;
TY178507 LOC27;
Ropeobj178006* gotoarray0;
TY532811 LOC28;
TY178507 LOC33;
NI topblock0;
Ropeobj178006* oldbody0;
Ropeobj178006* tailb0;
Ropeobj178006* taila0;
Tnode292802* casestmt0;
Tloc292816 a_545871_839829468;
TY532811 LOC41;
{ casepos0 = ((NI) -1);
arraysize0 = (NI)0;
{
NI i_545768_839829468;
NI HEX3Atmp_545934_839829468;
NI LOC2;
NI res_545937_839829468;
i_545768_839829468 = (NI)0;
HEX3Atmp_545934_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
HEX3Atmp_545934_839829468 = (LOC2 - 1);
res_545937_839829468 = ((NI) 0);
{
/* Scan the body for the case statement and validate it. */
while (1) {
Tnode292802* it0;
if (!(res_545937_839829468 <= HEX3Atmp_545934_839829468)) goto LA4;
i_545768_839829468 = res_545937_839829468;
it0 = (*n0).kindU.S6.sons->data[i_545768_839829468];
{
NI64 asize0;
if (!((*it0).kind == ((Tnodekind292020) 97))) goto LA7;
{
Tnode292802* LOC11;
LOC11 = (Tnode292802*)0;
/* The case must not have an `else`/`elif` tail (last son must be
 * kind 85, an `of` branch). */
LOC11 = lastson_295364_850551059(it0);
if (!!(((*LOC11).kind == ((Tnodekind292020) 85)))) goto LA12;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_570));
goto BeforeRet;
}
LA12: ;
casepos0 = i_545768_839829468;
asize0 = lengthord_320007_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
{
/* Cap the dispatch table size at 10000 entries. */
if (!(IL64(10000) < asize0)) goto LA16;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_571));
goto BeforeRet;
}
LA16: ;
arraysize0 = ((NI) (asize0));
{
NI64 LOC20;
LOC20 = (NI64)0;
/* Selector type must start at ordinal 0. */
LOC20 = firstord_320001_3876443242((*(*it0).kindU.S6.sons->data[((NI) 0)]).typ);
if (!!((LOC20 == IL64(0)))) goto LA21;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_572));
goto BeforeRet;
}
LA21: ;
}
LA7: ;
res_545937_839829468 += ((NI) 1);
} LA4: ;
}
}
{
/* No case statement found at all. */
if (!(casepos0 < ((NI) 0))) goto LA25;
localerror_196085_155036129((*n0).info, ((NimStringDesc*) &T839829468_573));
goto BeforeRet;
}
LA25: ;
/* Reserve arraysize0+1 consecutive label ids starting at id0. */
id0 = (NI)(((NI) ((*p0).labels)) + ((NI) 1));
(*p0).labels += (NI)(arraysize0 + ((NI) 1));
memset((void*)LOC27, 0, sizeof(LOC27));
LOC27[0] = rope_178401_2381377266(((NI64) (id0)));
tmp0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_574), LOC27, 1);
memset((void*)LOC28, 0, sizeof(LOC28));
LOC28[0] = tmp0;
LOC28[1] = rope_178401_2381377266(((NI64) (arraysize0)));
/* Build the `static void* gotoArray[] = {...}` style table. */
gotoarray0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_575), LOC28, 2);
{
NI i_545819_839829468;
NI HEX3Atmp_545942_839829468;
NI res_545945_839829468;
i_545819_839829468 = (NI)0;
HEX3Atmp_545942_839829468 = (NI)0;
HEX3Atmp_545942_839829468 = (NI)(arraysize0 - ((NI) 1));
res_545945_839829468 = ((NI) 1);
{
while (1) {
TY178507 LOC32;
if (!(res_545945_839829468 <= HEX3Atmp_545942_839829468)) goto LA31;
i_545819_839829468 = res_545945_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
LOC32[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + i_545819_839829468))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_576), LOC32, 1);
res_545945_839829468 += ((NI) 1);
} LA31: ;
}
}
memset((void*)LOC33, 0, sizeof(LOC33));
LOC33[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (id0)) + arraysize0))));
addf_179205_2381377266(&gotoarray0, ((NimStringDesc*) &T839829468_577), LOC33, 1);
line_532690_839829468(p0, ((Tcprocsection529011) 0), gotoarray0);
topblock0 = (NI)(((*p0).blocks ? (*p0).blocks->Sup.len : 0) - ((NI) 1));
/* Temporarily redirect the top block's code section to capture the
 * statements AFTER the case (tailb0) and BEFORE it (taila0). */
oldbody0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545854_839829468;
NI HEX3Atmp_545950_839829468;
NI HEX3Atmp_545951_839829468;
NI LOC35;
NI res_545954_839829468;
j_545854_839829468 = (NI)0;
HEX3Atmp_545950_839829468 = (NI)0;
HEX3Atmp_545951_839829468 = (NI)0;
HEX3Atmp_545950_839829468 = (NI)(casepos0 + ((NI) 1));
LOC35 = (NI)0;
LOC35 = len_293081_850551059(n0);
HEX3Atmp_545951_839829468 = (LOC35 - 1);
res_545954_839829468 = HEX3Atmp_545950_839829468;
{
/* Statements after the case statement -> tailb0. */
while (1) {
if (!(res_545954_839829468 <= HEX3Atmp_545951_839829468)) goto LA37;
j_545854_839829468 = res_545954_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545854_839829468]);
res_545954_839829468 += ((NI) 1);
} LA37: ;
}
}
tailb0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), NIM_NIL);
{
NI j_545866_839829468;
NI HEX3Atmp_545959_839829468;
NI res_545962_839829468;
j_545866_839829468 = (NI)0;
HEX3Atmp_545959_839829468 = (NI)0;
HEX3Atmp_545959_839829468 = (NI)(casepos0 - ((NI) 1));
res_545962_839829468 = ((NI) 0);
{
/* Statements before the case statement -> taila0. */
while (1) {
if (!(res_545962_839829468 <= HEX3Atmp_545959_839829468)) goto LA40;
j_545866_839829468 = res_545962_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[j_545866_839829468]);
res_545962_839829468 += ((NI) 1);
} LA40: ;
}
}
taila0 = (*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0];
/* Restore the section: original body followed by the head statements. */
asgnRefNoCycle((void**) (&(*p0).blocks->data[topblock0].sections[(((Tcprocsection529011) 2))- 0]), HEX26_178418_2381377266(oldbody0, taila0));
casestmt0 = (*n0).kindU.S6.sons->data[casepos0];
memset((void*)(&a_545871_839829468), 0, sizeof(a_545871_839829468));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a_545871_839829468));
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = tmp0;
LOC41[1] = rdloc_538188_839829468((&a_545871_839829468));
/* Initial dispatch: goto *table[selector] (template T839829468_578). */
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC41, 2);
{
NI i_545894_839829468;
NI HEX3Atmp_545978_839829468;
NI LOC43;
NI res_545981_839829468;
i_545894_839829468 = (NI)0;
HEX3Atmp_545978_839829468 = (NI)0;
LOC43 = (NI)0;
LOC43 = len_293081_850551059(casestmt0);
HEX3Atmp_545978_839829468 = (LOC43 - 1);
res_545981_839829468 = ((NI) 1);
{
/* Emit one labeled block per `of` branch (sons 1..last). */
while (1) {
TY533289 LOC46;
NI LOC47;
Tnode292802* it0;
Tnode292802* LOC57;
Ropeobj178006** LOC58;
Ropeobj178006** LOC59;
Tloc292816 a0;
TY532811 LOC60;
if (!(res_545981_839829468 <= HEX3Atmp_545978_839829468)) goto LA45;
i_545894_839829468 = res_545981_839829468;
memset((void*)LOC46, 0, sizeof(LOC46));
LOC47 = (NI)0;
LOC47 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC46, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_545894_839829468];
{
NI j_545910_839829468;
NI HEX3Atmp_545970_839829468;
NI LOC49;
NI res_545973_839829468;
j_545910_839829468 = (NI)0;
HEX3Atmp_545970_839829468 = (NI)0;
LOC49 = (NI)0;
LOC49 = len_293081_850551059(it0);
HEX3Atmp_545970_839829468 = (NI)(LOC49 - ((NI) 2));
res_545973_839829468 = ((NI) 0);
{
/* Emit a case label (T839829468_580) for every ordinal value in
 * this branch; ranges (node kind 44) are rejected. */
while (1) {
NI64 val0;
TY178507 LOC56;
if (!(res_545973_839829468 <= HEX3Atmp_545970_839829468)) goto LA51;
j_545910_839829468 = res_545973_839829468;
{
if (!((*(*it0).kindU.S6.sons->data[j_545910_839829468]).kind == ((Tnodekind292020) 44))) goto LA54;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA54: ;
val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545910_839829468]);
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = intliteral_539270_839829468((NI64)((NI64)(val0 + ((NI64) (id0))) + IL64(1)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_580), LOC56, 1);
res_545973_839829468 += ((NI) 1);
} LA51: ;
}
}
LOC57 = (Tnode292802*)0;
/* Branch body, then the loop tail and head statements, then a fresh
 * evaluation of the selector and a re-dispatch. */
LOC57 = lastson_295364_850551059(it0);
genstmts_539244_839829468(p0, LOC57);
LOC58 = (Ropeobj178006**)0;
LOC58 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC58, tailb0);
LOC59 = (Ropeobj178006**)0;
LOC59 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
add_178482_2381377266(LOC59, taila0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*casestmt0).kindU.S6.sons->data[((NI) 0)], (&a0));
memset((void*)LOC60, 0, sizeof(LOC60));
LOC60[0] = tmp0;
LOC60[1] = rdloc_538188_839829468((&a0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_578), LOC60, 2);
endblock_544060_839829468(p0);
res_545981_839829468 += ((NI) 1);
} LA45: ;
}
}
}BeforeRet: ;
}
/* Emits C code for a `while` statement t0: opens a loop block, evaluates
 * the condition (skipping the exit test when the condition is the literal
 * `true`), and generates the body - via gencomputedgoto when the body is
 * marked with the computed-goto pragma and the C compiler supports it,
 * otherwise via plain genstmts. */
N_NIMCALL(void, genwhilestmt_545985_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Tloc292816 a0;
NI oldbreakidx_546011_839829468;
TY533289 LOC1;
Tnode292802* loopbody0;
memset((void*)(&a0), 0, sizeof(a0));
(*p0).withinloop += ((NI) 1);
genlinedir_532823_839829468(p0, t0);
oldbreakidx_546011_839829468 = (*p0).breakidx;
memset((void*)LOC1, 0, sizeof(LOC1));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_569), LOC1, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
{
NIM_BOOL LOC4;
Ropeobj178006* label0;
TY532811 LOC8;
LOC4 = (NIM_BOOL)0;
/* Emit the "if (!cond) goto breakLabel" exit test unless the condition
 * is an int literal (kind 6) with value 0... NOTE(review): the checked
 * literal value is 0, i.e. the test is skipped for a `false`-literal
 * condition - looks inverted vs. the usual `while true` optimization;
 * behavior preserved, verify against the Nim cgen source. */
LOC4 = !(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6)));
if (LOC4) goto LA5;
LOC4 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval == IL64(0));
LA5: ;
if (!LOC4) goto LA6;
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[(*p0).breakidx]));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468((&a0));
LOC8[1] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_555), LOC8, 2);
}
LA6: ;
loopbody0 = (*t0).kindU.S6.sons->data[((NI) 1)];
{
NIM_BOOL LOC11;
LOC11 = (NIM_BOOL)0;
/* Computed-goto path: body contains pragma (special word 182) AND the
 * target C compiler advertises computed-goto support (cc prop bit 1). */
LOC11 = stmtscontainpragma_528083_2036603609(loopbody0, ((Tspecialword275003) 182));
if (!(LOC11)) goto LA12;
LOC11 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 1))&7U)))!=0);
LA12: ;
if (!LOC11) goto LA13;
{
NIM_BOOL LOC17;
NI LOC18;
LOC17 = (NIM_BOOL)0;
LOC18 = (NI)0;
/* Unwrap a 2-son body whose first son is empty (kind 1). */
LOC18 = len_293081_850551059(loopbody0);
LOC17 = (LOC18 == ((NI) 2));
if (!(LOC17)) goto LA19;
LOC17 = ((*(*loopbody0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1));
LA19: ;
if (!LOC17) goto LA20;
loopbody0 = (*loopbody0).kindU.S6.sons->data[((NI) 1)];
}
LA20: ;
gencomputedgoto_545744_839829468(p0, loopbody0);
}
goto LA9;
LA13: ;
{
genstmts_539244_839829468(p0, loopbody0);
}
LA9: ;
{
TY533289 LOC27;
/* Option bit 19: emit an extra per-iteration line (T839829468_581),
 * presumably a profiler/GC safepoint hook - TODO confirm. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 19))&31U)))!=0)) goto LA25;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_581), LOC27, 0);
}
LA25: ;
endblock_544060_839829468(p0);
(*p0).breakidx = oldbreakidx_546011_839829468;
(*p0).withinloop -= ((NI) 1);
}
/* Emits a goto driven by a literal value node: value0 must be a literal
 * (node kind in 5..15); otherwise a local error is reported. The literal's
 * int value is formatted into template T839829468_583 (presumably a
 * "goto NIMSTATE_x" style jump). */
N_NIMCALL(void, gengotovar_544258_839829468)(Tcproc529021* p0, Tnode292802* value0) {
{
if (!!(((*value0).kind >= ((Tnodekind292020) 5) && (*value0).kind <= ((Tnodekind292020) 15)))) goto LA3;
localerror_196085_155036129((*value0).info, ((NimStringDesc*) &T839829468_582));
}
goto LA1;
LA3: ;
{
TY178507 LOC6;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = rope_178401_2381377266((*value0).kindU.S1.intval);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_583), LOC6, 1);
}
LA1: ;
}
/* Emits the declaration and load code for a variable imported from a
 * dynamic library: loads the lib, marks the symbol's loc (flag bit 0),
 * gives it a mangled dynlib name, emits the dlsym-style load line
 * (T839829468_584 into file section 16) and the extern pointer declaration
 * (T839829468_585 into file section 9). */
N_NIMCALL(void, varindynamiclib_538812_839829468)(Tcgen529027* m0, Tsym292834* sym0) {
Tlib292820* lib0;
Ropeobj178006* extname0;
Ropeobj178006* tmp0;
TY535235 LOC1;
NimStringDesc* LOC2;
TY532811 LOC3;
lib0 = (*sym0).annex;
extname0 = (*sym0).loc.r;
loaddynamiclib_559481_839829468(m0, lib0);
(*sym0).loc.flags |= ((NU16)1)<<((((Tlocflag292810) 0))%(sizeof(NU16)*8));
/* Replace the symbol's location rope with the mangled dynlib accessor. */
tmp0 = mangledynlibproc_538816_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), tmp0);
(*m0).labels += ((NI) 2);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = tmp0;
LOC1[1] = gettypedesc_535673_839829468(m0, (*sym0).typ);
LOC1[2] = (*lib0).name;
LOC2 = (NimStringDesc*)0;
/* The original external name becomes the C string looked up in the lib. */
LOC2 = HEX24_178856_2381377266(extname0);
LOC1[3] = makecstring_191638_155036129(LOC2);
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 16))- 0], ((NimStringDesc*) &T839829468_584), LOC1, 4);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = (*sym0).loc.r;
LOC3[1] = gettypedesc_535673_839829468(m0, (*sym0).loc.t);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 9))- 0], ((NimStringDesc*) &T839829468_585), LOC3, 2);
}
/* Machine-generated (Nim compiler C backend): goto-based control flow mirrors
 * the original Nim `if/elif` structure; do not restructure by hand.
 * Declares/defines a global variable symbol s0: fills its loc on first use,
 * special-cases dynlib-imported symbols, threadvars, and symbols with an
 * explicit codegen constraint, then optionally emits debug registration. */
N_NIMCALL(void, assignglobalvar_538819_839829468)(Tcproc529021* p0, Tsym292834* s0) {
{ {
Ropeobj178006* LOC5;
/* First use (loc.k == 0): fill the location with a mangled global name. */
if (!((*s0).loc.k == ((Tlockind292808) 0))) goto LA3;
LOC5 = (Ropeobj178006*)0;
LOC5 = manglename_533205_839829468(s0);
fillloc_532282_839829468((&(*s0).loc), ((Tlockind292808) 3), (*s0).typ, LOC5, ((Tstorageloc292812) 3));
}
LA3: ;
{
Tcgen529027* q0;
/* loc flag bit 4 set: symbol lives in a dynamic library — handled here,
 * then the routine returns early (no ordinary declaration is emitted). */
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0)) goto LA8;
q0 = findpendingmodule_532241_839829468((*p0).module, s0);
{
NIM_BOOL LOC12;
NIM_BOOL LOC14;
/* Emit the dynlib access only once: in the owning pending module q0,
 * and only if q0 has not already declared this symbol id. */
LOC12 = (NIM_BOOL)0;
LOC12 = !((q0 == NIM_NIL));
if (!(LOC12)) goto LA13;
LOC14 = (NIM_BOOL)0;
LOC14 = containsorincl_268862_2627731572((&(*q0).declaredthings), (*s0).Sup.id);
LOC12 = !(LOC14);
LA13: ;
if (!LOC12) goto LA15;
varindynamiclib_538812_839829468(q0, s0);
}
goto LA10;
LA15: ;
{
/* Already declared elsewhere: only refresh loc.r with the mangled name. */
asgnRefNoCycle((void**) (&(*s0).loc.r), mangledynlibproc_538816_839829468(s0));
}
LA10: ;
goto BeforeRet;
}
LA8: ;
useheader_532369_839829468((*p0).module, s0);
{
/* loc flag bit 3: presumably "already declared" — nothing more to do. */
if (!(((*s0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0)) goto LA20;
goto BeforeRet;
}
LA20: ;
{
/* Symbol flag bit 22: thread-local variable gets a dedicated declaration. */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA24;
declarethreadvar_538676_839829468((*p0).module, s0, (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0));
}
goto LA22;
LA24: ;
{
Ropeobj178006* decl0;
Ropeobj178006* td0;
decl0 = NIM_NIL;
td0 = gettypedesc_535673_839829468((*p0).module, (*s0).loc.t);
{
TY178507 LOC43;
/* No explicit codegen constraint: build "<extern?> <type> <attrs> name;". */
if (!(*s0).constraint == 0) goto LA29;
{
/* Flag bit 5 adds prefix T839829468_240 (presumably "extern"-like). */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 5))&31U)))!=0)) goto LA33;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_240));
}
LA33: ;
add_178482_2381377266(&decl0, td0);
{
/* Flag bit 8: append attribute string T839829468_121. */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 8))&31U)))!=0)) goto LA37;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_121));
}
LA37: ;
{
/* Flag bit 7: append attribute string T839829468_122. */
if (!(((*s0).flags &(1U<<((NU)(((Tsymflag292184) 7))&31U)))!=0)) goto LA41;
add_178487_2381377266(&decl0, ((NimStringDesc*) &T839829468_122));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = (*s0).loc.r;
addf_179205_2381377266(&decl0, ((NimStringDesc*) &T839829468_242), LOC43, 1);
}
goto LA27;
LA29: ;
{
NimStringDesc* LOC45;
TY532811 LOC46;
/* Explicit constraint: use the user-supplied declaration template,
 * with ";\n" (T839829468_497) appended, filled with type and name. */
LOC45 = (NimStringDesc*)0;
LOC45 = rawNewString((*(*s0).constraint).kindU.S3.strval->Sup.len + 3);
appendString(LOC45, (*(*s0).constraint).kindU.S3.strval);
appendString(LOC45, ((NimStringDesc*) &T839829468_497));
memset((void*)LOC46, 0, sizeof(LOC46));
LOC46[0] = td0;
LOC46[1] = (*s0).loc.r;
decl0 = HEX25_178905_2381377266(LOC45, LOC46, 2);
}
LA27: ;
add_178482_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 9))- 0], decl0);
}
LA22: ;
{
/* Inside a loop the location must be reset on every entry. */
if (!(((NI) 0) < (*p0).withinloop)) goto LA49;
resetloc_538350_839829468(p0, (&(*s0).loc));
}
LA49: ;
{
TY535238 LOC55;
NimStringDesc* LOC56;
NimStringDesc* LOC57;
/* Option mask 163840 fully set (presumably debugger support): register
 * "<owner>.<name>" with its location and type info in section 15. */
if (!(((*(*(*p0).module).module).options & 163840) == 163840)) goto LA53;
memset((void*)LOC55, 0, sizeof(LOC55));
LOC56 = (NimStringDesc*)0;
LOC56 = rawNewString((*(*(*s0).owner).name).s->Sup.len + (*(*s0).name).s->Sup.len + 1);
appendString(LOC56, (*(*(*s0).owner).name).s);
appendChar(LOC56, 46);
appendString(LOC56, (*(*s0).name).s);
LOC57 = (NimStringDesc*)0;
LOC57 = nsuNormalize(LOC56);
LOC55[0] = makecstring_191638_155036129(LOC57);
LOC55[1] = (*s0).loc.r;
LOC55[2] = gentypeinfo_535941_839829468((*p0).module, (*s0).typ);
appcg_532632_839829468((*p0).module, &(*(*p0).module).s[(((Tcfilesection529005) 15))- 0], ((NimStringDesc*) &T839829468_586), LOC55, 3);
}
LA53: ;
}BeforeRet: ;
}
/* Machine-generated (Nim compiler C backend). Builds a GC traversal proc for
 * a global variable s0 and returns the rope holding its (temp) name. The
 * generated proc's header goes to section 7, its body to section 10. */
N_NIMCALL(Ropeobj178006*, gentraverseprocforglobal_538032_839829468)(Tcgen529027* m0, Tsym292834* s0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
Ttraversalclosure537019 c0;
Tcproc529021* p0;
Ropeobj178006* sloc0;
Ropeobj178006* header0;
TY178507 LOC8;
Ropeobj178006* generatedproc0;
TY535235 LOC9;
Ropeobj178006** LOC10;
Ropeobj178006** LOC11;
Ropeobj178006** LOC12;
TY178507 LOC13;
result0 = (Ropeobj178006*)0;
/* Ensure type info for the variable's type exists (result discarded). */
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468(m0, (*s0).loc.t);
memset((void*)(&c0), 0, sizeof(c0));
p0 = newproc_529206_3723162438(NIM_NIL, m0);
sloc0 = (*s0).loc.r;
result0 = gettempname_533598_839829468(m0);
{
NIM_BOOL LOC4;
/* Threadvar (flag bit 22) under emulated TLS: access goes through the
 * thread-local storage helper and sloc0 gets prefix T839829468_288. */
LOC4 = (NIM_BOOL)0;
LOC4 = (((*s0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0);
if (!(LOC4)) goto LA5;
LOC4 = emulatedthreadvars_532949_839829468();
LA5: ;
if (!LOC4) goto LA6;
accessthreadlocalvar_532945_839829468(p0, s0);
sloc0 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), sloc0);
}
LA6: ;
c0.visitorfrmt = copyString(((NimStringDesc*) &T839829468_587));
c0.p = p0;
/* Format the proc header (template T839829468_588) from the temp name. */
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = result0;
header0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_588), LOC8, 1);
gentraverseproc_537022_839829468((&c0), sloc0, (*s0).loc.t);
/* Assemble the full proc: header + the new proc's sections 0, 1 and 2. */
memset((void*)LOC9, 0, sizeof(LOC9));
LOC9[0] = header0;
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 0));
LOC9[1] = (*LOC10);
LOC11 = (Ropeobj178006**)0;
LOC11 = s_529179_3723162438(p0, ((Tcprocsection529011) 1));
LOC9[2] = (*LOC11);
LOC12 = (Ropeobj178006**)0;
LOC12 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
LOC9[3] = (*LOC12);
generatedproc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_190), LOC9, 4);
/* Forward declaration into section 7, definition into section 10. */
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = header0;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], ((NimStringDesc*) &T839829468_191), LOC13, 1);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], generatedproc0);
return result0;
}
/* Machine-generated (Nim compiler C backend). If the selected GC (one of the
 * modes in mask 240) is in use AND the variable's type contains GC'ed
 * references, generate a traversal proc for it and append a root-registration
 * call (template T839829468_589) to the module init proc's section 1. */
N_NIMCALL(void, registergcroot_543762_839829468)(Tcproc529021* p0, Tsym292834* v0) {
    NIM_BOOL needsroot;
    needsroot = ((240 &(1U<<((NU)(gselectedgc_169133_2607990831)&7U)))!=0);
    if (needsroot) {
        /* Second half of the short-circuit AND from the original. */
        needsroot = containsgarbagecollectedref_320117_3876443242((*v0).loc.t);
    }
    if (needsroot) {
        Ropeobj178006* traverseproc;
        Ropeobj178006** initsec;
        TY178507 fmtargs;
        traverseproc = gentraverseprocforglobal_538032_839829468((*p0).module, v0);
        initsec = procsec_529194_3723162438((*(*p0).module).initproc, ((Tcprocsection529011) 1));
        memset((void*)fmtargs, 0, sizeof(fmtargs));
        fmtargs[0] = traverseproc;
        appcg_532632_839829468((*p0).module, initsec, ((NimStringDesc*) &T839829468_589), fmtargs, 1);
    }
}
/* Machine-generated (Nim compiler C backend). Returns true unless the node is
 * empty (kind == 1) or its type is an invalid return type — i.e. the value
 * can be assigned in the same statement as the declaration. */
static N_INLINE(NIM_BOOL, isassignedimmediately_543781_839829468)(Tnode292802* n0) {
    if ((*n0).kind == ((Tnodekind292020) 1)) {
        return NIM_FALSE;
    }
    if (isinvalidreturntype_533550_839829468((*n0).typ)) {
        return NIM_FALSE;
    }
    return NIM_TRUE;
}
/* Machine-generated (Nim compiler C backend). Dispatches code generation for
 * an assignment whose right side is a call node ri0 (callee = son 0):
 *   - closure calling convention (callconv == 8) -> genclosurecall
 *   - callee is a symbol (kind 3) with flag bit 27 -> geninfixcall
 *   - callee is a symbol with flag bit 28        -> gennamedparamcall
 *   - otherwise                                  -> genprefixcall
 * Always runs poststmtactions afterwards. Short-circuit order preserved. */
N_NIMCALL(void, genasgncall_543695_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* d0) {
    Tnode292802* callee = (*ri0).kindU.S6.sons->data[((NI) 0)];
    Ttype292840* calleetype = skiptypes_296099_850551059((*callee).typ, 2048);
    if ((*calleetype).callconv == ((Tcallingconvention292002) 8)) {
        genclosurecall_540452_839829468(p0, le0, ri0, d0);
    } else if (((*callee).kind == ((Tnodekind292020) 3)) &&
               ((((*(*callee).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0))) {
        geninfixcall_541929_839829468(p0, le0, ri0, d0);
    } else if (((*callee).kind == ((Tnodekind292020) 3)) &&
               ((((*(*callee).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 28))&31U)))!=0))) {
        /* Note: this branch intentionally does not pass le0 (as original). */
        gennamedparamcall_542616_839829468(p0, ri0, d0);
    } else {
        genprefixcall_539960_839829468(p0, le0, ri0, d0);
    }
    poststmtactions_532942_839829468(p0);
}
/* Machine-generated (Nim compiler C backend). Loads the value ri0 into
 * location a0 (destination node le0):
 *   - ri0 is a call node (kinds 26..32) whose callee is not a symbol or is a
 *     non-magic symbol (magic == 0) -> genasgncall (direct call assignment)
 *   - ri0 is a deref node (kind 47 or 65) -> genderef with enforce flag
 *   - otherwise -> generic expression codegen.
 * Short-circuit evaluation of the call test preserved. */
static N_INLINE(void, loadinto_543928_839829468)(Tcproc529021* p0, Tnode292802* le0, Tnode292802* ri0, Tloc292816* a0) {
    NIM_BOOL directcall;
    directcall = ((*ri0).kind == ((Tnodekind292020) 27) || (*ri0).kind == ((Tnodekind292020) 29) || (*ri0).kind == ((Tnodekind292020) 30) || (*ri0).kind == ((Tnodekind292020) 31) || (*ri0).kind == ((Tnodekind292020) 26) || (*ri0).kind == ((Tnodekind292020) 28) || (*ri0).kind == ((Tnodekind292020) 32));
    if (directcall) {
        Tnode292802* callee = (*ri0).kindU.S6.sons->data[((NI) 0)];
        /* OR short-circuits: sym field is only read when kind == 3. */
        directcall = (!((*callee).kind == ((Tnodekind292020) 3))) ||
                     ((*(*callee).kindU.S4.sym).magic == ((Tmagic292524) 0));
    }
    if (directcall) {
        genasgncall_543695_839829468(p0, le0, ri0, a0);
    } else if ((*ri0).kind == ((Tnodekind292020) 47) || (*ri0).kind == ((Tnodekind292020) 65)) {
        genderef_543921_839829468(p0, ri0, a0, NIM_TRUE);
    } else {
        expr_539248_839829468(p0, ri0, a0);
    }
}
/* Machine-generated (Nim compiler C backend): goto-based flow mirrors the
 * original Nim source; statement order is load-bearing — do not restructure.
 * Generates code for a single `var` definition node a0 (son 0 = symbol,
 * son 2 = initial value): handles compile-time-only / goto vars, globals
 * (with GC root registration), and locals (with an optional fused
 * declare-and-initialize fast path), then emits the assignment if any. */
N_NIMCALL(void, gensinglevar_544276_839829468)(Tcproc529021* p0, Tnode292802* a0) {
Tsym292834* v0;
Tcproc529021* targetproc0;
{ v0 = (*(*a0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
/* Flags in mask 1082130432 set: no runtime code — except flag bit 30
 * (presumably a "goto variable"), which emits a computed goto. */
if (!!(((1082130432 & (*v0).flags) == 0))) goto LA3;
{
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0)) goto LA7;
gengotovar_544258_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 2)]);
}
LA7: ;
goto BeforeRet;
}
LA3: ;
targetproc0 = p0;
{
/* Flag bit 3: global variable. */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA11;
{
NIM_BOOL LOC15;
NIM_BOOL LOC16;
/* Skip entirely when flags mask (96)==32, the initializer is empty
 * (kind 1) and loc flags already carry bits from mask 72. */
LOC15 = (NIM_BOOL)0;
LOC16 = (NIM_BOOL)0;
LOC16 = (((*v0).flags & 96) == 32);
if (!(LOC16)) goto LA17;
LOC16 = ((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1));
LA17: ;
LOC15 = LOC16;
if (!(LOC15)) goto LA18;
LOC15 = !((((*v0).loc.flags & 72) == 0));
LA18: ;
if (!LOC15) goto LA19;
goto BeforeRet;
}
LA19: ;
{
/* Flag bit 9: declaration goes into the module's pre-init proc. */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 9))&31U)))!=0)) goto LA23;
targetproc0 = (*(*p0).module).preinitproc;
}
LA23: ;
assignglobalvar_538819_839829468(targetproc0, v0);
genobjectinit_538242_839829468((*(*p0).module).preinitproc, ((Tcprocsection529011) 1), (*v0).typ, (&(*v0).loc), NIM_TRUE);
{
NIM_BOOL LOC27;
/* Flag bit 6 with a generated header present: emit an extern
 * prototype for the variable into the header. */
LOC27 = (NIM_BOOL)0;
LOC27 = (((*v0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC27)) goto LA28;
LOC27 = !((generatedheader_532201_839829468 == NIM_NIL));
LA28: ;
if (!LOC27) goto LA29;
genvarprototypeaux_544254_839829468(generatedheader_532201_839829468, v0);
}
LA29: ;
registergcroot_543762_839829468(p0, v0);
}
goto LA9;
LA11: ;
{
Tnode292802* value0;
NIM_BOOL imm0;
/* Local variable path. */
value0 = (*a0).kindU.S6.sons->data[((NI) 2)];
imm0 = isassignedimmediately_543781_839829468(value0);
{
NIM_BOOL LOC34;
NIM_BOOL LOC35;
NIM_BOOL LOC36;
NIM_BOOL LOC38;
NIM_BOOL LOC42;
Ropeobj178006* decl0;
Tloc292816 tmp0;
/* Fused declare-and-initialize fast path: immediate assignment, in
 * compile-to-C mode (gcmd == 2) or a module with flag bit 27, no
 * pending split declarations, and a type without hidden pointers. */
LOC34 = (NIM_BOOL)0;
LOC35 = (NIM_BOOL)0;
LOC36 = (NIM_BOOL)0;
LOC36 = imm0;
if (!(LOC36)) goto LA37;
LOC38 = (NIM_BOOL)0;
LOC38 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC38) goto LA39;
LOC38 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA39: ;
LOC36 = LOC38;
LA37: ;
LOC35 = LOC36;
if (!(LOC35)) goto LA40;
LOC35 = ((*p0).splitdecls == ((NI) 0));
LA40: ;
LOC34 = LOC35;
if (!(LOC34)) goto LA41;
LOC42 = (NIM_BOOL)0;
LOC42 = containshiddenpointer_320120_3876443242((*v0).typ);
LOC34 = !(LOC42);
LA41: ;
if (!LOC34) goto LA43;
genlinedir_532823_839829468(p0, a0);
decl0 = localvardecl_538532_839829468(p0, v0);
memset((void*)(&tmp0), 0, sizeof(tmp0));
{
NIM_BOOL LOC47;
NIM_BOOL LOC48;
Tnode292802* LOC50;
Tnode292802* LOC52;
Ropeobj178006* params0;
Ttype292840* typ0;
TY532811 LOC66;
/* Sub-case: value is a call (kinds 26..32) to a symbol with flag
 * bit 24 (presumably a constructor-like call) — emit the call's
 * arguments directly into the declaration's initializer. */
LOC47 = (NIM_BOOL)0;
LOC48 = (NIM_BOOL)0;
LOC48 = ((*value0).kind == ((Tnodekind292020) 27) || (*value0).kind == ((Tnodekind292020) 29) || (*value0).kind == ((Tnodekind292020) 30) || (*value0).kind == ((Tnodekind292020) 31) || (*value0).kind == ((Tnodekind292020) 26) || (*value0).kind == ((Tnodekind292020) 28) || (*value0).kind == ((Tnodekind292020) 32));
if (!(LOC48)) goto LA49;
LOC50 = (Tnode292802*)0;
LOC50 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
LOC48 = ((*LOC50).kind == ((Tnodekind292020) 3));
LA49: ;
LOC47 = LOC48;
if (!(LOC47)) goto LA51;
LOC52 = (Tnode292802*)0;
LOC52 = HEX5BHEX5D_293238_850551059(value0, ((NI) 0));
LOC47 = (((*(*LOC52).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 24))&31U)))!=0);
LA51: ;
if (!LOC47) goto LA53;
params0 = (Ropeobj178006*)0;
typ0 = skiptypes_296099_850551059((*(*value0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106232576256));
{
NI i_544619_839829468;
NI HEX3Atmp_544825_839829468;
NI LOC56;
NI res_544828_839829468;
/* Loop over call arguments (sons 1..len-1), comma-separating. */
i_544619_839829468 = (NI)0;
HEX3Atmp_544825_839829468 = (NI)0;
LOC56 = (NI)0;
LOC56 = len_293081_850551059(value0);
HEX3Atmp_544825_839829468 = (LOC56 - 1);
res_544828_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC65;
if (!(res_544828_839829468 <= HEX3Atmp_544825_839829468)) goto LA58;
i_544619_839829468 = res_544828_839829468;
{
TY533289 LOC63;
Ropeobj178006* LOC64;
/* Insert separator (template T839829468_110) before every
 * argument except the first. */
if (!!((params0 == NIM_NIL))) goto LA61;
memset((void*)LOC63, 0, sizeof(LOC63));
LOC64 = (Ropeobj178006*)0;
LOC64 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_110), LOC63, 0);
add_178482_2381377266(&params0, LOC64);
}
LA61: ;
LOC65 = (Ropeobj178006*)0;
LOC65 = genotherarg_539277_839829468(p0, value0, i_544619_839829468, typ0);
add_178482_2381377266(&params0, LOC65);
res_544828_839829468 += ((NI) 1);
} LA58: ;
}
}
memset((void*)LOC66, 0, sizeof(LOC66));
LOC66[0] = decl0;
LOC66[1] = params0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_590), LOC66, 2);
}
goto LA45;
LA53: ;
{
TY532811 LOC68;
/* General fused path: evaluate the value once and emit
 * "<decl> = <value>;" via template T839829468_591. */
initlocexprsingleuse_539289_839829468(p0, value0, (&tmp0));
memset((void*)LOC68, 0, sizeof(LOC68));
LOC68[0] = decl0;
LOC68[1] = rdloc_538188_839829468((&tmp0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_591), LOC68, 2);
}
LA45: ;
goto BeforeRet;
}
LA43: ;
/* Slow path: separate declaration and default initialization. */
assignlocalvar_538614_839829468(p0, v0);
initlocalvar_538398_839829468(p0, v0, imm0);
}
LA9: ;
{
/* Non-empty initializer: emit the assignment into targetproc0. */
if (!!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)))) goto LA71;
genlinedir_532823_839829468(targetproc0, a0);
loadinto_543928_839829468(targetproc0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&(*v0).loc));
}
LA71: ;
}BeforeRet: ;
}
/* Machine-generated (Nim compiler C backend). Generates an assignment for a
 * closure variable definition node a0 (son 0 = destination, son 2 = value):
 * does nothing when the value is the empty node (kind 1), otherwise evaluates
 * the destination into a loc and loads the value into it. */
N_NIMCALL(void, genclosurevar_544832_839829468)(Tcproc529021* p0, Tnode292802* a0) {
    if (!(((*(*a0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1)))) {
        Tloc292816 destloc;
        memset((void*)(&destloc), 0, sizeof(destloc));
        initlocexpr_539283_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (&destloc));
        genlinedir_532823_839829468(p0, a0);
        loadinto_543928_839829468(p0, (*a0).kindU.S6.sons->data[((NI) 0)], (*a0).kindU.S6.sons->data[((NI) 2)], (&destloc));
    }
}
/* Machine-generated (Nim compiler C backend): goto-based flow mirrors the
 * original Nim source; do not restructure by hand.
 * Generates code for a tuple-unpacking `var (a, b) = expr` node n0 (kind must
 * be 36): if any destination is not a plain symbol, falls back to the lowered
 * AST form; otherwise evaluates the tuple once and assigns each field. */
N_NIMCALL(void, genvartuple_543794_839829468)(Tcproc529021* p0, Tnode292802* n0) {
Tloc292816 tup0;
Tloc292816 field0;
NI L0;
NIM_BOOL uselowering0;
Ttype292840* t0;
{ memset((void*)(&tup0), 0, sizeof(tup0));
memset((void*)(&field0), 0, sizeof(field0));
{
/* Internal error unless n0 is a tuple-unpacking node (kind 36). */
if (!!(((*n0).kind == ((Tnodekind292020) 36)))) goto LA3;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA3: ;
L0 = sonslen_295351_850551059(n0);
uselowering0 = NIM_FALSE;
{
NI i_543822_839829468;
NI HEX3Atmp_543905_839829468;
NI res_543908_839829468;
/* Scan destinations (sons 0..L-3): any non-symbol destination forces
 * the lowering fallback. */
i_543822_839829468 = (NI)0;
HEX3Atmp_543905_839829468 = (NI)0;
HEX3Atmp_543905_839829468 = (NI)(L0 - ((NI) 3));
res_543908_839829468 = ((NI) 0);
{
while (1) {
if (!(res_543908_839829468 <= HEX3Atmp_543905_839829468)) goto LA7;
i_543822_839829468 = res_543908_839829468;
{
Tnode292802* LOC10;
LOC10 = (Tnode292802*)0;
LOC10 = HEX5BHEX5D_293238_850551059(n0, i_543822_839829468);
if (!!(((*LOC10).kind == ((Tnodekind292020) 3)))) goto LA11;
uselowering0 = NIM_TRUE;
goto LA5;
}
LA11: ;
res_543908_839829468 += ((NI) 1);
} LA7: ;
}
} LA5: ;
{
Tnode292802* LOC17;
/* Fallback: rewrite to simple statements and generate those instead. */
if (!uselowering0) goto LA15;
LOC17 = (Tnode292802*)0;
LOC17 = lowertupleunpacking_433037_2218250499(n0, (*p0).prc);
genstmts_539244_839829468(p0, LOC17);
goto BeforeRet;
}
LA15: ;
genlinedir_532823_839829468(p0, n0);
/* Evaluate the tuple expression (last son) once into tup0. */
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[(NI)(L0 - ((NI) 1))], (&tup0));
t0 = getuniquetype_528640_2036603609(tup0.t);
{
NI i_543846_839829468;
NI HEX3Atmp_543914_839829468;
NI res_543917_839829468;
/* Assign each destination symbol (sons 0..L-3) from its tuple field. */
i_543846_839829468 = (NI)0;
HEX3Atmp_543914_839829468 = (NI)0;
HEX3Atmp_543914_839829468 = (NI)(L0 - ((NI) 3));
res_543917_839829468 = ((NI) 0);
{
while (1) {
if (!(res_543917_839829468 <= HEX3Atmp_543914_839829468)) goto LA20;
i_543846_839829468 = res_543917_839829468;
{
Tsym292834* v0;
v0 = (*(*n0).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym;
{
/* Flag bit 23: skip this destination entirely. */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA24;
goto LA21;
}
LA24: ;
{
/* Flag bit 3: global destination — declare + register GC root. */
if (!(((*v0).flags &(1U<<((NU)(((Tsymflag292184) 3))&31U)))!=0)) goto LA28;
assignglobalvar_538819_839829468(p0, v0);
genobjectinit_538242_839829468(p0, ((Tcprocsection529011) 1), (*v0).typ, (&(*v0).loc), NIM_TRUE);
registergcroot_543762_839829468(p0, v0);
}
goto LA26;
LA28: ;
{
Tnode292802* LOC31;
NIM_BOOL LOC32;
/* Local destination — declare and default-initialize. */
assignlocalvar_538614_839829468(p0, v0);
LOC31 = (Tnode292802*)0;
LOC31 = HEX5BHEX5D_293238_850551059(n0, (NI)(L0 - ((NI) 1)));
LOC32 = (NIM_BOOL)0;
LOC32 = isassignedimmediately_543781_839829468(LOC31);
initlocalvar_538398_839829468(p0, v0, LOC32);
}
LA26: ;
initloc_532273_839829468((&field0), ((Tlockind292808) 6), (*t0).sons->data[i_543846_839829468], tup0.s);
{
TY532811 LOC37;
/* Anonymous tuple (type kind 18): access by positional field name
 * via template T839829468_185 ("<tup>.Field<i>"-style). */
if (!((*t0).kind == ((Ttypekind292244) 18))) goto LA35;
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = rdloc_538188_839829468((&tup0));
LOC37[1] = rope_178401_2381377266(((NI64) (i_543846_839829468)));
field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_185), LOC37, 2);
}
goto LA33;
LA35: ;
{
TY532811 LOC43;
{
/* Named tuple: the type's record node must hold a symbol here. */
if (!!(((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kind == ((Tnodekind292020) 3)))) goto LA41;
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_592));
}
LA41: ;
memset((void*)LOC43, 0, sizeof(LOC43));
LOC43[0] = rdloc_538188_839829468((&tup0));
LOC43[1] = manglerecfieldname_534361_839829468((*(*(*t0).n).kindU.S6.sons->data[i_543846_839829468]).kindU.S4.sym, t0);
field0.r = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_90), LOC43, 2);
}
LA33: ;
putlocintodest_539258_839829468(p0, (&(*v0).loc), (&field0));
} LA21: ;
res_543917_839829468 += ((NI) 1);
} LA20: ;
}
}
}BeforeRet: ;
}
/* Machine-generated (Nim compiler C backend). Generates code for a `var`
 * statement node n0 by dispatching each son:
 *   - kind 125 (presumably a comment/empty entry): skipped
 *   - kind 35 (identdefs): single var when son 0 is a symbol (kind 3),
 *     closure var otherwise
 *   - anything else: tuple unpacking. */
N_NIMCALL(void, genvarstmt_544854_839829468)(Tcproc529021* p0, Tnode292802* n0) {
    NI soncount;
    NI idx;
    soncount = sonslen_295351_850551059(n0);
    for (idx = ((NI) 0); idx <= (NI)(soncount - ((NI) 1)); idx++) {
        Tnode292802* son = (*n0).kindU.S6.sons->data[idx];
        if ((*son).kind == ((Tnodekind292020) 125)) {
            continue;
        }
        if ((*son).kind == ((Tnodekind292020) 35)) {
            if ((*(*son).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3)) {
                gensinglevar_544276_839829468(p0, son);
            } else {
                genclosurevar_544832_839829468(p0, son);
            }
        } else {
            genvartuple_543794_839829468(p0, son);
        }
    }
}
/* Machine-generated (Nim compiler C backend). A symbol is emitted lazily when
 * global option bit 2 is set, or when its owning module carries symbol flag
 * bit 25. The module lookup only runs if the option check fails (preserving
 * the original short-circuit). */
static N_INLINE(NIM_BOOL, emitlazily_532248_839829468)(Tsym292834* s0) {
    NIM_BOOL lazy;
    lazy = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0);
    if (!lazy) {
        Tsym292834* owningmodule = getmodule_299123_2984716966(s0);
        lazy = (((*owningmodule).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0);
    }
    return lazy;
}
/* Machine-generated (Nim compiler C backend): goto-based flow mirrors the
 * original Nim source; do not restructure by hand.
 * Generates code for a `const` section node t0: for each constdef (kind 102),
 * skips compile-time-only types, and requests a C-level definition for
 * complex constants (type kinds in mask 17629200) that have not been
 * generated yet and are not emitted lazily. */
N_NIMCALL(void, genconststmt_544909_839829468)(Tcproc529021* p0, Tnode292802* t0) {
{
NI i_544924_839829468;
NI HEX3Atmp_544975_839829468;
NI LOC2;
NI res_544978_839829468;
/* Iterate over all sons of the const section. */
i_544924_839829468 = (NI)0;
HEX3Atmp_544975_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_544975_839829468 = (NI)(LOC2 - ((NI) 1));
res_544978_839829468 = ((NI) 0);
{
while (1) {
if (!(res_544978_839829468 <= HEX3Atmp_544975_839829468)) goto LA4;
i_544924_839829468 = res_544978_839829468;
{
Tnode292802* it0;
Tsym292834* c0;
it0 = (*t0).kindU.S6.sons->data[i_544924_839829468];
{
/* Kind 125 (presumably comment/empty entry): skip. */
if (!((*it0).kind == ((Tnodekind292020) 125))) goto LA8;
goto LA5;
}
LA8: ;
{
/* Anything but a constdef node (kind 102) is an internal error. */
if (!!(((*it0).kind == ((Tnodekind292020) 102)))) goto LA12;
internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_593));
}
LA12: ;
c0 = (*(*it0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC16;
/* Compile-time-only types produce no runtime code: skip. */
LOC16 = (NIM_BOOL)0;
LOC16 = containscompiletimeonly_328721_3876443242((*c0).typ);
if (!LOC16) goto LA17;
goto LA5;
}
goto LA14;
LA17: ;
{
NIM_BOOL LOC20;
NIM_BOOL LOC21;
NI LOC24;
/* Complex const: type kind in mask 17629200, not already declared
 * (loc flag bit 3 clear), and with a non-empty AST. */
LOC20 = (NIM_BOOL)0;
LOC21 = (NIM_BOOL)0;
LOC21 = ((17629200 &((NU64)1<<((NU)((*(*c0).typ).kind)&63U)))!=0);
if (!(LOC21)) goto LA22;
LOC21 = !((((*c0).loc.flags &(1U<<((NU)(((Tlocflag292810) 3))&15U)))!=0));
LA22: ;
LOC20 = LOC21;
if (!(LOC20)) goto LA23;
LOC24 = (NI)0;
LOC24 = len_293081_850551059((*c0).ast);
LOC20 = !((LOC24 == ((NI) 0)));
LA23: ;
if (!LOC20) goto LA25;
{
NIM_BOOL LOC29;
/* Lazily-emitted symbols are generated on demand elsewhere. */
LOC29 = (NIM_BOOL)0;
LOC29 = emitlazily_532248_839829468(c0);
if (!!(LOC29)) goto LA30;
requestconstimpl_539240_839829468(p0, c0);
}
LA30: ;
}
goto LA14;
LA25: ;
LA14: ;
} LA5: ;
res_544978_839829468 += ((NI) 1);
} LA4: ;
}
}
}
/* Machine-generated (Nim compiler C backend). For one string `case` branch b0
 * (sons 0..len-2 are string literals, the last son is the body): hashes each
 * literal, picks a bucket via `hash & (branches0Len0 - 1)` (bucket count is a
 * power of two), and appends a compare-and-goto (template T839829468_595,
 * jumping to labl0) to that bucket's rope. */
N_NIMCALL(void, gencasestringbranch_547100_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, Ropeobj178006* labl0, Ropeobj178006** branches0, NI branches0Len0) {
    Tloc292816 strloc;
    NI lastidx;
    NI idx;
    memset((void*)(&strloc), 0, sizeof(strloc));
    lastidx = (NI)(sonslen_295351_850551059(b0) - ((NI) 2));
    for (idx = ((NI) 0); idx <= lastidx; idx++) {
        NI64 strhash;
        NI bucket;
        TY535238 fmtargs;
        initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[idx], (&strloc));
        strhash = hashstring_528100_2036603609((*(*b0).kindU.S6.sons->data[idx]).kindU.S3.strval);
        /* Mask works because the bucket array length is a power of two. */
        bucket = ((NI) ((NI64)(strhash & ((NI64) ((branches0Len0-1))))));
        memset((void*)fmtargs, 0, sizeof(fmtargs));
        fmtargs[0] = rdloc_538188_839829468(e0);
        fmtargs[1] = rdloc_538188_839829468((&strloc));
        fmtargs[2] = labl0;
        appcg_532632_839829468((*p0).module, &branches0[bucket], ((NimStringDesc*) &T839829468_595), fmtargs, 3);
    }
}
/* Machine-generated (Nim compiler C backend). Generates expression n0 into
 * destination d0 inside a fresh C scope block (template T839829468_273).
 * The block id returned by startblock is deliberately discarded, exactly as
 * in the original. */
N_NIMCALL(void, exprblock_544103_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
    TY533289 emptyargs;
    memset((void*)emptyargs, 0, sizeof(emptyargs));
    (void) startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), emptyargs, 0);
    expr_539248_839829468(p0, n0, d0);
    endblock_544060_839829468(p0);
}
/* Machine-generated (Nim compiler C backend): goto-based flow mirrors the
 * original Nim source; do not restructure by hand.
 * Second pass of `case` codegen: for branches 1..until0 of node t0, emits the
 * numbered branch label (labid0 + i, template T839829468_599), the branch
 * body, and — for of-branches (kind 85) — a jump to the shared end label.
 * Returns the end label rope for the caller to fix up. */
N_NIMCALL(Ropeobj178006*, gencasesecondpass_546965_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NI labid0, NI until0) {
Ropeobj178006* result0;
Ropeobj178006* lend0;
result0 = (Ropeobj178006*)0;
lend0 = getlabel_539217_839829468(p0);
{
NI i_546984_839829468;
NI res_547017_839829468;
i_546984_839829468 = (NI)0;
res_547017_839829468 = ((NI) 1);
{
while (1) {
TY178507 LOC10;
if (!(res_547017_839829468 <= until0)) goto LA3;
i_546984_839829468 = res_547017_839829468;
{
NIM_BOOL LOC6;
/* A pending destination (loc kind 1) for an empty result type is
 * downgraded to "no location" (kind 0) before generating bodies. */
LOC6 = (NIM_BOOL)0;
LOC6 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC6)) goto LA7;
LOC6 = isemptytype_297441_850551059((*t0).typ);
LA7: ;
if (!LOC6) goto LA8;
(*d0).k = ((Tlockind292808) 0);
}
LA8: ;
/* Emit this branch's numbered label. */
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rope_178401_2381377266(((NI64) ((NI)(labid0 + i_546984_839829468))));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC10, 1);
{
NI length0;
TY178507 LOC15;
/* Of-branch (kind 85): body is the last son; jump to end afterwards. */
if (!((*(*t0).kindU.S6.sons->data[i_546984_839829468]).kind == ((Tnodekind292020) 85))) goto LA13;
length0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_546984_839829468]);
exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[(NI)(length0 - ((NI) 1))], d0);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = lend0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC15, 1);
}
goto LA11;
LA13: ;
{
/* Else-branch: body is son 0, falls through (no jump emitted). */
exprblock_544103_839829468(p0, (*(*t0).kindU.S6.sons->data[i_546984_839829468]).kindU.S6.sons->data[((NI) 0)], d0);
}
LA11: ;
res_547017_839829468 += ((NI) 1);
} LA3: ;
}
}
result0 = lend0;
return result0;
}
/* Machine-generated (Nim compiler C backend): goto-based flow mirrors the
 * original Nim source; do not restructure by hand.
 * Emits the comparisons for one generic `case` branch b0 against selector
 * loc e0: range sons (kind 44) use rangeformat0 with low/high bounds, plain
 * sons use eqformat0; both jump to labl0 on match. The last son (the branch
 * body) is not touched here. */
N_NIMCALL(void, gencasegenericbranch_546910_839829468)(Tcproc529021* p0, Tnode292802* b0, Tloc292816* e0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, Ropeobj178006* labl0) {
Tloc292816 x0;
Tloc292816 y0;
NI length0;
memset((void*)(&x0), 0, sizeof(x0));
memset((void*)(&y0), 0, sizeof(y0));
length0 = sonslen_295351_850551059(b0);
{
NI i_546932_839829468;
NI HEX3Atmp_546958_839829468;
NI res_546961_839829468;
/* Iterate over the label sons only (0..length-2). */
i_546932_839829468 = (NI)0;
HEX3Atmp_546958_839829468 = (NI)0;
HEX3Atmp_546958_839829468 = (NI)(length0 - ((NI) 2));
res_546961_839829468 = ((NI) 0);
{
while (1) {
if (!(res_546961_839829468 <= HEX3Atmp_546958_839829468)) goto LA3;
i_546932_839829468 = res_546961_839829468;
{
TY535235 LOC8;
/* Range son (kind 44): test "low <= e && e <= high" via rangeformat0
 * with args (selector, low, high, label). */
if (!((*(*b0).kindU.S6.sons->data[i_546932_839829468]).kind == ((Tnodekind292020) 44))) goto LA6;
initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 0)], (&x0));
initlocexpr_539283_839829468(p0, (*(*b0).kindU.S6.sons->data[i_546932_839829468]).kindU.S6.sons->data[((NI) 1)], (&y0));
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdcharloc_538227_839829468(e0);
LOC8[1] = rdcharloc_538227_839829468((&x0));
LOC8[2] = rdcharloc_538227_839829468((&y0));
LOC8[3] = labl0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), rangeformat0, LOC8, 4);
}
goto LA4;
LA6: ;
{
TY535238 LOC10;
/* Single-value son: equality test via eqformat0 with args
 * (selector, value, label). */
initlocexpr_539283_839829468(p0, (*b0).kindU.S6.sons->data[i_546932_839829468], (&x0));
memset((void*)LOC10, 0, sizeof(LOC10));
LOC10[0] = rdcharloc_538227_839829468(e0);
LOC10[1] = rdcharloc_538227_839829468((&x0));
LOC10[2] = labl0;
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), eqformat0, LOC10, 3);
}
LA4: ;
res_546961_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Machine-generated (Nim compiler C backend): goto-based flow mirrors the
 * original Nim source; do not restructure by hand.
 * First pass of if-style `case` codegen: for branches 1..until0, allocates a
 * fresh label per branch and emits either the branch's comparisons jumping to
 * it (of-branches, kind 85) or an unconditional jump (else, template
 * T839829468_598). When branches remain after until0, brackets the second
 * pass with an extra goto/label pair so untested branches are skipped.
 * Returns the end label produced by the second pass. */
N_NIMCALL(Ropeobj178006*, genifforcaseuntil_547021_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0, NI until0, Tloc292816* a0) {
Ropeobj178006* result0;
NI labid0;
result0 = (Ropeobj178006*)0;
/* Remember the label counter so the second pass can re-derive the
 * per-branch label numbers (labid0 + i). */
labid0 = (*p0).labels;
{
NI i_547042_839829468;
NI res_547083_839829468;
i_547042_839829468 = (NI)0;
res_547083_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547083_839829468 <= until0)) goto LA3;
i_547042_839829468 = res_547083_839829468;
/* One fresh label per branch. */
(*p0).labels += ((NI) 1);
{
Ropeobj178006* LOC8;
Ropeobj178006* LOC9;
/* Of-branch: emit its comparisons targeting "LA<labels>"-style
 * label (prefix T839829468_296). */
if (!((*(*t0).kindU.S6.sons->data[i_547042_839829468]).kind == ((Tnodekind292020) 85))) goto LA6;
LOC8 = (Ropeobj178006*)0;
LOC8 = rope_178401_2381377266(((NI64) ((*p0).labels)));
LOC9 = (Ropeobj178006*)0;
LOC9 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC8);
gencasegenericbranch_546910_839829468(p0, (*t0).kindU.S6.sons->data[i_547042_839829468], a0, rangeformat0, eqformat0, LOC9);
}
goto LA4;
LA6: ;
{
TY178507 LOC11;
/* Else-branch: unconditional jump to its label. */
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC11, 1);
}
LA4: ;
res_547083_839829468 += ((NI) 1);
} LA3: ;
}
}
{
NI LOC14;
NI gototarget0;
TY178507 LOC17;
TY178507 LOC18;
/* Branches remain beyond until0: jump over the bodies emitted by the
 * second pass, then place the jump target label after them. */
LOC14 = (NI)0;
LOC14 = len_293081_850551059(t0);
if (!(until0 < (NI)(LOC14 - ((NI) 1)))) goto LA15;
(*p0).labels += ((NI) 1);
gototarget0 = (*p0).labels;
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = rope_178401_2381377266(((NI64) (gototarget0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC17, 1);
result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = rope_178401_2381377266(((NI64) (gototarget0)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_599), LOC18, 1);
}
goto LA12;
LA15: ;
{
/* All branches handled: just run the second pass. */
result0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), until0);
}
LA12: ;
return result0;
}
/* Machine-generated (Nim compiler C backend). Driver for if-style `case`
 * codegen: evaluates the selector (son 0) into a loc, runs the two-pass
 * branch generator over all branches, then fixes up the shared end label. */
N_NIMCALL(void, gencasegeneric_547087_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0, NimStringDesc* rangeformat0, NimStringDesc* eqformat0) {
    Tloc292816 selector;
    NI soncount;
    Ropeobj178006* endlabel;
    memset((void*)(&selector), 0, sizeof(selector));
    initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&selector));
    soncount = sonslen_295351_850551059(t0);
    endlabel = genifforcaseuntil_547021_839829468(p0, t0, d0, rangeformat0, eqformat0, (NI)(soncount - ((NI) 1)), (&selector));
    fixlabel_539230_839829468(p0, endlabel);
}
/* Nim-compiler-generated C (cgen output; identifiers are mangled Nim symbols).
 * genstringcase: emits C code for a Nim `case` over a string expression
 * (dispatched from gencase below when the selector type kind is 28).
 * Strategy visible here: count the string literals across the of-branches;
 * if more than 8, build a bitmask-indexed branch table (power-of-two sized)
 * and emit a C switch over it, otherwise fall back to gencasegeneric. */
N_NIMCALL(void, genstringcase_547417_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
NI strings0;
strings0 = ((NI) 0);
/* Pass 1: for every son of t0 (skipping son 0, the selector) whose kind is 85
 * (an of-branch), add its son count minus one (the last son is the branch
 * body) to the running literal count. */
{
NI i_547435_839829468;
NI HEX3Atmp_547550_839829468;
NI LOC2;
NI res_547553_839829468;
i_547435_839829468 = (NI)0;
HEX3Atmp_547550_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_547550_839829468 = (NI)(LOC2 - ((NI) 1));
res_547553_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547553_839829468 <= HEX3Atmp_547550_839829468)) goto LA4;
i_547435_839829468 = res_547553_839829468;
{
NI LOC9;
if (!((*(*t0).kindU.S6.sons->data[i_547435_839829468]).kind == ((Tnodekind292020) 85))) goto LA7;
LOC9 = (NI)0;
LOC9 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i_547435_839829468]);
strings0 += (NI)(LOC9 - ((NI) 1));
}
LA7: ;
res_547553_839829468 += ((NI) 1);
} LA4: ;
}
}
{
NI bitmask0;
NI LOC14;
TY191350* branches0;
Tloc292816 a0;
NI labid0;
TY532811 LOC26;
TY533289 LOC35;
Ropeobj178006* lend0;
NI LOC42;
/* Threshold: use the table-based dispatch only for more than 8 literals. */
if (!(((NI) 8) < strings0)) goto LA12;
LOC14 = (NI)0;
LOC14 = nextpoweroftwo_101629_1009420244(strings0);
bitmask0 = (NI)(LOC14 - ((NI) 1));
branches0 = (TY191350*)0;
branches0 = (TY191350*) newSeq((&NTI191350), ((NI) ((NI)(bitmask0 + ((NI) 1)))));
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
labid0 = (*p0).labels;
/* Pass 2: allocate a fresh label per branch and emit each of-branch's code
 * into the branches table via gencasestringbranch. */
{
NI i_547484_839829468;
NI HEX3Atmp_547560_839829468;
NI LOC16;
NI res_547563_839829468;
i_547484_839829468 = (NI)0;
HEX3Atmp_547560_839829468 = (NI)0;
LOC16 = (NI)0;
LOC16 = sonslen_295351_850551059(t0);
HEX3Atmp_547560_839829468 = (NI)(LOC16 - ((NI) 1));
res_547563_839829468 = ((NI) 1);
{
while (1) {
if (!(res_547563_839829468 <= HEX3Atmp_547560_839829468)) goto LA18;
i_547484_839829468 = res_547563_839829468;
(*p0).labels += ((NI) 1);
{
Ropeobj178006* LOC23;
Ropeobj178006* LOC24;
if (!((*(*t0).kindU.S6.sons->data[i_547484_839829468]).kind == ((Tnodekind292020) 85))) goto LA21;
LOC23 = (Ropeobj178006*)0;
LOC23 = rope_178401_2381377266(((NI64) ((*p0).labels)));
LOC24 = (Ropeobj178006*)0;
LOC24 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_296), LOC23);
gencasestringbranch_547100_839829468(p0, (*t0).kindU.S6.sons->data[i_547484_839829468], (&a0), LOC24, branches0->data, branches0->Sup.len);
}
goto LA19;
LA21: ;
{
}
LA19: ;
res_547563_839829468 += ((NI) 1);
} LA18: ;
}
}
/* Emit the dispatching switch header: selector loc and the bitmask. */
memset((void*)LOC26, 0, sizeof(LOC26));
LOC26[0] = rdloc_538188_839829468((&a0));
LOC26[1] = rope_178401_2381377266(((NI64) (bitmask0)));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_596), LOC26, 2);
/* Emit one case arm per non-nil slot of the branches table. */
{
NI j_547518_839829468;
NI HEX3Atmp_547568_839829468;
NI res_547571_839829468;
j_547518_839829468 = (NI)0;
HEX3Atmp_547568_839829468 = (NI)0;
HEX3Atmp_547568_839829468 = (branches0 ? (branches0->Sup.len-1) : -1);
res_547571_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547571_839829468 <= HEX3Atmp_547568_839829468)) goto LA29;
j_547518_839829468 = res_547571_839829468;
{
TY532811 LOC34;
if (!!((branches0->data[j_547518_839829468] == NIM_NIL))) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = intliteral_539270_839829468(((NI64) (j_547518_839829468)));
LOC34[1] = branches0->data[j_547518_839829468];
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_597), LOC34, 2);
}
LA32: ;
res_547571_839829468 += ((NI) 1);
} LA29: ;
}
}
memset((void*)LOC35, 0, sizeof(LOC35));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC35, 0);
/* If the last branch is not an of-branch (i.e. there is an else/default
 * part), emit a jump to the proc's current label for it. */
{
NI LOC38;
TY178507 LOC41;
LOC38 = (NI)0;
LOC38 = sonslen_295351_850551059(t0);
if (!!(((*(*t0).kindU.S6.sons->data[(NI)(LOC38 - ((NI) 1))]).kind == ((Tnodekind292020) 85)))) goto LA39;
memset((void*)LOC41, 0, sizeof(LOC41));
LOC41[0] = rope_178401_2381377266(((NI64) ((*p0).labels)));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_598), LOC41, 1);
}
LA39: ;
LOC42 = (NI)0;
LOC42 = sonslen_295351_850551059(t0);
lend0 = gencasesecondpass_546965_839829468(p0, t0, d0, ((NI) (labid0)), (NI)(LOC42 - ((NI) 1)));
fixlabel_539230_839829468(p0, lend0);
}
goto LA10;
LA12: ;
/* 8 or fewer literals: generic if-chain based case generation. */
{
gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_490), ((NimStringDesc*) &T839829468_595));
}
LA10: ;
}
/* Nim-compiler-generated C. gengotoforcase: emits the goto-variable flavor of
 * a Nim `case` statement (selected in gencase when the selector symbol has
 * flag 30 set). For each branch it opens a C block, emits one label line per
 * ordinal value of the branch (format string T839829468_602), then generates
 * the branch body. Range nodes (kind 44) are rejected with localerror and
 * abort the whole generation via `goto BeforeRet`. */
N_NIMCALL(void, gengotoforcase_545673_839829468)(Tcproc529021* p0, Tnode292802* casestmt0) {
{ {
NI i_545695_839829468;
NI HEX3Atmp_545737_839829468;
NI LOC2;
NI res_545740_839829468;
i_545695_839829468 = (NI)0;
HEX3Atmp_545737_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(casestmt0);
HEX3Atmp_545737_839829468 = (LOC2 - 1);
res_545740_839829468 = ((NI) 1);
{
while (1) {
TY533289 LOC5;
NI LOC6;
Tnode292802* it0;
Tnode292802* LOC16;
if (!(res_545740_839829468 <= HEX3Atmp_545737_839829468)) goto LA4;
i_545695_839829468 = res_545740_839829468;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC6 = (NI)0;
LOC6 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC5, 0);
it0 = (*casestmt0).kindU.S6.sons->data[i_545695_839829468];
/* Inner loop: every son of the branch except the last (the body). */
{
NI j_545711_839829468;
NI HEX3Atmp_545730_839829468;
NI LOC8;
NI res_545733_839829468;
j_545711_839829468 = (NI)0;
HEX3Atmp_545730_839829468 = (NI)0;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(it0);
HEX3Atmp_545730_839829468 = (NI)(LOC8 - ((NI) 2));
res_545733_839829468 = ((NI) 0);
{
while (1) {
NI64 val0;
TY178507 LOC15;
if (!(res_545733_839829468 <= HEX3Atmp_545730_839829468)) goto LA10;
j_545711_839829468 = res_545733_839829468;
{
/* Node kind 44 appears to be a range constructor: not supported here. */
if (!((*(*it0).kindU.S6.sons->data[j_545711_839829468]).kind == ((Tnodekind292020) 44))) goto LA13;
localerror_196085_155036129((*it0).info, ((NimStringDesc*) &T839829468_579));
goto BeforeRet;
}
LA13: ;
val0 = getordvalue_320129_3876443242((*it0).kindU.S6.sons->data[j_545711_839829468]);
memset((void*)LOC15, 0, sizeof(LOC15));
LOC15[0] = rope_178401_2381377266(val0);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_602), LOC15, 1);
res_545733_839829468 += ((NI) 1);
} LA10: ;
}
}
LOC16 = (Tnode292802*)0;
LOC16 = lastson_295364_850551059(it0);
genstmts_539244_839829468(p0, LOC16);
endblock_544060_839829468(p0);
res_545740_839829468 += ((NI) 1);
} LA4: ;
}
}
}BeforeRet: ;
}
/* Nim-compiler-generated C. branchhastoobigrange: returns true when any value
 * son of branch b0 (all sons but the last, which is the body) is a range node
 * (kind 44) spanning more than 256 values (hi.intval - lo.intval > 256).
 * Used by ifswitchsplitpoint to avoid huge C `case lo ... hi:` arms on
 * compilers that support but dislike big GNU case ranges. */
N_NIMCALL(NIM_BOOL, branchhastoobigrange_547576_839829468)(Tnode292802* b0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
{
NI i_547591_839829468;
NI HEX3Atmp_547609_839829468;
NI LOC2;
NI res_547612_839829468;
i_547591_839829468 = (NI)0;
HEX3Atmp_547609_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(b0);
HEX3Atmp_547609_839829468 = (NI)(LOC2 - ((NI) 2));
res_547612_839829468 = ((NI) 0);
{
while (1) {
if (!(res_547612_839829468 <= HEX3Atmp_547609_839829468)) goto LA4;
i_547591_839829468 = res_547612_839829468;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
/* Short-circuit `and`: only evaluate the span when the son is a range. */
LOC7 = ((*(*b0).kindU.S6.sons->data[i_547591_839829468]).kind == ((Tnodekind292020) 44));
if (!(LOC7)) goto LA8;
LOC7 = (IL64(256) < (NI64)((*(*(*b0).kindU.S6.sons->data[i_547591_839829468]).kindU.S6.sons->data[((NI) 1)]).kindU.S1.intval - (*(*(*b0).kindU.S6.sons->data[i_547591_839829468]).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval));
LA8: ;
if (!LOC7) goto LA9;
result0 = NIM_TRUE;
goto BeforeRet;
}
LA9: ;
res_547612_839829468 += ((NI) 1);
} LA4: ;
}
}
}BeforeRet: ;
return result0;
}
/* Nim-compiler-generated C. ifswitchsplitpoint: scans the branches of case
 * node n0 and returns the index of the last branch that must be compiled as
 * an if-chain rather than as part of a C switch. A branch forces this when
 * its statement block contains a specific pragma (special word 181), or —
 * when the configured C compiler lacks capability bit 0 (presumably GNU-style
 * `case lo ... hi` ranges; TODO confirm) — when it is an of-branch (kind 85)
 * with a range wider than 256 (see branchhastoobigrange). Returns 0 when no
 * branch needs the if-chain. */
N_NIMCALL(NI, ifswitchsplitpoint_547616_839829468)(Tcproc529021* p0, Tnode292802* n0) {
NI result0;
result0 = (NI)0;
{
NI i_547631_839829468;
NI HEX3Atmp_547655_839829468;
NI LOC2;
NI res_547658_839829468;
i_547631_839829468 = (NI)0;
HEX3Atmp_547655_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = len_293081_850551059(n0);
HEX3Atmp_547655_839829468 = (NI)(LOC2 - ((NI) 1));
res_547658_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* branch0;
Tnode292802* stmtblock0;
if (!(res_547658_839829468 <= HEX3Atmp_547655_839829468)) goto LA4;
i_547631_839829468 = res_547658_839829468;
branch0 = HEX5BHEX5D_293238_850551059(n0, i_547631_839829468);
stmtblock0 = lastson_295364_850551059(branch0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = stmtscontainpragma_528083_2036603609(stmtblock0, ((Tspecialword275003) 181));
if (!LOC7) goto LA8;
result0 = i_547631_839829468;
}
goto LA5;
LA8: ;
{
/* Capability check on the active C compiler's feature bitmask. */
if (!!(((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 0))&7U)))!=0))) goto LA11;
{
NIM_BOOL LOC15;
LOC15 = (NIM_BOOL)0;
LOC15 = ((*branch0).kind == ((Tnodekind292020) 85));
if (!(LOC15)) goto LA16;
LOC15 = branchhastoobigrange_547576_839829468(branch0);
LA16: ;
if (!LOC15) goto LA17;
result0 = i_547631_839829468;
}
LA17: ;
}
goto LA5;
LA11: ;
LA5: ;
res_547658_839829468 += ((NI) 1);
} LA4: ;
}
}
return result0;
}
/* Nim-compiler-generated C. genordinalcase: emits C for a Nim `case` over an
 * ordinal selector. Branches up to the split point (see ifswitchsplitpoint)
 * become an if-chain (genifforcaseuntil); the remaining branches become a C
 * `switch`. A trailing unreachable-default hint (T839829468_604) is emitted
 * when the compiler has capability bit 3 and no default branch was seen. */
N_NIMCALL(void, genordinalcase_547725_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
NI splitpoint0;
Tloc292816 a0;
Ropeobj178006* lend0;
splitpoint0 = ifswitchsplitpoint_547616_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
/* If-chain part for branches 1..splitpoint; lend0 is the end label to patch. */
{
if (!(((NI) 0) < splitpoint0)) goto LA3;
lend0 = genifforcaseuntil_547021_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601), splitpoint0, (&a0));
}
goto LA1;
LA3: ;
{
lend0 = NIM_NIL;
}
LA1: ;
/* Switch part for branches splitpoint+1 .. len-1, if any remain. */
{
NI LOC8;
TY178507 LOC11;
NIM_BOOL hasdefault0;
TY533289 LOC37;
LOC8 = (NI)0;
LOC8 = len_293081_850551059(n0);
if (!((NI)(splitpoint0 + ((NI) 1)) < LOC8)) goto LA9;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = rdcharloc_538227_839829468((&a0));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), LOC11, 1);
hasdefault0 = NIM_FALSE;
{
NI i_547758_839829468;
NI HEX3Atmp_547817_839829468;
NI HEX3Atmp_547818_839829468;
NI LOC13;
NI res_547821_839829468;
i_547758_839829468 = (NI)0;
HEX3Atmp_547817_839829468 = (NI)0;
HEX3Atmp_547818_839829468 = (NI)0;
HEX3Atmp_547817_839829468 = (NI)(splitpoint0 + ((NI) 1));
LOC13 = (NI)0;
LOC13 = len_293081_850551059(n0);
HEX3Atmp_547818_839829468 = (LOC13 - 1);
res_547821_839829468 = HEX3Atmp_547817_839829468;
{
while (1) {
Tnode292802* branch0;
Tnode292802* LOC28;
TY533289 LOC29;
if (!(res_547821_839829468 <= HEX3Atmp_547818_839829468)) goto LA15;
i_547758_839829468 = res_547821_839829468;
/* Drop the destination loc when the case yields nothing usable
 * (d.k == 1 and the case's type is empty). */
{
NIM_BOOL LOC18;
LOC18 = (NIM_BOOL)0;
LOC18 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC18)) goto LA19;
LOC18 = isemptytype_297441_850551059((*n0).typ);
LA19: ;
if (!LOC18) goto LA20;
(*d0).k = ((Tlockind292808) 0);
}
LA20: ;
branch0 = HEX5BHEX5D_293238_850551059(n0, i_547758_839829468);
{
/* Of-branch (kind 85): emit `case ...:` labels; otherwise default. */
if (!((*branch0).kind == ((Tnodekind292020) 85))) goto LA24;
gencaserange_537028_839829468(p0, branch0);
}
goto LA22;
LA24: ;
{
TY533289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_181), LOC27, 0);
hasdefault0 = NIM_TRUE;
}
LA22: ;
LOC28 = (Tnode292802*)0;
LOC28 = lastson_295364_850551059(branch0);
exprblock_544103_839829468(p0, LOC28, d0);
memset((void*)LOC29, 0, sizeof(LOC29));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_182), LOC29, 0);
res_547821_839829468 += ((NI) 1);
} LA15: ;
}
}
{
NIM_BOOL LOC32;
TY533289 LOC36;
LOC32 = (NIM_BOOL)0;
LOC32 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 3))&7U)))!=0);
if (!(LOC32)) goto LA33;
LOC32 = !(hasdefault0);
LA33: ;
if (!LOC32) goto LA34;
memset((void*)LOC36, 0, sizeof(LOC36));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_604), LOC36, 0);
}
LA34: ;
memset((void*)LOC37, 0, sizeof(LOC37));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC37, 0);
}
LA9: ;
/* Patch the end label left open by the if-chain part. */
{
if (!!((lend0 == NIM_NIL))) goto LA40;
fixlabel_539230_839829468(p0, lend0);
}
LA40: ;
}
/* Nim-compiler-generated C. gencase: top-level dispatcher for Nim `case`
 * statements/expressions. Allocates a destination temp when the case has a
 * non-empty type and no destination yet, then switches on the (skipped)
 * selector type: kind 28 -> string case; kinds 36..39 -> generic if-chain;
 * otherwise goto-based generation when the selector symbol carries flag 30,
 * else the ordinal switch/if-chain hybrid. */
N_NIMCALL(void, gencase_547827_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Ttype292840* LOC8;
genlinedir_532823_839829468(p0, t0);
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
LOC8 = (Ttype292840*)0;
LOC8 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106242013440));
switch ((*LOC8).kind) {
case ((Ttypekind292244) 28):
{
genstringcase_547417_839829468(p0, t0, d0);
}
break;
case ((Ttypekind292244) 36) ... ((Ttypekind292244) 39):
{
gencasegeneric_547087_839829468(p0, t0, d0, ((NimStringDesc*) &T839829468_600), ((NimStringDesc*) &T839829468_601));
}
break;
default:
{
{
NIM_BOOL LOC14;
LOC14 = (NIM_BOOL)0;
/* Selector is a symbol node (kind 3) with flag 30 -> goto-variable case. */
LOC14 = ((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC14)) goto LA15;
LOC14 = (((*(*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
LA15: ;
if (!LOC14) goto LA16;
gengotoforcase_545673_839829468(p0, t0);
}
goto LA12;
LA16: ;
{
genordinalcase_547725_839829468(p0, t0, d0);
}
LA12: ;
}
break;
}
}
/* Nim-compiler-generated seq helper: removes and returns the last element of
 * the node seq pointed to by s0, shrinking it via the runtime's setLengthSeq.
 * Note: a nil seq is treated as length 0, producing index -1 just like the
 * original generated code (callers are expected to pass a non-empty seq). */
static N_INLINE(Tnode292802*, pop_318246_1689653243)(Tnodeseq292796** s0) {
	Tnode292802* popped;
	NI lastIdx;
	popped = (Tnode292802*)0;
	lastIdx = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
	popped = (*s0)->data[lastIdx];
	(*s0) = (Tnodeseq292796*) setLengthSeq(&((*s0))->Sup, sizeof(Tnode292802*), ((NI) (lastIdx)));
	return popped;
}
/* Nim-compiler-generated C. blockleaveactions: emits the cleanup code needed
 * when control leaves `howmanytrys0` enclosing try statements and
 * `howmanyexcepts0` enclosing except blocks (used by return/break codegen).
 * It pops each nested try from p0.nestedtrystmts (emitting a safe-point pop,
 * T839829468_605, unless suppressed by the command/flag check or already
 * popped inside an except block), runs any `finally` body (node kind 107),
 * then pushes the trys back in their original order, and finally emits
 * `howmanyexcepts0` exception pops (T839829468_606) under the same guard. */
N_NIMCALL(void, blockleaveactions_545442_839829468)(Tcproc529021* p0, NI howmanytrys0, NI howmanyexcepts0) {
Tnodeseq292796* stack0;
NI alreadypoppedcnt0;
stack0 = (Tnodeseq292796*)0;
stack0 = (Tnodeseq292796*) newSeq((&NTI292796), ((NI) 0));
alreadypoppedcnt0 = (*p0).inexceptblock;
{
NI i_545471_839829468;
NI res_545596_839829468;
i_545471_839829468 = (NI)0;
res_545596_839829468 = ((NI) 1);
{
while (1) {
Tnode292802* trystmt0;
Tnode292802* finallystmt0;
if (!(res_545596_839829468 <= howmanytrys0)) goto LA3;
i_545471_839829468 = res_545596_839829468;
{
NIM_BOOL LOC6;
LOC6 = (NIM_BOOL)0;
/* Guard: skip safe-point pops for command kind 2 or module flag 27
 * (presumably a no-exception-support mode; TODO confirm). */
LOC6 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC6) goto LA7;
LOC6 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA7: ;
if (!!(LOC6)) goto LA8;
{
/* Safe points already popped by enclosing except blocks are counted
 * down instead of emitting another pop. */
if (!(((NI) 0) < alreadypoppedcnt0)) goto LA12;
alreadypoppedcnt0 -= ((NI) 1);
}
goto LA10;
LA12: ;
{
TY533289 LOC15;
memset((void*)LOC15, 0, sizeof(LOC15));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC15, 0);
}
LA10: ;
}
LA8: ;
trystmt0 = pop_318246_1689653243((&(*p0).nestedtrystmts));
stack0 = (Tnodeseq292796*) incrSeqV2(&(stack0)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&stack0->data[stack0->Sup.len]), trystmt0);
++stack0->Sup.len;
finallystmt0 = lastson_295364_850551059(trystmt0);
{
if (!((*finallystmt0).kind == ((Tnodekind292020) 107))) goto LA18;
genstmts_539244_839829468(p0, (*finallystmt0).kindU.S6.sons->data[((NI) 0)]);
}
LA18: ;
res_545596_839829468 += ((NI) 1);
} LA3: ;
}
}
/* Restore the nested-try stack: push back in reverse iteration order so the
 * original order of p0.nestedtrystmts is preserved. */
{
NI i_545546_839829468;
NI HEX3Atmp_545601_839829468;
NI res_545604_839829468;
i_545546_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)0;
HEX3Atmp_545601_839829468 = (NI)(howmanytrys0 - ((NI) 1));
res_545604_839829468 = HEX3Atmp_545601_839829468;
{
while (1) {
if (!(((NI) 0) <= res_545604_839829468)) goto LA22;
i_545546_839829468 = res_545604_839829468;
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), stack0->data[i_545546_839829468]);
++(*p0).nestedtrystmts->Sup.len;
res_545604_839829468 -= ((NI) 1);
} LA22: ;
}
}
{
NIM_BOOL LOC25;
LOC25 = (NIM_BOOL)0;
LOC25 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC25) goto LA26;
LOC25 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA26: ;
if (!!(LOC25)) goto LA27;
{
NI i_545587_839829468;
NI HEX3Atmp_545610_839829468;
NI res_545613_839829468;
i_545587_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)0;
HEX3Atmp_545610_839829468 = (NI)(howmanyexcepts0 - ((NI) 1));
res_545613_839829468 = HEX3Atmp_545610_839829468;
{
while (1) {
TY533289 LOC32;
if (!(((NI) 0) <= res_545613_839829468)) goto LA31;
i_545587_839829468 = res_545613_839829468;
memset((void*)LOC32, 0, sizeof(LOC32));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC32, 0);
res_545613_839829468 -= ((NI) 1);
} LA31: ;
}
}
}
LA27: ;
}
/* Nim-compiler-generated C. genreturnstmt: emits C for a Nim `return`.
 * Skips nodes carrying node flag 14; otherwise marks that a BeforeRet label
 * is needed, generates the optional return-value assignment (son 0 when not
 * empty, kind 1), emits leave actions for all enclosing try/except blocks,
 * emits a safe-point store (T839829468_607) when inside finallysafepoints,
 * then the actual `goto BeforeRet`-style return line (T839829468_608). */
N_NIMCALL(void, genreturnstmt_545617_839829468)(Tcproc529021* p0, Tnode292802* t0) {
TY533289 LOC14;
{ {
if (!(((*t0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0)) goto LA3;
goto BeforeRet;
}
LA3: ;
(*p0).beforeretneeded = NIM_TRUE;
genlinedir_532823_839829468(p0, t0);
{
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA7;
genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
blockleaveactions_545442_839829468(p0, ((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0), (*p0).inexceptblock);
{
Ropeobj178006* safepoint0;
TY178507 LOC13;
if (!(((NI) 0) < ((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0))) goto LA11;
safepoint0 = (*p0).finallysafepoints->data[(NI)(((*p0).finallysafepoints ? (*p0).finallysafepoints->Sup.len : 0) - ((NI) 1))];
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_607), LOC13, 1);
}
LA11: ;
memset((void*)LOC14, 0, sizeof(LOC14));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_608), LOC14, 0);
}BeforeRet: ;
}
/* Nim-compiler-generated C. genbreakstmt: emits C for a Nim `break`. With a
 * label (son 0 is a symbol), the target block index comes from the symbol's
 * position - 1; without one, it walks p0.blocks downward from breakidx to the
 * innermost loop block, raising an internal error if none exists. It then
 * assigns/gets the block's C label, emits try/except leave actions relative
 * to that block's recorded nesting counters, and emits the goto line
 * (T839829468_556). */
N_NIMCALL(void, genbreakstmt_546444_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NI idx0;
Ropeobj178006* label0;
TY178507 LOC16;
idx0 = (*p0).breakidx;
{
Tsym292834* sym0;
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA3;
sym0 = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
idx0 = (NI)((*sym0).position - ((NI) 1));
}
goto LA1;
LA3: ;
{
/* Unlabeled break: search outward for the nearest loop block. */
{
while (1) {
NIM_BOOL LOC8;
LOC8 = (NIM_BOOL)0;
LOC8 = (((NI) 0) <= idx0);
if (!(LOC8)) goto LA9;
LOC8 = !((*p0).blocks->data[idx0].isloop);
LA9: ;
if (!LOC8) goto LA7;
idx0 -= ((NI) 1);
} LA7: ;
}
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = (idx0 < ((NI) 0));
if (LOC12) goto LA13;
LOC12 = !((*p0).blocks->data[idx0].isloop);
LA13: ;
if (!LOC12) goto LA14;
internalerror_196100_155036129((*t0).info, ((NimStringDesc*) &T839829468_609));
}
LA14: ;
}
LA1: ;
label0 = assignlabel_544020_839829468((&(*p0).blocks->data[idx0]));
/* Leave exactly the trys/excepts entered since the target block was opened. */
blockleaveactions_545442_839829468(p0, (NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) ((*p0).blocks->data[idx0].nestedtrystmts))), (NI)((*p0).inexceptblock - ((NI) ((*p0).blocks->data[idx0].nestedexceptstmts))));
genlinedir_532823_839829468(p0, t0);
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = label0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_556), LOC16, 1);
}
/* Nim-compiler-generated C. fielddiscriminantcheckneeded: decides whether an
 * assignment needs an object-variant discriminant check. Only when proc
 * option bit 2 is enabled: the left-hand side must be a field access (node
 * kind 46 or 45) whose field symbol carries flag 18 (the discriminator
 * flag, presumably; TODO confirm against the Tsymflag enum). */
N_NIMCALL(NIM_BOOL, fielddiscriminantcheckneeded_549080_839829468)(Tcproc529021* p0, Tnode292802* asgn0) {
NIM_BOOL result0;
result0 = (NIM_BOOL)0;
{
Tnode292802* le0;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 2))&31U)))!=0)) goto LA3;
le0 = (*asgn0).kindU.S6.sons->data[((NI) 0)];
{
Tsym292834* field0;
/* Kind 46: checked field access — the field symbol sits one level deeper. */
if (!((*le0).kind == ((Tnodekind292020) 46))) goto LA7;
field0 = (*(*(*le0).kindU.S6.sons->data[((NI) 0)]).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
}
goto LA5;
LA7: ;
{
Tsym292834* field0;
/* Kind 45: plain dot access — field symbol is son 1. */
if (!((*le0).kind == ((Tnodekind292020) 45))) goto LA10;
field0 = (*(*le0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym;
result0 = (((*field0).flags &(1U<<((NU)(((Tsymflag292184) 18))&31U)))!=0);
}
goto LA5;
LA10: ;
LA5: ;
}
LA3: ;
return result0;
}
/* Nim-compiler-generated C. discriminatortabledecl: builds the rope for the
 * C declaration of a discriminant lookup table for object-variant field d0
 * of objtype0. The table name comes from discriminatortablename and its size
 * is lengthord(d0.typ) + 1; the declaration text is format T839829468_203
 * with {name, size}. Also forces cgsym emission of a compilerproc symbol
 * (T839829468_130) first. */
N_NIMCALL(Ropeobj178006*, discriminatortabledecl_536094_839829468)(Tcgen529027* m0, Ttype292840* objtype0, Tsym292834* d0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
Ropeobj178006* tmp0;
TY532811 LOC2;
NI64 LOC3;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_130));
tmp0 = discriminatortablename_536057_839829468(m0, objtype0, d0);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = tmp0;
LOC3 = (NI64)0;
LOC3 = lengthord_320007_3876443242((*d0).typ);
LOC2[1] = rope_178401_2381377266((NI64)(LOC3 + IL64(1)));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_203), LOC2, 2);
return result0;
}
/* Nim-compiler-generated C. gendiscriminantcheck: emits the runtime check for
 * assigning a new value (tmp0) to an object-variant discriminator field of
 * a0. Declares the per-field discriminant table once per module (guarded by
 * containsorincl on declaredthings), then emits the check call
 * (T839829468_611) with {lhs, newval, tablename, lengthord(field)+1}. */
N_NIMCALL(void, gendiscriminantcheck_549144_839829468)(Tcproc529021* p0, Tloc292816* a0, Tloc292816* tmp0, Ttype292840* objtype0, Tsym292834* field0) {
Ttype292840* t0;
Ropeobj178006* LOC1;
NI64 L0;
TY535235 LOC8;
t0 = skiptypes_296099_850551059(objtype0, IL64(211106240964864));
LOC1 = (Ropeobj178006*)0;
LOC1 = gentypeinfo_535941_839829468((*p0).module, t0);
L0 = lengthord_320007_3876443242((*field0).typ);
{
NIM_BOOL LOC4;
TY178507 LOC7;
LOC4 = (NIM_BOOL)0;
/* Declare the table only the first time this field id is seen. */
LOC4 = containsorincl_268862_2627731572((&(*(*p0).module).declaredthings), (*field0).Sup.id);
if (!!(LOC4)) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = discriminatortabledecl_536094_839829468((*p0).module, t0, field0);
appcg_532640_839829468((*p0).module, ((Tcfilesection529005) 9), ((NimStringDesc*) &T839829468_610), LOC7, 1);
}
LA5: ;
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = rdloc_538188_839829468(a0);
LOC8[1] = rdloc_538188_839829468(tmp0);
LOC8[2] = discriminatortablename_536057_839829468((*p0).module, t0, field0);
LOC8[3] = intliteral_539270_839829468((NI64)(L0 + IL64(1)));
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_611), LOC8, 4);
}
/* Nim-compiler-generated C. asgnfielddiscriminant: compiles an assignment to
 * an object-variant discriminator field. Evaluates the LHS, evaluates the
 * RHS into a temp of the same type, emits the discriminant check (see
 * gendiscriminantcheck), then performs the actual assignment. A checked
 * field access (kind 46) is unwrapped to the underlying dot expression. */
N_NIMCALL(void, asgnfielddiscriminant_549209_839829468)(Tcproc529021* p0, Tnode292802* e0) {
Tloc292816 a0;
Tloc292816 tmp0;
Tnode292802* dotexpr0;
memset((void*)(&a0), 0, sizeof(a0));
memset((void*)(&tmp0), 0, sizeof(tmp0));
dotexpr0 = (*e0).kindU.S6.sons->data[((NI) 0)];
{
if (!((*dotexpr0).kind == ((Tnodekind292020) 46))) goto LA3;
dotexpr0 = (*dotexpr0).kindU.S6.sons->data[((NI) 0)];
}
LA3: ;
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
gettemp_537032_839829468(p0, a0.t, (&tmp0), NIM_FALSE);
expr_539248_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)], (&tmp0));
gendiscriminantcheck_549144_839829468(p0, (&a0), (&tmp0), (*(*dotexpr0).kindU.S6.sons->data[((NI) 0)]).typ, (*(*dotexpr0).kindU.S6.sons->data[((NI) 1)]).kindU.S4.sym);
genassignment_539264_839829468(p0, (&a0), (&tmp0), 0);
}
/* Nim-compiler-generated C. genasgn: compiles a Nim assignment. Three paths:
 * (1) LHS is a goto-variable symbol (kind 3 with flag 30) -> gengotovar;
 * (2) discriminant check needed -> asgnfielddiscriminant;
 * (3) normal: evaluate the LHS (via genderef for deref-like kinds 47/65,
 *     else initlocexpr), optionally mark the loc for a shallow/fast copy
 *     (loc flag 2) when fastasgn0, then loadinto. */
N_NIMCALL(void, genasgn_549239_839829468)(Tcproc529021* p0, Tnode292802* e0, NIM_BOOL fastasgn0) {
genlinedir_532823_839829468(p0, e0);
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
LOC3 = ((*(*e0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 3));
if (!(LOC3)) goto LA4;
LOC3 = (((*(*(*e0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym).flags &(1U<<((NU)(((Tsymflag292184) 30))&31U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
gengotovar_544258_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 1)]);
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC8;
Tloc292816 a0;
LOC8 = (NIM_BOOL)0;
LOC8 = fielddiscriminantcheckneeded_549080_839829468(p0, e0);
if (!!(LOC8)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
{
Tnode292802* LOC13;
Tnode292802* LOC16;
LOC13 = (Tnode292802*)0;
LOC13 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
if (!((*LOC13).kind == ((Tnodekind292020) 47) || (*LOC13).kind == ((Tnodekind292020) 65))) goto LA14;
LOC16 = (Tnode292802*)0;
LOC16 = HEX5BHEX5D_293238_850551059(e0, ((NI) 0));
genderef_543921_839829468(p0, LOC16, (&a0), NIM_TRUE);
}
goto LA11;
LA14: ;
{
initlocexpr_539283_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA11: ;
{
if (!fastasgn0) goto LA20;
a0.flags |= ((NU16)1)<<((((Tlocflag292810) 2))%(sizeof(NU16)*8));
}
LA20: ;
loadinto_543928_839829468(p0, (*e0).kindU.S6.sons->data[((NI) 0)], (*e0).kindU.S6.sons->data[((NI) 1)], (&a0));
}
goto LA1;
LA9: ;
{
asgnfielddiscriminant_549209_839829468(p0, e0);
}
LA1: ;
}
/* Nim-compiler-generated C. genasmoremitstmt: renders the pieces of a Nim
 * `asm` or `emit` statement into one rope. String-literal sons (kinds 20-22)
 * are appended verbatim; symbol sons (kind 3) are substituted with the
 * symbol's C representation: its loc for value-like symbol kinds (mask
 * 28672), its C type name for type symbols (kind 7), otherwise its (possibly
 * freshly mangled) loc rope. Any other node kind is an internal error.
 * For `asm` on a compiler with capability bit 5, the accumulated text is
 * post-processed line by line: lines starting with '"' or ':' pass through,
 * other non-empty lines are wrapped with prefix/suffix strings
 * (T839829468_613/_614) — presumably quoting for the target assembler
 * syntax; TODO confirm. Otherwise the raw text plus a newline is returned. */
N_NIMCALL(Ropeobj178006*, genasmoremitstmt_548529_839829468)(Tcproc529021* p0, Tnode292802* t0, NIM_BOOL isasmstmt0) {
Ropeobj178006* result0;
NimStringDesc* res0;
result0 = (Ropeobj178006*)0;
res0 = copyString(((NimStringDesc*) &T839829468_490));
{
NI i_548547_839829468;
NI HEX3Atmp_548644_839829468;
NI LOC2;
NI res_548647_839829468;
i_548547_839829468 = (NI)0;
HEX3Atmp_548644_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(t0);
HEX3Atmp_548644_839829468 = (NI)(LOC2 - ((NI) 1));
res_548647_839829468 = ((NI) 0);
{
while (1) {
if (!(res_548647_839829468 <= HEX3Atmp_548644_839829468)) goto LA4;
i_548547_839829468 = res_548647_839829468;
switch ((*(*t0).kindU.S6.sons->data[i_548547_839829468]).kind) {
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
res0 = resizeString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval->Sup.len + 0);
appendString(res0, (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S3.strval);
}
break;
case ((Tnodekind292020) 3):
{
Tsym292834* sym0;
sym0 = (*(*t0).kindU.S6.sons->data[i_548547_839829468]).kindU.S4.sym;
{
Tloc292816 a0;
Ropeobj178006* LOC11;
NimStringDesc* LOC12;
/* Symbol-kind bitmask 28672: substitute the symbol's evaluated loc. */
if (!((28672 &(1U<<((NU)((*sym0).kind)&31U)))!=0)) goto LA9;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[i_548547_839829468], (&a0));
LOC11 = (Ropeobj178006*)0;
LOC11 = rdloc_538188_839829468((&a0));
LOC12 = (NimStringDesc*)0;
LOC12 = HEX24_178856_2381377266(LOC11);
res0 = resizeString(res0, LOC12->Sup.len + 0);
appendString(res0, LOC12);
}
goto LA7;
LA9: ;
{
Ropeobj178006* LOC16;
NimStringDesc* LOC17;
/* Symbol kind 7: substitute the C type descriptor. */
if (!((*sym0).kind == ((Tsymkind292435) 7))) goto LA14;
LOC16 = (Ropeobj178006*)0;
LOC16 = gettypedesc_535673_839829468((*p0).module, (*sym0).typ);
LOC17 = (NimStringDesc*)0;
LOC17 = HEX24_178856_2381377266(LOC16);
res0 = resizeString(res0, LOC17->Sup.len + 0);
appendString(res0, LOC17);
}
goto LA7;
LA14: ;
{
Ropeobj178006* r0;
NimStringDesc* LOC23;
/* Fallback: use (and cache) the mangled name of the symbol. */
r0 = (*sym0).loc.r;
{
if (!(r0 == NIM_NIL)) goto LA21;
r0 = manglename_533205_839829468(sym0);
asgnRefNoCycle((void**) (&(*sym0).loc.r), r0);
}
LA21: ;
LOC23 = (NimStringDesc*)0;
LOC23 = HEX24_178856_2381377266(r0);
res0 = resizeString(res0, LOC23->Sup.len + 0);
appendString(res0, LOC23);
}
LA7: ;
}
break;
default:
{
internalerror_196100_155036129((*(*t0).kindU.S6.sons->data[i_548547_839829468]).info, ((NimStringDesc*) &T839829468_612));
}
break;
}
res_548647_839829468 += ((NI) 1);
} LA4: ;
}
}
{
NIM_BOOL LOC27;
LOC27 = (NIM_BOOL)0;
LOC27 = isasmstmt0;
if (!(LOC27)) goto LA28;
LOC27 = ((Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field20 &(1U<<((NU)(((Tinfoccprop273004) 5))&7U)))!=0);
LA28: ;
if (!LOC27) goto LA29;
/* Line-splitting post-pass: first/last delimit the current source line;
 * NUL, CR and LF terminate a line; leading spaces/tabs are skipped before
 * inspecting the first significant character. */
{
NimStringDesc* x_548604_839829468;
NI first_548656_839829468;
NI last_548658_839829468;
x_548604_839829468 = (NimStringDesc*)0;
first_548656_839829468 = ((NI) 0);
last_548658_839829468 = ((NI) 0);
{
while (1) {
NI j0;
{
while (1) {
if (!!((((NU8)(res0->data[last_548658_839829468])) == ((NU8)(0)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(13)) || ((NU8)(res0->data[last_548658_839829468])) == ((NU8)(10))))) goto LA35;
last_548658_839829468 += ((NI) 1);
} LA35: ;
}
x_548604_839829468 = copyStrLast(res0, first_548656_839829468, (NI)(last_548658_839829468 - ((NI) 1)));
j0 = ((NI) 0);
{
while (1) {
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(32)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(9)))) goto LA37;
j0 += ((NI) 1);
} LA37: ;
}
{
/* Lines already starting with '"' (34) or ':' (58) go through as-is. */
if (!(((NU8)(x_548604_839829468->data[j0])) == ((NU8)(34)) || ((NU8)(x_548604_839829468->data[j0])) == ((NU8)(58)))) goto LA40;
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, tnl_176644_4151366050);
}
goto LA38;
LA40: ;
{
if (!!(((NU8)(x_548604_839829468->data[j0]) == (NU8)(0)))) goto LA43;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_613));
add_178487_2381377266(&result0, x_548604_839829468);
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_614));
}
goto LA38;
LA43: ;
LA38: ;
/* Consume the line terminator: LF, CR, or CRLF; NUL ends the pass. */
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA47;
last_548658_839829468 += ((NI) 1);
}
goto LA45;
LA47: ;
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(13))) goto LA50;
last_548658_839829468 += ((NI) 1);
{
if (!((NU8)(res0->data[last_548658_839829468]) == (NU8)(10))) goto LA54;
last_548658_839829468 += ((NI) 1);
}
LA54: ;
}
goto LA45;
LA50: ;
{
goto LA32;
}
LA45: ;
first_548656_839829468 = last_548658_839829468;
}
} LA32: ;
}
}
goto LA25;
LA29: ;
{
res0 = resizeString(res0, tnl_176644_4151366050->Sup.len + 0);
appendString(res0, tnl_176644_4151366050);
result0 = rope_178277_2381377266(res0);
}
LA25: ;
return result0;
}
/* Nim-compiler-generated C. genasmstmt: compiles a Nim `asm` statement.
 * Renders the asm text via genasmoremitstmt, then emits it using the active
 * C compiler's asm statement format (Field17): into a module-level file
 * section (section 7) when there is no enclosing proc (p0.prc == nil),
 * otherwise into the proc's statement section. */
N_NIMCALL(void, genasmstmt_548659_839829468)(Tcproc529021* p0, Tnode292802* t0) {
Ropeobj178006* s0;
genlinedir_532823_839829468(p0, t0);
s0 = genasmoremitstmt_548529_839829468(p0, t0, NIM_TRUE);
{
TY178507 LOC5;
if (!((*p0).prc == NIM_NIL)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = s0;
addf_179205_2381377266(&(*(*p0).module).s[(((Tcfilesection529005) 7))- 0], Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC5, 1);
}
goto LA1;
LA3: ;
{
TY178507 LOC7;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = s0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field17, LOC7, 1);
}
LA1: ;
}
/* Nim-compiler-generated helper: wraps stmts0 in a plain C block — opens a
 * block via startblock (its returned block id is not used here), generates
 * the statements, then closes the block. */
static N_INLINE(void, gensimpleblock_544095_839829468)(Tcproc529021* p0, Tnode292802* stmts0) {
	TY533289 fmtArgs;
	NI unusedBlockId;
	memset((void*)fmtArgs, 0, sizeof(fmtArgs));
	unusedBlockId = (NI)0;
	unusedBlockId = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), fmtArgs, 0);
	genstmts_539244_839829468(p0, stmts0);
	endblock_544060_839829468(p0);
}
/* Nim-compiler-generated C. gentrycpp: compiles a Nim `try` statement for the
 * C++ backend (real C++ try/catch). Outline visible in the code:
 *  - allocate a destination temp when the try has a non-empty type and no loc;
 *  - force emission of a compilerproc symbol (T839829468_615 if registered,
 *    else T839829468_616);
 *  - push t0 onto p0.nestedtrystmts, emit `try { <body> } ...` (block header
 *    T839829468_617, catch header built from T839829468_618 with a fresh
 *    temp name `exc0`);
 *  - for each except branch (node kind 87): a bare branch (one son) becomes
 *    a catch-all block; otherwise build an or-chain of exception type tests
 *    (T839829468_621) and emit an `if` (T839829468_622), each ending with an
 *    exception pop (T839829468_606);
 *  - when no catch-all exists, emit a re-raise tail (T839829468_623);
 *  - pop t0 back off nestedtrystmts and generate the trailing `finally`
 *    section (node kind 107), if present, as a simple block. */
N_NIMCALL(void, gentrycpp_547866_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
Ropeobj178006* exc0;
TY533289 LOC16;
NI LOC17;
NI length0;
TY178507 LOC18;
Ropeobj178006* LOC19;
NI i0;
NIM_BOOL catchallpresent0;
TY533289 LOC78;
Tnode292802* LOC79;
{
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
genlinedir_532823_839829468(p0, t0);
exc0 = gettempname_533598_839829468((*p0).module);
{
Tsym292834* LOC10;
Ropeobj178006* LOC13;
LOC10 = (Tsym292834*)0;
LOC10 = getcompilerproc_338748_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC10 == NIM_NIL))) goto LA11;
LOC13 = (Ropeobj178006*)0;
LOC13 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA8;
LA11: ;
{
Ropeobj178006* LOC15;
LOC15 = (Ropeobj178006*)0;
LOC15 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA8: ;
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC17 = (NI)0;
LOC17 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_617), LOC16, 0);
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
length0 = sonslen_295351_850551059(t0);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = exc0;
LOC19 = (Ropeobj178006*)0;
LOC19 = ropecg_532407_839829468((*p0).module, ((NimStringDesc*) &T839829468_618), LOC18, 1);
endblock_544035_839829468(p0, LOC19);
{
TY533289 LOC24;
/* Proc option bit 15: emit an extra line (T839829468_619) inside the catch. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC24, 0);
}
LA22: ;
(*p0).inexceptblock += ((NI) 1);
i0 = ((NI) 1);
catchallpresent0 = NIM_FALSE;
/* Walk the except branches (kind 87) following the try body. */
{
while (1) {
NIM_BOOL LOC27;
NI blen0;
LOC27 = (NIM_BOOL)0;
LOC27 = (i0 < length0);
if (!(LOC27)) goto LA28;
LOC27 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA28: ;
if (!LOC27) goto LA26;
{
NIM_BOOL LOC31;
LOC31 = (NIM_BOOL)0;
LOC31 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC31)) goto LA32;
LOC31 = isemptytype_297441_850551059((*t0).typ);
LA32: ;
if (!LOC31) goto LA33;
(*d0).k = ((Tlockind292808) 0);
}
LA33: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
{
Ropeobj178006** LOC39;
TY533289 LOC40;
/* Branches after the first get an `else` connector (T839829468_620). */
if (!(((NI) 1) < i0)) goto LA37;
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
memset((void*)LOC40, 0, sizeof(LOC40));
addf_179205_2381377266(LOC39, ((NimStringDesc*) &T839829468_620), LOC40, 0);
}
LA37: ;
{
TY533289 LOC45;
NI LOC46;
TY533289 LOC47;
/* Single-son branch: `except:` with no type list — catch everything. */
if (!(blen0 == ((NI) 1))) goto LA43;
catchallpresent0 = NIM_TRUE;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC46 = (NI)0;
LOC46 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC45, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC47, 0);
endblock_544060_839829468(p0);
}
goto LA41;
LA43: ;
{
Ropeobj178006* orexpr0;
TY178507 LOC57;
TY533289 LOC58;
NI LOC59;
TY533289 LOC60;
orexpr0 = NIM_NIL;
/* Build `test || test || ...` over the branch's exception types. */
{
NI j_547979_839829468;
NI HEX3Atmp_548101_839829468;
NI res_548104_839829468;
j_547979_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)0;
HEX3Atmp_548101_839829468 = (NI)(blen0 - ((NI) 2));
res_548104_839829468 = ((NI) 0);
{
while (1) {
TY532811 LOC56;
if (!(res_548104_839829468 <= HEX3Atmp_548101_839829468)) goto LA51;
j_547979_839829468 = res_548104_839829468;
{
if (!!((orexpr0 == NIM_NIL))) goto LA54;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA54: ;
memset((void*)LOC56, 0, sizeof(LOC56));
LOC56[0] = exc0;
LOC56[1] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_547979_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, ((NimStringDesc*) &T839829468_621), LOC56, 2);
res_548104_839829468 += ((NI) 1);
} LA51: ;
}
}
memset((void*)LOC57, 0, sizeof(LOC57));
LOC57[0] = orexpr0;
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_622), LOC57, 1);
memset((void*)LOC58, 0, sizeof(LOC58));
LOC59 = (NI)0;
LOC59 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC58, 0);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC60, 0, sizeof(LOC60));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC60, 0);
endblock_544060_839829468(p0);
}
LA41: ;
i0 += ((NI) 1);
} LA26: ;
}
/* No catch-all: emit the re-raise tail so unmatched exceptions propagate. */
{
TY533289 LOC70;
NI LOC71;
Tnode292802* finallyblock0;
TY533289 LOC76;
Ropeobj178006* LOC77;
if (!!(catchallpresent0)) goto LA63;
{
TY533289 LOC69;
if (!(((NI) 1) < i0)) goto LA67;
memset((void*)LOC69, 0, sizeof(LOC69));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620), LOC69, 0);
}
LA67: ;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC71 = (NI)0;
LOC71 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC70, 0);
finallyblock0 = lastson_295364_850551059(t0);
{
/* The finally body must run before the re-raise, too. */
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA74;
genstmts_539244_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA74: ;
memset((void*)LOC76, 0, sizeof(LOC76));
LOC77 = (Ropeobj178006*)0;
LOC77 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC76, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC77);
endblock_544060_839829468(p0);
}
LA63: ;
memset((void*)LOC78, 0, sizeof(LOC78));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), LOC78, 0);
(*p0).inexceptblock -= ((NI) 1);
LOC79 = (Tnode292802*)0;
LOC79 = pop_318246_1689653243((&(*p0).nestedtrystmts));
{
NIM_BOOL LOC82;
LOC82 = (NIM_BOOL)0;
LOC82 = (i0 < length0);
if (!(LOC82)) goto LA83;
LOC82 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA83: ;
if (!LOC82) goto LA84;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
}
LA84: ;
}
/* Nim-compiler-generated helper: appends the string r0, converted to a rope
 * and indented to the proc's current level, to section s0 of proc p0. */
N_NIMCALL(void, line_532695_839829468)(Tcproc529021* p0, Tcprocsection529011 s0, NimStringDesc* r0) {
	Ropeobj178006** sectionRope;
	Ropeobj178006* textRope;
	Ropeobj178006* indentedRope;
	sectionRope = (Ropeobj178006**)0;
	sectionRope = s_529179_3723162438(p0, s0);
	textRope = (Ropeobj178006*)0;
	textRope = rope_178277_2381377266(r0);
	indentedRope = (Ropeobj178006*)0;
	indentedRope = indentline_532656_839829468(p0, textRope);
	add_178482_2381377266(sectionRope, indentedRope);
}
static N_INLINE(Ropeobj178006*, pop_178530_1689653243)(TY191350** s0) {
/* Pop and return the last rope from the seq `*s0`, shrinking it by one.
   Precondition (as in the original generated code): the seq is non-empty. */
NI last = (NI)(((*s0) ? (*s0)->Sup.len : 0) - ((NI) 1));
Ropeobj178006* top = (*s0)->data[last];
(*s0) = (TY191350*) setLengthSeq(&((*s0))->Sup, sizeof(Ropeobj178006*), ((NI) (last)));
return top;
}
/* Generated code (Nim compiler backend): compiles a `try` statement into C.
   A "safe point" record is declared and pushed; the try body runs under a
   setjmp-style guard, except branches are matched either unconditionally
   (catch-all) or by runtime type tests, and a trailing `finally` branch
   (node kind 107) is emitted at the end. Control flow uses the generated
   `goto LAnn` pattern: each `{ ... } goto LAx; LAy: ; { ... }` pair is an
   if/else chain. Do not restructure — label order is load-bearing. */
N_NIMCALL(void, gentry_548114_839829468)(Tcproc529021* p0, Tnode292802* t0, Tloc292816* d0) {
NIM_BOOL LOC8;
Ropeobj178006* safepoint0;
TY178507 LOC17;
TY178507 LOC18;
TY178507 LOC37;
NI LOC38;
NI length0;
TY533289 LOC39;
TY533289 LOC40;
NI LOC41;
TY533289 LOC42;
NI i0;
Tnode292802* LOC95;
TY178507 LOC103;
{
/* If the try expression has a non-empty type and no destination yet,
   allocate a temporary to hold the result. */
NIM_BOOL LOC3;
NIM_BOOL LOC4;
LOC3 = (NIM_BOOL)0;
LOC4 = (NIM_BOOL)0;
LOC4 = isemptytype_297441_850551059((*t0).typ);
LOC3 = !(LOC4);
if (!(LOC3)) goto LA5;
LOC3 = ((*d0).k == ((Tlockind292808) 0));
LA5: ;
if (!LOC3) goto LA6;
gettemp_537032_839829468(p0, (*t0).typ, d0, NIM_FALSE);
}
LA6: ;
/* Ensure the required header is part of the module's header file set
   (return value deliberately unused). */
LOC8 = (NIM_BOOL)0;
LOC8 = includestr_147249_3771138726((&(*(*p0).module).headerfiles), ((NimStringDesc*) &T839829468_624));
genlinedir_532823_839829468(p0, t0);
/* Fresh name for the safe-point variable of this try statement. */
safepoint0 = gettempname_533598_839829468((*p0).module);
{
/* Emit a reference to one of two compiler procs, preferring the first
   if it is registered (names live in T..._615 / T..._616). */
Tsym292834* LOC11;
Ropeobj178006* LOC14;
LOC11 = (Tsym292834*)0;
LOC11 = getcompilerproc_338748_3937434831(((NimStringDesc*) &T839829468_615));
if (!!((LOC11 == NIM_NIL))) goto LA12;
LOC14 = (Ropeobj178006*)0;
LOC14 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_615));
}
goto LA9;
LA12: ;
{
Ropeobj178006* LOC16;
LOC16 = (Ropeobj178006*)0;
LOC16 = cgsym_532403_839829468((*p0).module, ((NimStringDesc*) &T839829468_616));
}
LA9: ;
/* Declare the safe point (section 0 = local declarations) and push it
   onto the runtime's exception-handler chain (section 2 = statements). */
memset((void*)LOC17, 0, sizeof(LOC17));
LOC17[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 0), ((NimStringDesc*) &T839829468_625), LOC17, 1);
memset((void*)LOC18, 0, sizeof(LOC18));
LOC18[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_626), LOC18, 1);
{
/* Choose the setjmp flavour to arm the safe point with, depending on
   which conditional define is active (presumably the nim*Setjmp
   variants — names are in the opaque T..._627..632 constants). */
NIM_BOOL LOC21;
TY178507 LOC24;
LOC21 = (NIM_BOOL)0;
LOC21 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_627));
if (!LOC21) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC24[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC24, 1);
}
goto LA19;
LA22: ;
{
NIM_BOOL LOC26;
TY178507 LOC29;
LOC26 = (NIM_BOOL)0;
LOC26 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_629));
if (!LOC26) goto LA27;
memset((void*)LOC29, 0, sizeof(LOC29));
LOC29[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_630), LOC29, 1);
}
goto LA19;
LA27: ;
{
NIM_BOOL LOC31;
TY178507 LOC34;
LOC31 = (NIM_BOOL)0;
LOC31 = isdefined_200011_1967573533(((NimStringDesc*) &T839829468_631));
if (!LOC31) goto LA32;
memset((void*)LOC34, 0, sizeof(LOC34));
LOC34[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_632), LOC34, 1);
}
goto LA19;
LA32: ;
{
/* Default flavour: same format string as the first branch (T..._628). */
TY178507 LOC36;
memset((void*)LOC36, 0, sizeof(LOC36));
LOC36[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_628), LOC36, 1);
}
LA19: ;
/* Open the guarded block for the try body. */
memset((void*)LOC37, 0, sizeof(LOC37));
LOC37[0] = safepoint0;
LOC38 = (NI)0;
LOC38 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_633), LOC37, 1);
length0 = sonslen_295351_850551059(t0);
/* Push this try node on p.nestedtrystmts (popped again below) so that
   `raise`/`return` inside the body can find the enclosing finally. */
(*p0).nestedtrystmts = (Tnodeseq292796*) incrSeqV2(&((*p0).nestedtrystmts)->Sup, sizeof(Tnode292802*));
asgnRefNoCycle((void**) (&(*p0).nestedtrystmts->data[(*p0).nestedtrystmts->Sup.len]), t0);
++(*p0).nestedtrystmts->Sup.len;
/* Compile the try body (first son). */
expr_539248_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC39, 0, sizeof(LOC39));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC39, 0);
endblock_544060_839829468(p0);
/* Open the "an exception occurred" block. */
memset((void*)LOC40, 0, sizeof(LOC40));
LOC41 = (NI)0;
LOC41 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_634), LOC40, 0);
memset((void*)LOC42, 0, sizeof(LOC42));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_605), LOC42, 0);
{
/* Extra emission when option bit 15 is set (stack-trace related —
   TODO confirm against Toption169009). */
TY533289 LOC47;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0)) goto LA45;
memset((void*)LOC47, 0, sizeof(LOC47));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_619), LOC47, 0);
}
LA45: ;
(*p0).inexceptblock += ((NI) 1);
/* Walk the except branches (node kind 87), starting after the body. */
i0 = ((NI) 1);
{
while (1) {
NIM_BOOL LOC50;
NI blen0;
LOC50 = (NIM_BOOL)0;
LOC50 = (i0 < length0);
if (!(LOC50)) goto LA51;
LOC50 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 87));
LA51: ;
if (!LOC50) goto LA49;
{
/* A `try` used as an expression with an empty type in an except
   branch: drop the destination again. */
NIM_BOOL LOC54;
LOC54 = (NIM_BOOL)0;
LOC54 = ((*d0).k == ((Tlockind292808) 1));
if (!(LOC54)) goto LA55;
LOC54 = isemptytype_297441_850551059((*t0).typ);
LA55: ;
if (!LOC54) goto LA56;
(*d0).k = ((Tlockind292808) 0);
}
LA56: ;
blen0 = sonslen_295351_850551059((*t0).kindU.S6.sons->data[i0]);
{
/* blen == 1: catch-all except branch (no exception types listed). */
TY533289 LOC67;
NI LOC68;
TY178507 LOC69;
TY533289 LOC70;
if (!(blen0 == ((NI) 1))) goto LA60;
{
/* Chain with `else` when this is not the first except branch. */
TY533289 LOC66;
if (!(((NI) 1) < i0)) goto LA64;
memset((void*)LOC66, 0, sizeof(LOC66));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_635), LOC66, 0);
}
LA64: ;
memset((void*)LOC67, 0, sizeof(LOC67));
LOC68 = (NI)0;
LOC68 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), LOC67, 0);
/* Mark the safe point's status as handled before running the handler. */
memset((void*)LOC69, 0, sizeof(LOC69));
LOC69[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC69, 1);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)], d0);
memset((void*)LOC70, 0, sizeof(LOC70));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC70, 0);
endblock_544060_839829468(p0);
}
goto LA58;
LA60: ;
{
/* blen > 1: typed except branch. Build `orexpr` — a disjunction of
   runtime type tests, one per listed exception type. */
Ropeobj178006* orexpr0;
TY178507 LOC91;
NI LOC92;
TY178507 LOC93;
TY533289 LOC94;
orexpr0 = NIM_NIL;
{
NI j_548247_839829468;
NI HEX3Atmp_548521_839829468;
NI res_548524_839829468;
j_548247_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)0;
HEX3Atmp_548521_839829468 = (NI)(blen0 - ((NI) 2));
res_548524_839829468 = ((NI) 0);
{
while (1) {
NimStringDesc* isobjformat0;
TY178507 LOC86;
if (!(res_548524_839829468 <= HEX3Atmp_548521_839829468)) goto LA74;
j_548247_839829468 = res_548524_839829468;
{
/* Join consecutive tests with the separator in T..._229
   (presumably " || "). */
if (!!((orexpr0 == NIM_NIL))) goto LA77;
add_178487_2381377266(&orexpr0, ((NimStringDesc*) &T839829468_229));
}
LA77: ;
{
/* Pick the type-test format string depending on the backend
   command (gcmd == 2) or a module flag (bit 27) — looks like a
   C vs C++ codegen distinction; TODO confirm. */
NIM_BOOL LOC81;
LOC81 = (NIM_BOOL)0;
LOC81 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC81) goto LA82;
LOC81 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA82: ;
if (!!(LOC81)) goto LA83;
isobjformat0 = copyString(((NimStringDesc*) &T839829468_637));
}
goto LA79;
LA83: ;
{
isobjformat0 = copyString(((NimStringDesc*) &T839829468_638));
}
LA79: ;
memset((void*)LOC86, 0, sizeof(LOC86));
LOC86[0] = gentypeinfo_535941_839829468((*p0).module, (*(*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[j_548247_839829468]).typ);
appcg_532632_839829468((*p0).module, &orexpr0, isobjformat0, LOC86, 1);
res_548524_839829468 += ((NI) 1);
} LA74: ;
}
}
{
/* Chain with `else` when this is not the first except branch. */
if (!(((NI) 1) < i0)) goto LA89;
line_532695_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_620));
}
LA89: ;
/* Open an `if (<orexpr>)` block and compile the branch body
   (its last son). */
memset((void*)LOC91, 0, sizeof(LOC91));
LOC91[0] = orexpr0;
LOC92 = (NI)0;
LOC92 = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_639), LOC91, 1);
memset((void*)LOC93, 0, sizeof(LOC93));
LOC93[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_636), LOC93, 1);
expr_539248_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[(NI)(blen0 - ((NI) 1))], d0);
memset((void*)LOC94, 0, sizeof(LOC94));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_606), LOC94, 0);
endblock_544060_839829468(p0);
}
LA58: ;
i0 += ((NI) 1);
} LA49: ;
}
(*p0).inexceptblock -= ((NI) 1);
/* Balance the nestedtrystmts push made before the body. */
LOC95 = (Tnode292802*)0;
LOC95 = pop_318246_1689653243((&(*p0).nestedtrystmts));
endblock_544060_839829468(p0);
{
/* Trailing `finally` branch (node kind 107): emit its body with the
   safe point recorded in finallysafepoints for the duration. */
NIM_BOOL LOC98;
Ropeobj178006* LOC102;
LOC98 = (NIM_BOOL)0;
LOC98 = (i0 < length0);
if (!(LOC98)) goto LA99;
LOC98 = ((*(*t0).kindU.S6.sons->data[i0]).kind == ((Tnodekind292020) 107));
LA99: ;
if (!LOC98) goto LA100;
(*p0).finallysafepoints = (TY191350*) incrSeqV2(&((*p0).finallysafepoints)->Sup, sizeof(Ropeobj178006*));
asgnRefNoCycle((void**) (&(*p0).finallysafepoints->data[(*p0).finallysafepoints->Sup.len]), safepoint0);
++(*p0).finallysafepoints->Sup.len;
gensimpleblock_544095_839829468(p0, (*(*t0).kindU.S6.sons->data[i0]).kindU.S6.sons->data[((NI) 0)]);
LOC102 = (Ropeobj178006*)0;
LOC102 = pop_178530_1689653243((&(*p0).finallysafepoints));
}
LA100: ;
/* Final safe-point bookkeeping (pop / re-raise check — format string
   is in T..._640). */
memset((void*)LOC103, 0, sizeof(LOC103));
LOC103[0] = safepoint0;
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_640), LOC103, 1);
}
N_NIMCALL(NimStringDesc*, getraisefrmt_546824_839829468)(Tcproc529021* p0) {
/* Return a fresh copy of the C format string used to emit a `raise`
   statement (the template lives in the opaque constant T839829468_641).
   The proc context `p0` is accepted for interface uniformity but unused. */
return copyString(((NimStringDesc*) &T839829468_641));
}
/* Generated code (Nim compiler backend): compile a `raise` statement.
   If we are inside an except block, the enclosing try's `finally` body
   (node kind 107) is emitted first. Then either a raise call with the
   exception value and its type name is emitted (explicit `raise e`), or
   a re-raise (bare `raise`). Control flow uses the generated `goto LAnn`
   if/else pattern; do not restructure. */
N_NIMCALL(void, genraisestmt_546828_839829468)(Tcproc529021* p0, Tnode292802* t0) {
{
/* Inside an except block: run the innermost pending finally first. */
Tnode292802* finallyblock0;
if (!(((NI) 0) < (*p0).inexceptblock)) goto LA3;
finallyblock0 = lastson_295364_850551059((*p0).nestedtrystmts->data[(NI)(((*p0).nestedtrystmts ? (*p0).nestedtrystmts->Sup.len : 0) - ((NI) 1))]);
{
if (!((*finallyblock0).kind == ((Tnodekind292020) 107))) goto LA7;
gensimpleblock_544095_839829468(p0, (*finallyblock0).kindU.S6.sons->data[((NI) 0)]);
}
LA7: ;
}
LA3: ;
{
/* Explicit raise: t0[0] is not the empty node (kind 1). Evaluate the
   exception expression, and emit the raise call with its location and
   the exception type's name as a C string. */
Tloc292816 a0;
Ropeobj178006* e0;
Ttype292840* typ0;
NimStringDesc* LOC13;
TY532811 LOC14;
if (!!(((*(*t0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA11;
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 0)], (&a0));
e0 = rdloc_538188_839829468((&a0));
typ0 = skiptypes_296099_850551059((*(*t0).kindU.S6.sons->data[((NI) 0)]).typ, IL64(211106247256320));
genlinedir_532823_839829468(p0, t0);
LOC13 = (NimStringDesc*)0;
LOC13 = getraisefrmt_546824_839829468(p0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = e0;
LOC14[1] = makecstring_191638_155036129((*(*(*typ0).sym).name).s);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), LOC13, LOC14, 2);
}
goto LA9;
LA11: ;
{
/* Bare `raise` (re-raise). */
genlinedir_532823_839829468(p0, t0);
{
/* Pick the re-raise form: when the backend command is 2 or the
   module flag (bit 27) is set — and a global option (bit 31) is NOT
   set — emit the template in T..._623; looks like a C++-exceptions
   distinction, TODO confirm against the option enums. */
NIM_BOOL LOC18;
NIM_BOOL LOC19;
TY533289 LOC24;
Ropeobj178006* LOC25;
LOC18 = (NIM_BOOL)0;
LOC19 = (NIM_BOOL)0;
LOC19 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC19) goto LA20;
LOC19 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA20: ;
LOC18 = LOC19;
if (!(LOC18)) goto LA21;
LOC18 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA21: ;
if (!LOC18) goto LA22;
memset((void*)LOC24, 0, sizeof(LOC24));
LOC25 = (Ropeobj178006*)0;
LOC25 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_623), LOC24, 0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), LOC25);
}
goto LA16;
LA22: ;
{
/* Default re-raise template (T..._642). */
TY533289 LOC27;
memset((void*)LOC27, 0, sizeof(LOC27));
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_642), LOC27, 0);
}
LA16: ;
}
LA9: ;
}
/* Intentionally empty: a `type` section generates no code here; types are
   emitted on demand elsewhere. Kept as a stub so the statement dispatcher
   has a uniform entry point for this node kind. */
N_NIMCALL(void, gentypesection_538184_839829468)(Tcgen529027* m0, Tnode292802* n0) {
}
N_NIMCALL(Tcfilesection529005, determinesection_548819_839829468)(Tnode292802* n0) {
/* Decide which generated-file section an emit pragma's code belongs to.
   When the first child of `n0` is a string literal (node kinds 20..22),
   its prefix selects the section (the three recognised prefixes live in
   the opaque constants T..._643/644/645); otherwise section 7 is the
   default. */
Tcfilesection529005 section = ((Tcfilesection529005) 7);
NI childcount = len_293081_850551059(n0);
if (((NI) 1) <= childcount) {
Tnode292802* first = (*n0).kindU.S6.sons->data[((NI) 0)];
if ((*first).kind >= ((Tnodekind292020) 20) && (*first).kind <= ((Tnodekind292020) 22)) {
NimStringDesc* sec0 = (*first).kindU.S3.strval;
if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_643))) {
section = ((Tcfilesection529005) 3);
} else if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_644))) {
section = ((Tcfilesection529005) 9);
} else if (nsuStartsWith(sec0, ((NimStringDesc*) &T839829468_645))) {
section = ((Tcfilesection529005) 1);
}
}
}
return section;
}
N_NIMCALL(void, genemit_548839_839829468)(Tcproc529021* p0, Tnode292802* t0) {
/* Compile an `emit` pragma: render the raw code of t0[1] and splice it
   either into a module-level file section (when not inside a proc) or
   into the current proc's statement section. */
Ropeobj178006* emitted = genasmoremitstmt_548529_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 1)], NIM_FALSE);
if ((*p0).prc == NIM_NIL) {
/* Top level: the emitted node itself selects the target file section. */
Tnode292802* arg = HEX5BHEX5D_293238_850551059(t0, ((NI) 1));
Tcfilesection529005 section0 = determinesection_548819_839829468(arg);
genclinedir_532813_839829468(&(*(*p0).module).s[(section0)- 0], (*t0).info);
add_178482_2381377266(&(*(*p0).module).s[(section0)- 0], emitted);
} else {
/* Inside a proc: prepend a line directive and append to statements. */
genlinedir_532823_839829468(p0, t0);
line_532690_839829468(p0, ((Tcprocsection529011) 2), emitted);
}
}
/* Generated code (Nim compiler backend): register a debugger breakpoint.
   Only active when option bit 17 is set (presumably the endb debugger
   option — TODO confirm). The breakpoint name is either taken from a
   literal child (node kind 34) or auto-generated from a global counter.
   The registration is appended to the global `gbreakpoints` rope. */
N_NIMCALL(void, genbreakpoint_548862_839829468)(Tcproc529021* p0, Tnode292802* t0) {
NimStringDesc* name0;
name0 = (NimStringDesc*)0;
{
TY535238 LOC12;
NI LOC13;
NimStringDesc* LOC14;
/* Skip everything unless the debugging option (bit 17) is enabled. */
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA3;
{
/* Explicit name given as a string literal in t0[1]. */
if (!((*t0).kind == ((Tnodekind292020) 34))) goto LA7;
name0 = nsuNormalize((*(*t0).kindU.S6.sons->data[((NI) 1)]).kindU.S3.strval);
}
goto LA5;
LA7: ;
{
/* No name: synthesise one from a global counter (prefix in T..._646). */
NimStringDesc* LOC10;
NimStringDesc* LOC11;
breakpointid_548860_839829468 += ((NI) 1);
LOC10 = (NimStringDesc*)0;
LOC11 = (NimStringDesc*)0;
LOC11 = nimIntToStr(breakpointid_548860_839829468);
LOC10 = rawNewString(LOC11->Sup.len + 2);
appendString(LOC10, ((NimStringDesc*) &T839829468_646));
appendString(LOC10, LOC11);
name0 = LOC10;
}
LA5: ;
genlinedir_532823_839829468(p0, t0);
/* Registration record: [line number, file name, breakpoint name]. */
memset((void*)LOC12, 0, sizeof(LOC12));
LOC13 = (NI)0;
LOC13 = tolinenumber_192415_155036129((*t0).info);
LOC12[0] = rope_178401_2381377266(((NI64) (LOC13)));
LOC14 = (NimStringDesc*)0;
LOC14 = tofilename_192257_155036129((*t0).info.fileindex);
LOC12[1] = makecstring_191638_155036129(LOC14);
LOC12[2] = makecstring_191638_155036129(name0);
appcg_532632_839829468((*p0).module, &gbreakpoints_548861_839829468, ((NimStringDesc*) &T839829468_647), LOC12, 3);
}
LA3: ;
}
N_NIMCALL(void, genwatchpoint_549016_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Emit a debugger watchpoint registration for the expression in n0[1]:
   [address of the location, rendered source text, type info]. A no-op
   unless option bit 17 (debugging) is active. */
Tloc292816 watched;
Ttype292840* typ0;
TY535238 args;
NimStringDesc* rendered;
if (!(((*p0).options &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) {
return;
}
memset((void*)(&watched), 0, sizeof(watched));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 1)], (&watched));
typ0 = skiptypes_296099_850551059((*(*n0).kindU.S6.sons->data[((NI) 1)]).typ, IL64(211106242013440));
memset((void*)args, 0, sizeof(args));
args[0] = addrloc_538204_839829468((&watched));
rendered = rendertree_311044_382274130((*n0).kindU.S6.sons->data[((NI) 1)], 0);
args[1] = makecstring_191638_155036129(rendered);
args[2] = gentypeinfo_535941_839829468((*p0).module, typ0);
linecg_532707_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_648), args, 3);
}
/* Generated code (Nim compiler backend): compile a pragma statement by
   dispatching each child pragma: 191 -> emit, 131 -> breakpoint,
   176 -> watchpoint, 183 -> compile a statement list into a fresh proc
   context and store its statement section as the module's injectstmt.
   All other pragma kinds are ignored here. */
N_NIMCALL(void, genpragma_549039_839829468)(Tcproc529021* p_549041_839829468, Tnode292802* n0) {
{
/* Iterate over all sons of the pragma node. */
NI i_549054_839829468;
NI HEX3Atmp_549073_839829468;
NI LOC2;
NI res_549076_839829468;
i_549054_839829468 = (NI)0;
HEX3Atmp_549073_839829468 = (NI)0;
LOC2 = (NI)0;
LOC2 = sonslen_295351_850551059(n0);
HEX3Atmp_549073_839829468 = (NI)(LOC2 - ((NI) 1));
res_549076_839829468 = ((NI) 0);
{
while (1) {
Tnode292802* it0;
Tspecialword275003 LOC5;
if (!(res_549076_839829468 <= HEX3Atmp_549073_839829468)) goto LA4;
i_549054_839829468 = res_549076_839829468;
it0 = (*n0).kindU.S6.sons->data[i_549054_839829468];
LOC5 = (Tspecialword275003)0;
LOC5 = whichpragma_318911_2616423590(it0);
switch (LOC5) {
case ((Tspecialword275003) 191):
{
genemit_548839_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 131):
{
genbreakpoint_548862_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 176):
{
genwatchpoint_549016_839829468(p_549041_839829468, it0);
}
break;
case ((Tspecialword275003) 183):
{
/* Compile it0[1] in a fresh proc context with bits 16/17 of the
   options mask (0x18000) cleared, then store the produced
   statement section on the module as injectstmt. */
Tcproc529021* p0;
Ropeobj178006** LOC10;
p0 = newproc_529206_3723162438(NIM_NIL, (*p_549041_839829468).module);
(*p0).options = ((*p0).options & ~ 98304);
genstmts_539244_839829468(p0, (*it0).kindU.S6.sons->data[((NI) 1)]);
LOC10 = (Ropeobj178006**)0;
LOC10 = s_529179_3723162438(p0, ((Tcprocsection529011) 2));
asgnRefNoCycle((void**) (&(*(*p0).module).injectstmt), (*LOC10));
}
break;
default:
{
/* Other pragmas produce no code. */
}
break;
}
res_549076_839829468 += ((NI) 1);
} LA4: ;
}
}
}
N_NIMCALL(void, genparforstmt_546208_839829468)(Tcproc529021* p0, Tnode292802* t0) {
/* Compile a `parallel for` statement: t0[0] is the loop variable symbol,
   t0[1] the range call whose sons 1/2 are the bounds and son 3 the
   annotation string, t0[2] the loop body. The loop header is emitted via
   the format template in T..._649, then the body runs inside its own
   block marked as a loop so `break` targets it. */
NI savedbreakidx;
Tsym292834* loopvar;
Tloc292816 lowerloc;
Tloc292816 upperloc;
Tnode292802* rangecall;
TY535235 headerargs;
NimStringDesc* annotation;
TY533289 noargs;
(*p0).withinloop += ((NI) 1);
genlinedir_532823_839829468(p0, t0);
savedbreakidx = (*p0).breakidx;
loopvar = (*(*t0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
memset((void*)(&lowerloc), 0, sizeof(lowerloc));
memset((void*)(&upperloc), 0, sizeof(upperloc));
assignlocalvar_538614_839829468(p0, loopvar);
rangecall = (*t0).kindU.S6.sons->data[((NI) 1)];
initlocexpr_539283_839829468(p0, (*rangecall).kindU.S6.sons->data[((NI) 1)], (&lowerloc));
initlocexpr_539283_839829468(p0, (*rangecall).kindU.S6.sons->data[((NI) 2)], (&upperloc));
memset((void*)headerargs, 0, sizeof(headerargs));
headerargs[0] = rdloc_538188_839829468((&(*loopvar).loc));
headerargs[1] = rdloc_538188_839829468((&lowerloc));
headerargs[2] = rdloc_538188_839829468((&upperloc));
annotation = getstr_297230_850551059((*rangecall).kindU.S6.sons->data[((NI) 3)]);
headerargs[3] = rope_178277_2381377266(annotation);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_649), headerargs, 4);
memset((void*)noargs, 0, sizeof(noargs));
(*p0).breakidx = startblock_543978_839829468(p0, ((NimStringDesc*) &T839829468_273), noargs, 0);
(*p0).blocks->data[(*p0).breakidx].isloop = NIM_TRUE;
genstmts_539244_839829468(p0, (*t0).kindU.S6.sons->data[((NI) 2)]);
endblock_544060_839829468(p0);
(*p0).breakidx = savedbreakidx;
(*p0).withinloop -= ((NI) 1);
}
N_NIMCALL(void, genstate_544117_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Emit a state label for closure-iterator state machines. `n0` must have
   exactly one child and it must be an int literal (node kind 6) holding
   the state index; anything else is an internal error. */
NI64 idx0;
TY178507 fmtargs;
NIM_BOOL wellformed;
wellformed = (len_293081_850551059(n0) == ((NI) 1));
if (wellformed) {
wellformed = ((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 6));
}
if (!wellformed) {
internalerror_196113_155036129(HEX24_196185_1689653243(T839829468_650));
}
idx0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S1.intval;
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = rope_178401_2381377266(idx0);
linefmt_532714_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_652), fmtargs, 1);
}
N_NIMCALL(void, gengotostate_544144_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Emit a computed goto over all possible state values of n0[0]: the state
   expression is evaluated, a dispatch header (T..._603/653) is emitted,
   then one case line (T..._654) per state from 0 to the ordinal maximum
   of the state type, closed by the footer in T..._160. Also flags that a
   BeforeRet label is needed in this proc. */
Tloc292816 stateloc;
TY178507 stateargs;
TY533289 noargs;
NI64 laststate;
NI64 state;
memset((void*)(&stateloc), 0, sizeof(stateloc));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&stateloc));
memset((void*)stateargs, 0, sizeof(stateargs));
stateargs[0] = rdloc_538188_839829468((&stateloc));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_603), stateargs, 1);
(*p0).beforeretneeded = NIM_TRUE;
memset((void*)noargs, 0, sizeof(noargs));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_653), noargs, 0);
laststate = lastord_320004_3876443242((*(*n0).kindU.S6.sons->data[((NI) 0)]).typ);
for (state = IL64(0); state <= laststate; state += ((NI) 1)) {
TY178507 caseargs;
memset((void*)caseargs, 0, sizeof(caseargs));
caseargs[0] = rope_178401_2381377266(state);
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_654), caseargs, 1);
}
memset((void*)noargs, 0, sizeof(noargs));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_160), noargs, 0);
}
N_NIMCALL(void, genbreakstate_544229_839829468)(Tcproc529021* p0, Tnode292802* n0) {
/* Emit the `break state` test for closure iterators. When the first child
   is a closure node (kind 155) the state is read from its environment
   (second child, template T..._655); otherwise the expression itself is
   the state (template T..._656). */
Tloc292816 stateloc;
TY178507 fmtargs;
Tnode292802* child;
memset((void*)(&stateloc), 0, sizeof(stateloc));
child = (*n0).kindU.S6.sons->data[((NI) 0)];
if ((*child).kind == ((Tnodekind292020) 155)) {
initlocexpr_539283_839829468(p0, (*child).kindU.S6.sons->data[((NI) 1)], (&stateloc));
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = rdloc_538188_839829468((&stateloc));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_655), fmtargs, 1);
} else {
initlocexpr_539283_839829468(p0, child, (&stateloc));
memset((void*)fmtargs, 0, sizeof(fmtargs));
fmtargs[0] = rdloc_538188_839829468((&stateloc));
linef_532700_839829468(p0, ((Tcprocsection529011) 2), ((NimStringDesc*) &T839829468_656), fmtargs, 1);
}
}
N_NIMCALL(void, expr_539248_839829468)(Tcproc529021* p0, Tnode292802* n0, Tloc292816* d0) {
switch ((*n0).kind) {
case ((Tnodekind292020) 3):
{
Tsym292834* sym0;
sym0 = (*n0).kindU.S4.sym;
switch ((*sym0).kind) {
case ((Tsymkind292435) 13):
{
{
if (!!(((33554448 & (*sym0).flags) == 0))) goto LA5;
fillprocloc_539201_839829468(sym0);
genprocprototype_539254_839829468((*p0).module, sym0);
}
goto LA3;
LA5: ;
{
genproc_532951_839829468((*p0).module, sym0);
}
LA3: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tsymkind292435) 12):
case ((Tsymkind292435) 15):
case ((Tsymkind292435) 14):
{
{
NimStringDesc* LOC13;
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0)) goto LA11;
LOC13 = (NimStringDesc*)0;
LOC13 = rawNewString((*(*sym0).name).s->Sup.len + 48);
appendString(LOC13, ((NimStringDesc*) &T839829468_270));
appendString(LOC13, (*(*sym0).name).s);
localerror_196085_155036129((*n0).info, LOC13);
}
LA11: ;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC16;
NimStringDesc* LOC20;
LOC16 = (NIM_BOOL)0;
LOC16 = ((*sym0).loc.r == NIM_NIL);
if (LOC16) goto LA17;
LOC16 = ((*sym0).loc.t == NIM_NIL);
LA17: ;
if (!LOC16) goto LA18;
LOC20 = (NimStringDesc*)0;
LOC20 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC20, ((NimStringDesc*) &T839829468_271));
appendString(LOC20, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC20);
}
LA18: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tsymkind292435) 10):
{
{
NIM_BOOL LOC24;
Ropeobj178006* LOC27;
LOC24 = (NIM_BOOL)0;
LOC24 = issimpleconst_532311_839829468((*sym0).typ);
if (!LOC24) goto LA25;
LOC27 = (Ropeobj178006*)0;
LOC27 = genliteral_549476_839829468(p0, (*sym0).ast, (*sym0).typ);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC27, ((Tstorageloc292812) 1));
}
goto LA22;
LA25: ;
{
gencomplexconst_558249_839829468(p0, sym0, d0);
}
LA22: ;
}
break;
case ((Tsymkind292435) 19):
{
Ropeobj178006* LOC30;
LOC30 = (Ropeobj178006*)0;
LOC30 = rope_178401_2381377266(((NI64) ((*sym0).position)));
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC30, ((Tstorageloc292812) 0));
}
break;
case ((Tsymkind292435) 8):
case ((Tsymkind292435) 20):
case ((Tsymkind292435) 11):
case ((Tsymkind292435) 9):
{
{
if (!!(((4194312 & (*sym0).flags) == 0))) goto LA34;
genvarprototype_539236_839829468((*p0).module, sym0);
}
LA34: ;
{
NIM_BOOL LOC38;
NimStringDesc* LOC42;
NimStringDesc* LOC43;
LOC38 = (NIM_BOOL)0;
LOC38 = ((*sym0).loc.r == NIM_NIL);
if (LOC38) goto LA39;
LOC38 = ((*sym0).loc.t == NIM_NIL);
LA39: ;
if (!LOC38) goto LA40;
LOC42 = (NimStringDesc*)0;
LOC43 = (NimStringDesc*)0;
LOC43 = nimIntToStr((*sym0).Sup.id);
LOC42 = rawNewString((*(*sym0).name).s->Sup.len + LOC43->Sup.len + 20);
appendString(LOC42, ((NimStringDesc*) &T839829468_285));
appendString(LOC42, (*(*sym0).name).s);
appendString(LOC42, ((NimStringDesc*) &T839829468_12));
appendString(LOC42, LOC43);
internalerror_196100_155036129((*n0).info, LOC42);
}
LA40: ;
{
if (!(((*sym0).flags &(1U<<((NU)(((Tsymflag292184) 22))&31U)))!=0)) goto LA46;
accessthreadlocalvar_532945_839829468(p0, sym0);
{
NIM_BOOL LOC50;
Ropeobj178006* LOC53;
LOC50 = (NIM_BOOL)0;
LOC50 = emulatedthreadvars_532949_839829468();
if (!LOC50) goto LA51;
LOC53 = (Ropeobj178006*)0;
LOC53 = HEX26_178452_2381377266(((NimStringDesc*) &T839829468_288), (*sym0).loc.r);
putintodest_550468_839829468(p0, d0, (*sym0).loc.t, LOC53, ((Tstorageloc292812) 0));
}
goto LA48;
LA51: ;
{
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
LA48: ;
}
goto LA44;
LA46: ;
{
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
LA44: ;
}
break;
case ((Tsymkind292435) 5):
{
{
NIM_BOOL LOC59;
NimStringDesc* LOC63;
NimStringDesc* LOC64;
LOC59 = (NIM_BOOL)0;
LOC59 = ((*sym0).loc.r == NIM_NIL);
if (LOC59) goto LA60;
LOC59 = ((*sym0).loc.t == NIM_NIL);
LA60: ;
if (!LOC59) goto LA61;
LOC63 = (NimStringDesc*)0;
LOC64 = (NimStringDesc*)0;
LOC64 = nimIntToStr((*sym0).Sup.id);
LOC63 = rawNewString((*(*sym0).name).s->Sup.len + LOC64->Sup.len + 21);
appendString(LOC63, ((NimStringDesc*) &T839829468_289));
appendString(LOC63, (*(*sym0).name).s);
appendString(LOC63, ((NimStringDesc*) &T839829468_12));
appendString(LOC63, LOC64);
internalerror_196100_155036129((*n0).info, LOC63);
}
LA61: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tsymkind292435) 3):
{
{
NIM_BOOL LOC68;
NimStringDesc* LOC72;
NimStringDesc* LOC73;
LOC68 = (NIM_BOOL)0;
LOC68 = ((*sym0).loc.r == NIM_NIL);
if (LOC68) goto LA69;
LOC68 = ((*sym0).loc.t == NIM_NIL);
LA69: ;
if (!LOC68) goto LA70;
LOC72 = (NimStringDesc*)0;
LOC73 = (NimStringDesc*)0;
LOC73 = nimIntToStr((*sym0).Sup.id);
LOC72 = rawNewString((*(*sym0).name).s->Sup.len + LOC73->Sup.len + 22);
appendString(LOC72, ((NimStringDesc*) &T839829468_290));
appendString(LOC72, (*(*sym0).name).s);
appendString(LOC72, ((NimStringDesc*) &T839829468_12));
appendString(LOC72, LOC73);
internalerror_196100_155036129((*n0).info, LOC72);
}
LA70: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
default:
{
NimStringDesc* LOC75;
LOC75 = (NimStringDesc*)0;
LOC75 = rawNewString(reprEnum((NI)(*sym0).kind, (&NTI292435))->Sup.len + 22);
appendString(LOC75, ((NimStringDesc*) &T839829468_291));
appendString(LOC75, reprEnum((NI)(*sym0).kind, (&NTI292435)));
appendString(LOC75, ((NimStringDesc*) &T839829468_292));
internalerror_196100_155036129((*n0).info, LOC75);
}
break;
}
}
break;
case ((Tnodekind292020) 23):
{
{
NIM_BOOL LOC79;
Ropeobj178006* LOC82;
LOC79 = (NIM_BOOL)0;
LOC79 = isemptytype_297441_850551059((*n0).typ);
if (!!(LOC79)) goto LA80;
LOC82 = (Ropeobj178006*)0;
LOC82 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC82, ((Tstorageloc292812) 0));
}
LA80: ;
}
break;
case ((Tnodekind292020) 20) ... ((Tnodekind292020) 22):
{
Ropeobj178006* LOC84;
LOC84 = (Ropeobj178006*)0;
LOC84 = genliteral_539273_839829468(p0, n0);
putdataintodest_550436_839829468(p0, d0, (*n0).typ, LOC84);
}
break;
case ((Tnodekind292020) 6) ... ((Tnodekind292020) 15):
case ((Tnodekind292020) 16) ... ((Tnodekind292020) 19):
case ((Tnodekind292020) 5):
{
Ropeobj178006* LOC86;
LOC86 = (Ropeobj178006*)0;
LOC86 = genliteral_539273_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC86, ((Tstorageloc292812) 0));
}
break;
case ((Tnodekind292020) 27):
case ((Tnodekind292020) 32):
case ((Tnodekind292020) 29):
case ((Tnodekind292020) 30):
case ((Tnodekind292020) 31):
case ((Tnodekind292020) 26):
case ((Tnodekind292020) 28):
{
Tnode292802* op0;
genlinedir_532823_839829468(p0, n0);
op0 = (*n0).kindU.S6.sons->data[((NI) 0)];
{
Tloc292816 a0;
if (!(*n0).typ == 0) goto LA90;
memset((void*)(&a0), 0, sizeof(a0));
{
NIM_BOOL LOC94;
LOC94 = (NIM_BOOL)0;
LOC94 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC94)) goto LA95;
LOC94 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA95: ;
if (!LOC94) goto LA96;
genmagicexpr_557033_839829468(p0, n0, (&a0), (*(*op0).kindU.S4.sym).magic);
}
goto LA92;
LA96: ;
{
gencall_543632_839829468(p0, n0, (&a0));
}
LA92: ;
}
goto LA88;
LA90: ;
{
{
NIM_BOOL LOC102;
LOC102 = (NIM_BOOL)0;
LOC102 = ((*op0).kind == ((Tnodekind292020) 3));
if (!(LOC102)) goto LA103;
LOC102 = !(((*(*op0).kindU.S4.sym).magic == ((Tmagic292524) 0)));
LA103: ;
if (!LOC102) goto LA104;
genmagicexpr_557033_839829468(p0, n0, d0, (*(*op0).kindU.S4.sym).magic);
}
goto LA100;
LA104: ;
{
gencall_543632_839829468(p0, n0, d0);
}
LA100: ;
}
LA88: ;
}
break;
case ((Tnodekind292020) 39):
{
{
NIM_BOOL LOC110;
NI LOC112;
Ropeobj178006* LOC115;
LOC110 = (NIM_BOOL)0;
LOC110 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC110)) goto LA111;
LOC112 = (NI)0;
LOC112 = len_293081_850551059(n0);
LOC110 = !((LOC112 == ((NI) 0)));
LA111: ;
if (!LOC110) goto LA113;
LOC115 = (Ropeobj178006*)0;
LOC115 = gensetnode_549664_839829468(p0, n0);
putintodest_550468_839829468(p0, d0, (*n0).typ, LOC115, ((Tstorageloc292812) 0));
}
goto LA108;
LA113: ;
{
gensetconstr_557496_839829468(p0, n0, d0);
}
LA108: ;
}
break;
case ((Tnodekind292020) 41):
{
{
NIM_BOOL LOC120;
NI LOC122;
LOC120 = (NIM_BOOL)0;
LOC120 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC120)) goto LA121;
LOC122 = (NI)0;
LOC122 = len_293081_850551059(n0);
LOC120 = !((LOC122 == ((NI) 0)));
LA121: ;
if (!LOC120) goto LA123;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA118;
LA123: ;
{
Ttype292840* LOC126;
LOC126 = (Ttype292840*)0;
LOC126 = skiptypes_296099_850551059((*n0).typ, IL64(211106242013440));
if (!((*LOC126).kind == ((Ttypekind292244) 24))) goto LA127;
genseqconstr_555004_839829468(p0, n0, d0);
}
goto LA118;
LA127: ;
{
genarrayconstr_558207_839829468(p0, n0, d0);
}
LA118: ;
}
break;
case ((Tnodekind292020) 37):
{
{
NIM_BOOL LOC133;
NI LOC135;
LOC133 = (NIM_BOOL)0;
LOC133 = isdeepconstexpr_318566_2616423590(n0);
if (!(LOC133)) goto LA134;
LOC135 = (NI)0;
LOC135 = len_293081_850551059(n0);
LOC133 = !((LOC135 == ((NI) 0)));
LA134: ;
if (!LOC133) goto LA136;
exprcomplexconst_558684_839829468(p0, n0, d0);
}
goto LA131;
LA136: ;
{
gentupleconstr_557618_839829468(p0, n0, d0);
}
LA131: ;
}
break;
case ((Tnodekind292020) 38):
{
genobjconstr_554903_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 61):
{
gencast_556538_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 58):
case ((Tnodekind292020) 59):
case ((Tnodekind292020) 60):
{
genconv_556633_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 64):
case ((Tnodekind292020) 63):
{
genaddr_553051_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 42):
{
genbracketexpr_554277_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 47):
case ((Tnodekind292020) 65):
{
genderef_543921_839829468(p0, n0, d0, NIM_FALSE);
}
break;
case ((Tnodekind292020) 45):
{
genrecordfield_553448_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 46):
{
gencheckedrecordfield_554046_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 127):
case ((Tnodekind292020) 112):
{
genblock_546083_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 126):
{
genstmtlistexpr_558402_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 115):
{
{
NI i_559023_839829468;
NI HEX3Atmp_559276_839829468;
NI LOC151;
NI res_559279_839829468;
i_559023_839829468 = (NI)0;
HEX3Atmp_559276_839829468 = (NI)0;
LOC151 = (NI)0;
LOC151 = sonslen_295351_850551059(n0);
HEX3Atmp_559276_839829468 = (NI)(LOC151 - ((NI) 1));
res_559279_839829468 = ((NI) 0);
{
while (1) {
if (!(res_559279_839829468 <= HEX3Atmp_559276_839829468)) goto LA153;
i_559023_839829468 = res_559279_839829468;
genstmts_539244_839829468(p0, (*n0).kindU.S6.sons->data[i_559023_839829468]);
res_559279_839829468 += ((NI) 1);
} LA153: ;
}
}
}
break;
case ((Tnodekind292020) 48):
case ((Tnodekind292020) 92):
{
genif_544982_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 93):
{
expr_539248_839829468(p0, (*(*n0).kindU.S6.sons->data[((NI) 1)]).kindU.S6.sons->data[((NI) 0)], d0);
}
break;
case ((Tnodekind292020) 66):
{
downconv_558581_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 67):
{
upconv_558431_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 68):
{
genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_563));
}
break;
case ((Tnodekind292020) 69):
{
genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_564));
}
break;
case ((Tnodekind292020) 70):
{
genrangechck_556591_839829468(p0, n0, d0, ((NimStringDesc*) &T839829468_565));
}
break;
case ((Tnodekind292020) 71):
{
convstrtocstr_556643_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 72):
{
convcstrtostr_556655_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 51):
case ((Tnodekind292020) 52):
{
Tsym292834* sym0;
sym0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
genproc_532951_839829468((*p0).module, sym0);
{
NIM_BOOL LOC166;
NimStringDesc* LOC170;
LOC166 = (NIM_BOOL)0;
LOC166 = ((*sym0).loc.r == NIM_NIL);
if (LOC166) goto LA167;
LOC166 = ((*sym0).loc.t == NIM_NIL);
LA167: ;
if (!LOC166) goto LA168;
LOC170 = (NimStringDesc*)0;
LOC170 = rawNewString((*(*sym0).name).s->Sup.len + 20);
appendString(LOC170, ((NimStringDesc*) &T839829468_271));
appendString(LOC170, (*(*sym0).name).s);
internalerror_196100_155036129((*n0).info, LOC170);
}
LA168: ;
putlocintodest_539258_839829468(p0, d0, (&(*sym0).loc));
}
break;
case ((Tnodekind292020) 155):
{
genclosure_557836_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 1):
{
}
break;
case ((Tnodekind292020) 96):
{
genwhilestmt_545985_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 99):
case ((Tnodekind292020) 100):
{
genvarstmt_544854_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 101):
{
genconststmt_544909_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 94):
{
internalerror_196100_155036129((*n0).info, ((NimStringDesc*) &T839829468_594));
}
break;
case ((Tnodekind292020) 97):
{
gencase_547827_839829468(p0, n0, d0);
}
break;
case ((Tnodekind292020) 109):
{
genreturnstmt_545617_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 110):
{
genbreakstmt_546444_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 73):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA183;
genasgn_549239_839829468(p0, n0, NIM_FALSE);
}
LA183: ;
}
break;
case ((Tnodekind292020) 74):
{
{
if (!!((((*n0).flags &(1U<<((NU)(((Tnodeflag292427) 14))&15U)))!=0))) goto LA188;
genasgn_549239_839829468(p0, n0, !(((*p0).prc == NIM_NIL)));
}
LA188: ;
}
break;
case ((Tnodekind292020) 114):
{
{
Tloc292816 a0;
if (!!(((*(*n0).kindU.S6.sons->data[((NI) 0)]).kind == ((Tnodekind292020) 1)))) goto LA193;
genlinedir_532823_839829468(p0, n0);
memset((void*)(&a0), 0, sizeof(a0));
initlocexpr_539283_839829468(p0, (*n0).kindU.S6.sons->data[((NI) 0)], (&a0));
}
LA193: ;
}
break;
case ((Tnodekind292020) 89):
{
genasmstmt_548659_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 106):
{
{
NIM_BOOL LOC199;
NIM_BOOL LOC200;
LOC199 = (NIM_BOOL)0;
LOC200 = (NIM_BOOL)0;
LOC200 = (gcmd_169132_2607990831 == ((Tcommands169076) 2));
if (LOC200) goto LA201;
LOC200 = (((*(*(*p0).module).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA201: ;
LOC199 = LOC200;
if (!(LOC199)) goto LA202;
LOC199 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 31))&63U)))!=0));
LA202: ;
if (!LOC199) goto LA203;
gentrycpp_547866_839829468(p0, n0, d0);
}
goto LA197;
LA203: ;
{
gentry_548114_839829468(p0, n0, d0);
}
LA197: ;
}
break;
case ((Tnodekind292020) 108):
{
genraisestmt_546828_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 98):
{
gentypesection_538184_839829468((*p0).module, n0);
}
break;
case ((Tnodekind292020) 125):
case ((Tnodekind292020) 84):
case ((Tnodekind292020) 121):
case ((Tnodekind292020) 116):
case ((Tnodekind292020) 117):
case ((Tnodekind292020) 118):
case ((Tnodekind292020) 119):
case ((Tnodekind292020) 120):
case ((Tnodekind292020) 83):
case ((Tnodekind292020) 82):
{
}
break;
case ((Tnodekind292020) 90):
{
genpragma_549039_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 91):
{
Tnode292802* LOC211;
LOC211 = (Tnode292802*)0;
LOC211 = lastson_295364_850551059(n0);
expr_539248_839829468(p0, LOC211, d0);
}
break;
case ((Tnodekind292020) 79):
case ((Tnodekind292020) 80):
case ((Tnodekind292020) 81):
{
{
Tsym292834* prc0;
if (!((*(*n0).kindU.S6.sons->data[((NI) 2)]).kind == ((Tnodekind292020) 1))) goto LA215;
prc0 = (*(*n0).kindU.S6.sons->data[((NI) 0)]).kindU.S4.sym;
{
NIM_BOOL LOC219;
Tsym292834* LOC220;
LOC219 = (NIM_BOOL)0;
LOC220 = (Tsym292834*)0;
LOC220 = skipgenericowner_297280_850551059(prc0);
LOC219 = ((*LOC220).kind == ((Tsymkind292435) 6));
if (!(LOC219)) goto LA221;
LOC219 = !((((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 23))&31U)))!=0));
LA221: ;
if (!LOC219) goto LA222;
{
NIM_BOOL LOC226;
NIM_BOOL LOC227;
NIM_BOOL LOC228;
NIM_BOOL LOC229;
Tsym292834* LOC231;
NIM_BOOL LOC234;
LOC226 = (NIM_BOOL)0;
LOC227 = (NIM_BOOL)0;
LOC228 = (NIM_BOOL)0;
LOC229 = (NIM_BOOL)0;
LOC229 = !(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 2))&63U)))!=0));
if (!(LOC229)) goto LA230;
LOC231 = (Tsym292834*)0;
LOC231 = getmodule_299123_2984716966(prc0);
LOC229 = !((((*LOC231).flags &(1U<<((NU)(((Tsymflag292184) 25))&31U)))!=0));
LA230: ;
LOC228 = LOC229;
if (LOC228) goto LA232;
LOC228 = ((65600 & (*prc0).flags) == 64);
LA232: ;
LOC227 = LOC228;
if (LOC227) goto LA233;
LOC234 = (NIM_BOOL)0;
LOC234 = (((*prc0).flags &(1U<<((NU)(((Tsymflag292184) 6))&31U)))!=0);
if (!(LOC234)) goto LA235;
LOC234 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 5))&15U)))!=0);
LA235: ;
LOC227 = LOC234;
LA233: ;
LOC226 = LOC227;
if (LOC226) goto LA236;
LOC226 = ((*prc0).kind == ((Tsymkind292435) 13));
LA236: ;
if (!LOC226) goto LA237;
{
NIM_BOOL LOC241;
Tnode292802* LOC242;
LOC241 = (NIM_BOOL)0;
LOC242 = (Tnode292802*)0;
LOC242 = getbody_335226_1724185294(prc0);
LOC241 = !(((*LOC242).kind == ((Tnodekind292020) 1)));
if (LOC241) goto LA243;
LOC241 = (((*prc0).loc.flags &(1U<<((NU)(((Tlocflag292810) 4))&15U)))!=0);
LA243: ;
if (!LOC241) goto LA244;
genproc_532951_839829468((*p0).module, prc0);
}
LA244: ;
}
LA237: ;
}
LA222: ;
}
LA215: ;
}
break;
case ((Tnodekind292020) 95):
{
genparforstmt_546208_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 157):
{
genstate_544117_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 156):
{
gengotostate_544144_839829468(p0, n0);
}
break;
case ((Tnodekind292020) 158):
{
genbreakstate_544229_839829468(p0, n0);
}
break;
default:
{
NimStringDesc* LOC251;
LOC251 = (NimStringDesc*)0;
LOC251 = rawNewString(reprEnum((NI)(*n0).kind, (&NTI292020))->Sup.len + 25);
appendString(LOC251, ((NimStringDesc*) &T839829468_291));
appendString(LOC251, reprEnum((NI)(*n0).kind, (&NTI292020)));
appendString(LOC251, ((NimStringDesc*) &T839829468_657));
internalerror_196100_155036129((*n0).info, LOC251);
}
break;
}
}
N_NIMCALL(void, genstmts_539244_839829468)(Tcproc529021* p0, Tnode292802* t0) {
	/* Generate statement-position code for `t0`: evaluate it as an
	   expression into a zeroed scratch location and then verify that no
	   value-producing location escaped statement context. */
	Tloc292816 scratch;
	memset((void*)(&scratch), 0, sizeof(scratch));
	expr_539248_839829468(p0, t0, (&scratch));
	/* Location kinds 0..2 (bitmask 7) are the value-free kinds; any other
	   kind here means an expression produced a value where a statement was
	   expected, which is an internal error. */
	if ((7 & (1U << ((NU)(scratch.k) & 15U))) == 0) {
		NimStringDesc* msg = HEX24_196185_1689653243(T839829468_658);
		internalerror_196113_155036129(msg);
	}
}
N_NIMCALL(Tnode292802*, myprocess_563402_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
	/* Codegen pass "process" hook: feed statement `n0` into this module's
	   init proc.  Always returns `n0` unchanged — the pass never rewrites
	   the AST node it was given. */
	Tcgen529027* gen;
	/* Nothing to do when the pass context is absent, or when codegen is
	   suppressed for this node (short-circuit: skipcodegen is only
	   consulted for a non-nil context, matching the original). */
	if (b0 == NIM_NIL) {
		return n0;
	}
	if (skipcodegen_341085_2355241294(n0)) {
		return n0;
	}
	gen = ((Tcgen529027*) (b0));
	(*(*gen).initproc).options = initprocoptions_562635_839829468(gen);
	genstmts_539244_839829468((*gen).initproc, n0);
	return n0;
}
N_NIMCALL(Ropeobj178006*, getsomeinitname_561904_839829468)(Tsym292834* m0, NimStringDesc* suffix0) {
	/* Build the name of an init proc for module symbol `m0`:
	   [<mangled owner name><T..._12 separator>]<module name><suffix0>.
	   The owner prefix is emitted only when none of the flag bits in the
	   mask 12288 are set on the symbol. */
	Ropeobj178006* result0 = (Ropeobj178006*)0;
	if ((12288 & (*m0).flags) == 0) {
		NimStringDesc* ownerName = mangle_528847_2036603609((*(*(*m0).owner).name).s);
		result0 = rope_178277_2381377266(ownerName);
		add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_12));
	}
	add_178487_2381377266(&result0, (*(*m0).name).s);
	add_178487_2381377266(&result0, suffix0);
	return result0;
}
N_NIMCALL(Ropeobj178006*, getinitname_562235_839829468)(Tsym292834* m0) {
	/* Init-proc name for module `m0`, using the suffix string constant
	   T839829468_659. */
	return getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_659));
}
N_NIMCALL(Ropeobj178006*, getdatinitname_562239_839829468)(Tsym292834* m0) {
	/* Data-init-proc name for module `m0`, using the suffix string constant
	   T839829468_660. */
	return getsomeinitname_561904_839829468(m0, ((NimStringDesc*) &T839829468_660));
}
N_NIMCALL(void, registermoduletomain_562243_839829468)(Tsym292834* m0) {
/* Register module symbol `m0` with the generated main-module glue:
   forward-declare its init and data-init procs in `mainmodprocs`, and
   (unless symbol flag bit 13 is set on the module) append calls to them
   to the global data-init / module-init call ropes. */
Ropeobj178006* init0;
Ropeobj178006* datinit0;
TY178507 LOC1;
TY178507 LOC2;
init0 = getinitname_562235_839829468(m0);
datinit0 = getdatinitname_562239_839829468(m0);
/* Emit one declaration (format T839829468_661) per proc. */
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = init0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC1, 1);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = datinit0;
addf_179205_2381377266(&mainmodprocs_529148_3723162438, ((NimStringDesc*) &T839829468_661), LOC2, 1);
{
TY178507 LOC7;
Ropeobj178006* initcall0;
TY178507 LOC8;
/* Whole call-emission section is skipped when flag bit 13 is set. */
if (!!((((*m0).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0))) goto LA5;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = datinit0;
addf_179205_2381377266(&maindatinit_529151_3723162438, ((NimStringDesc*) &T839829468_662), LOC7, 1);
memset((void*)LOC8, 0, sizeof(LOC8));
LOC8[0] = init0;
initcall0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_662), LOC8, 1);
{
/* Flag bit 12 routes the init call: when set, the call goes into
   `mainmodinit`; otherwise into `othermodsinit`. */
if (!(((*m0).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA11;
add_178482_2381377266(&mainmodinit_529149_3723162438, initcall0);
}
goto LA9;
LA11: ;
{
add_178482_2381377266(&othermodsinit_529150_3723162438, initcall0);
}
LA9: ;
}
LA5: ;
}
N_NIMCALL(Ropeobj178006*, genfilenames_561688_839829468)(Tcgen529027* m0) {
/* Build a rope with one formatted entry (format T839829468_674) per entry
   of the global `fileinfos` table, each carrying the file's project path
   as a C string.  The result is appended to `gbreakpoints` by the caller
   (see genmainproc below). */
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
LOC1 = (Ropeobj178006*)0;
/* Register a dependency on the runtime symbol named by T839829468_673;
   only the registration side effect matters — the returned rope is unused. */
LOC1 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_673));
result0 = NIM_NIL;
{
NI i_561717_839829468;
NI HEX3Atmp_561722_839829468;
NI res_561725_839829468;
i_561717_839829468 = (NI)0;
HEX3Atmp_561722_839829468 = (NI)0;
/* Loop bound is fixed up front: fileinfos length - 1 (or -1 when nil). */
HEX3Atmp_561722_839829468 = ((fileinfos_191629_155036129 ? fileinfos_191629_155036129->Sup.len : 0) - 1);
res_561725_839829468 = ((NI) 0);
{
while (1) {
TY178507 LOC5;
if (!(res_561725_839829468 <= HEX3Atmp_561722_839829468)) goto LA4;
i_561717_839829468 = res_561725_839829468;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = makecstring_191638_155036129(fileinfos_191629_155036129->data[i_561717_839829468].projpath);
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_674), LOC5, 1);
res_561725_839829468 += ((NI) 1);
} LA4: ;
}
}
return result0;
}
N_NIMCALL(void, genmainproc_561729_839829468)(Tcgen529027* m0) {
/* Emit the program entry points into file section 10: a "NimMain"-style
   init proc (template `nimmain0`) and the platform `main`/`WinMain`-style
   wrapper (template `othermain0`).  Template selection depends on target
   OS and global option bits; the exact template string constants are
   opaque here — NOTE(review): their contents are defined elsewhere. */
NimStringDesc* nimmain0;
NimStringDesc* othermain0;
Ropeobj178006* initstackbottomcall0;
TY536475 LOC38;
TY535238 LOC47;
nimmain0 = (NimStringDesc*)0;
othermain0 = (NimStringDesc*)0;
{
NIM_BOOL LOC3;
NIM_BOOL LOC12;
LOC3 = (NIM_BOOL)0;
/* Branch 1: target OS == Tsystemos 2 AND one of the option bits in mask
   1280 is set (presumably a GUI/DLL Windows build — TODO confirm). */
LOC3 = (targetos_176629_4151366050 == ((Tsystemos176004) 2));
if (!(LOC3)) goto LA4;
LOC3 = !(((gglobaloptions_169130_2607990831 & 1280) == 0));
LA4: ;
if (!LOC3) goto LA5;
{
/* Global option bit 10 selects between the two template pairs. */
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 10))&63U)))!=0)) goto LA9;
nimmain0 = copyString(((NimStringDesc*) &T839829468_663));
othermain0 = copyString(((NimStringDesc*) &T839829468_664));
}
goto LA7;
LA9: ;
{
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_666));
}
LA7: ;
LOC12 = (NIM_BOOL)0;
/* Ensure the header named by T839829468_667 is included; the boolean
   result (already-present?) is deliberately discarded. */
LOC12 = includestr_147249_3771138726((&(*m0).headerfiles), ((NimStringDesc*) &T839829468_667));
}
goto LA1;
LA5: ;
{
/* Branch 2: global option bit 8 set. */
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA14;
nimmain0 = copyString(((NimStringDesc*) &T839829468_665));
othermain0 = copyString(((NimStringDesc*) &T839829468_668));
}
goto LA1;
LA14: ;
{
/* Branch 3: target OS == Tsystemos 24 (the same value that disables the
   stack-bottom call below — likely a standalone/embedded target). */
if (!(targetos_176629_4151366050 == ((Tsystemos176004) 24))) goto LA17;
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_670));
}
goto LA1;
LA17: ;
{
/* Default branch. */
nimmain0 = copyString(((NimStringDesc*) &T839829468_669));
othermain0 = copyString(((NimStringDesc*) &T839829468_671));
}
LA1: ;
{
Ropeobj178006* LOC24;
/* If any breakpoints were registered, pull in the runtime support symbol
   named by T839829468_672 (result discarded, dependency side effect only). */
if (!!((gbreakpoints_548861_839829468 == NIM_NIL))) goto LA22;
LOC24 = (Ropeobj178006*)0;
LOC24 = cgsym_532403_839829468(m0, ((NimStringDesc*) &T839829468_672));
}
LA22: ;
{
Ropeobj178006* LOC29;
/* Option bit 17: append the generated file-name table to the
   breakpoint/debug support rope. */
if (!((goptions_169128_2607990831 &(1U<<((NU)(((Toption169009) 17))&31U)))!=0)) goto LA27;
LOC29 = (Ropeobj178006*)0;
LOC29 = genfilenames_561688_839829468(m0);
add_178482_2381377266(&gbreakpoints_548861_839829468, LOC29);
}
LA27: ;
{
NIM_BOOL LOC32;
LOC32 = (NIM_BOOL)0;
/* No stack-bottom initialization on OS 24 or with GC mode 0:
   T839829468_490 is used as the empty/placeholder snippet here and below. */
LOC32 = (targetos_176629_4151366050 == ((Tsystemos176004) 24));
if (LOC32) goto LA33;
LOC32 = (gselectedgc_169133_2607990831 == ((Tgcmode169080) 0));
LA33: ;
if (!LOC32) goto LA34;
initstackbottomcall0 = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
goto LA30;
LA34: ;
{
TY533289 LOC37;
memset((void*)LOC37, 0, sizeof(LOC37));
initstackbottomcall0 = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_675), LOC37, 0);
}
LA30: ;
(*m0).labels += ((NI) 1);
/* Five substitution slots for the NimMain template: data init, breakpoint
   support, other-module init calls, emulated-threadvar init (or the empty
   snippet), and the stack-bottom call. */
memset((void*)LOC38, 0, sizeof(LOC38));
LOC38[0] = maindatinit_529151_3723162438;
LOC38[1] = gbreakpoints_548861_839829468;
LOC38[2] = othermodsinit_529150_3723162438;
{
NIM_BOOL LOC41;
TY533289 LOC45;
LOC41 = (NIM_BOOL)0;
LOC41 = emulatedthreadvars_532949_839829468();
if (!(LOC41)) goto LA42;
LOC41 = !((targetos_176629_4151366050 == ((Tsystemos176004) 24)));
LA42: ;
if (!LOC41) goto LA43;
memset((void*)LOC45, 0, sizeof(LOC45));
LOC38[3] = ropecg_532407_839829468(m0, ((NimStringDesc*) &T839829468_677), LOC45, 0);
}
goto LA39;
LA43: ;
{
LOC38[3] = rope_178277_2381377266(((NimStringDesc*) &T839829468_490));
}
LA39: ;
LOC38[4] = initstackbottomcall0;
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_676), LOC38, 5);
/* Emit the NimMain body: main-module init, stack-bottom call, and the
   current label counter as the third substitution. */
memset((void*)LOC47, 0, sizeof(LOC47));
LOC47[0] = mainmodinit_529149_3723162438;
LOC47[1] = initstackbottomcall0;
LOC47[2] = rope_178401_2381377266(((NI64) ((*m0).labels)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], nimmain0, LOC47, 3);
{
TY533289 LOC52;
/* Unless global option bit 20 is set (no-main?, TODO confirm), also emit
   the platform entry-point wrapper. */
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 20))&63U)))!=0))) goto LA50;
memset((void*)LOC52, 0, sizeof(LOC52));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 10))- 0], othermain0, LOC52, 0);
}
LA50: ;
}
N_NIMCALL(Tnode292802*, myclose_563830_839829468)(Tpasscontext341002* b0, Tnode292802* n0) {
/* Codegen pass "close" hook: generate code for the final node `n0` (if
   any), register the module with the main glue, and — when the module
   carries flag bit 12 (main module) — generate method dispatchers and the
   program's main proc.  Returns `n0` unchanged. */
Tnode292802* result0;
Tcgen529027* m0;
{ result0 = (Tnode292802*)0;
result0 = n0;
{
NIM_BOOL LOC3;
LOC3 = (NIM_BOOL)0;
/* Early exit when there is no pass context or codegen is suppressed for
   this node (skipcodegen only evaluated for non-nil context). */
LOC3 = (b0 == NIM_NIL);
if (LOC3) goto LA4;
LOC3 = skipcodegen_341085_2355241294(n0);
LA4: ;
if (!LOC3) goto LA5;
goto BeforeRet;
}
LA5: ;
m0 = ((Tcgen529027*) (b0));
{
/* A non-nil closing node still gets code generated into the init proc. */
if (!!((n0 == NIM_NIL))) goto LA9;
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
genstmts_539244_839829468((*m0).initproc, n0);
}
LA9: ;
registermoduletomain_562243_839829468((*m0).module);
{
Tnode292802* disp0;
/* Main-module-only tail: flag bit 5 is set on the codegen object, then
   every generated method dispatcher is emitted, then the main proc. */
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA13;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 5))%(sizeof(NU8)*8));
disp0 = generatemethoddispatchers_432151_3853300031();
{
NI i_563891_839829468;
NI HEX3Atmp_563895_839829468;
NI LOC16;
NI res_563898_839829468;
i_563891_839829468 = (NI)0;
HEX3Atmp_563895_839829468 = (NI)0;
LOC16 = (NI)0;
/* Iterate the sons of the dispatcher list node; bound fixed up front. */
LOC16 = sonslen_295351_850551059(disp0);
HEX3Atmp_563895_839829468 = (NI)(LOC16 - ((NI) 1));
res_563898_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563898_839829468 <= HEX3Atmp_563895_839829468)) goto LA18;
i_563891_839829468 = res_563898_839829468;
genprocaux_560284_839829468(m0, (*(*disp0).kindU.S6.sons->data[i_563891_839829468]).kindU.S4.sym);
res_563898_839829468 += ((NI) 1);
} LA18: ;
}
}
genmainproc_561729_839829468(m0);
}
LA13: ;
}BeforeRet: ;
return result0;
}
N_NIMCALL(void, finishmodule_563420_839829468)(Tcgen529027* m0) {
	/* Generate code for every proc that was forward-declared in this
	   module, then reset the forwarding bookkeeping.  The loop bound is
	   re-read on every iteration because generating one proc may append
	   further forwarded procs to the sequence. */
	NI done;
	for (done = ((NI) 0);
	     done <= ((*m0).forwardedprocs ? ((*m0).forwardedprocs->Sup.len-1) : -1);
	     done += ((NI) 1)) {
		Tsym292834* fwd = (*m0).forwardedprocs->data[done];
		/* A proc still carrying symbol flag bit 4 here never received its
		   implementation — report it as an internal error. */
		if ((((*fwd).flags &(1U<<((NU)(((Tsymflag292184) 4))&31U)))!=0)) {
			NimStringDesc* msg = rawNewString((*(*fwd).name).s->Sup.len + 17);
			appendString(msg, ((NimStringDesc*) &T839829468_678));
			appendString(msg, (*(*fwd).name).s);
			internalerror_196100_155036129((*fwd).info, msg);
		}
		genprocnoforward_560906_839829468(m0, fwd);
	}
	/* Subtract the processed entries from the global forwarded-proc counter
	   and truncate the per-module sequence to empty. */
	gforwardedprocscounter_529171_3723162438 -= done;
	(*m0).forwardedprocs = (Tsymseq292804*) setLengthSeq(&((*m0).forwardedprocs)->Sup, sizeof(Tsym292834*), ((NI) 0));
}
N_NIMCALL(void, geninitcode_562286_839829468)(Tcgen529027* m0) {
/* Assemble the module's init proc into a single rope `prc0` and append it
   (plus the data-init proc and file sections 12..16 and any extension
   loaders) to file section 11.  The init body is the concatenation of
   three proc sections (0, 1, 2), each containing the preinit, init and
   postinit sub-procs in that fixed order, with optional frame setup and
   teardown around section 1. */
Ropeobj178006* initname0;
Ropeobj178006* prc0;
TY178507 LOC1;
Ropeobj178006* LOC12;
Ropeobj178006* LOC13;
Ropeobj178006** LOC14;
Ropeobj178006** LOC15;
Ropeobj178006** LOC16;
Ropeobj178006* LOC17;
Ropeobj178006* LOC33;
Ropeobj178006** LOC34;
Ropeobj178006** LOC35;
Ropeobj178006** LOC36;
Ropeobj178006* LOC37;
Ropeobj178006* LOC38;
Ropeobj178006** LOC39;
Ropeobj178006** LOC40;
Ropeobj178006** LOC41;
Ropeobj178006* LOC42;
Ropeobj178006* LOC50;
TY533289 LOC51;
TY178507 LOC52;
TY533289 LOC58;
/* Proc header (format T839829468_679) with the init proc's name. */
initname0 = getinitname_562235_839829468((*m0).module);
memset((void*)LOC1, 0, sizeof(LOC1));
LOC1[0] = initname0;
prc0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_679), LOC1, 1);
{
TY532811 LOC6;
/* Declare the type-node buffer (section 12) when any type nodes exist. */
if (!(((NI) 0) < (*m0).typenodes)) goto LA4;
memset((void*)LOC6, 0, sizeof(LOC6));
LOC6[0] = (*m0).typenodesname;
LOC6[1] = rope_178401_2381377266(((NI64) ((*m0).typenodes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_680), LOC6, 2);
}
LA4: ;
{
TY532811 LOC11;
/* Likewise for the nim-type buffer. */
if (!(((NI) 0) < (*m0).nimtypes)) goto LA9;
memset((void*)LOC11, 0, sizeof(LOC11));
LOC11[0] = (*m0).nimtypesname;
LOC11[1] = rope_178401_2381377266(((NI64) ((*m0).nimtypes)));
appcg_532632_839829468(m0, &(*m0).s[(((Tcfilesection529005) 12))- 0], ((NimStringDesc*) &T839829468_681), LOC11, 2);
}
LA9: ;
/* GC frame setup, then proc section 0 of preinit/init/postinit. */
LOC12 = (Ropeobj178006*)0;
LOC12 = initgcframe_538435_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC12);
LOC13 = (Ropeobj178006*)0;
LOC13 = gensectionstart_530081_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC13);
LOC14 = (Ropeobj178006**)0;
LOC14 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC14));
LOC15 = (Ropeobj178006**)0;
LOC15 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC15));
LOC16 = (Ropeobj178006**)0;
LOC16 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, (*LOC16));
LOC17 = (Ropeobj178006*)0;
LOC17 = gensectionend_530116_2760143328(((Tcprocsection529011) 0));
add_178482_2381377266(&prc0, LOC17);
{
NIM_BOOL LOC20;
/* Frame setup: only when init proc option bit 15 is set and codegen flag
   bit 2 has not been emitted yet (then it is latched). */
LOC20 = (NIM_BOOL)0;
LOC20 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC20)) goto LA21;
LOC20 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 2))&7U)))!=0));
LA21: ;
if (!LOC20) goto LA22;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 2))%(sizeof(NU8)*8));
{
Ropeobj178006* procname0;
Ropeobj178006* LOC28;
Ropeobj178006* LOC29;
/* Codegen flag bit 0 selects between a full frame (module name + file)
   and the bare template T839829468_682. */
if (!!((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0))) goto LA26;
procname0 = makecstring_191638_155036129((*(*(*m0).module).name).s);
LOC28 = (Ropeobj178006*)0;
LOC28 = quotedfilename_196818_155036129((*(*m0).module).info);
LOC29 = (Ropeobj178006*)0;
LOC29 = initframe_560140_839829468((*m0).initproc, procname0, LOC28);
add_178482_2381377266(&prc0, LOC29);
}
goto LA24;
LA26: ;
{
TY533289 LOC31;
Ropeobj178006* LOC32;
memset((void*)LOC31, 0, sizeof(LOC31));
LOC32 = (Ropeobj178006*)0;
LOC32 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_682), LOC31, 0);
add_178482_2381377266(&prc0, LOC32);
}
LA24: ;
}
LA22: ;
/* Proc section 1 of preinit/init/postinit. */
LOC33 = (Ropeobj178006*)0;
LOC33 = gensectionstart_530081_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC33);
LOC34 = (Ropeobj178006**)0;
LOC34 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC34));
LOC35 = (Ropeobj178006**)0;
LOC35 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC35));
LOC36 = (Ropeobj178006**)0;
LOC36 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, (*LOC36));
LOC37 = (Ropeobj178006*)0;
LOC37 = gensectionend_530116_2760143328(((Tcprocsection529011) 1));
add_178482_2381377266(&prc0, LOC37);
/* Proc section 2 of preinit/init/postinit. */
LOC38 = (Ropeobj178006*)0;
LOC38 = gensectionstart_530081_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC38);
LOC39 = (Ropeobj178006**)0;
LOC39 = s_529179_3723162438((*m0).preinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC39));
LOC40 = (Ropeobj178006**)0;
LOC40 = s_529179_3723162438((*m0).initproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC40));
LOC41 = (Ropeobj178006**)0;
LOC41 = s_529179_3723162438((*m0).postinitproc, ((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, (*LOC41));
LOC42 = (Ropeobj178006*)0;
LOC42 = gensectionend_530116_2760143328(((Tcprocsection529011) 2));
add_178482_2381377266(&prc0, LOC42);
{
NIM_BOOL LOC45;
Ropeobj178006* LOC49;
/* Matching frame teardown: option bit 15 set and codegen flag bit 0 clear. */
LOC45 = (NIM_BOOL)0;
LOC45 = (((*(*m0).initproc).options &(1U<<((NU)(((Toption169009) 15))&31U)))!=0);
if (!(LOC45)) goto LA46;
LOC45 = !((((*m0).flags &(1U<<((NU)(((Codegenflag529025) 0))&7U)))!=0));
LA46: ;
if (!LOC45) goto LA47;
LOC49 = (Ropeobj178006*)0;
LOC49 = deinitframe_560150_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC49);
}
LA47: ;
LOC50 = (Ropeobj178006*)0;
LOC50 = deinitgcframe_538441_839829468((*m0).initproc);
add_178482_2381377266(&prc0, LOC50);
/* Close the init proc (T839829468_683), then open the data-init proc with
   the same header format (T839829468_679). */
memset((void*)LOC51, 0, sizeof(LOC51));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC51, 0);
memset((void*)LOC52, 0, sizeof(LOC52));
LOC52[0] = getdatinitname_562239_839829468((*m0).module);
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_679), LOC52, 1);
{
Tcfilesection529005 i_562401_839829468;
NI res_562482_839829468;
/* Splice file sections 12..16 (each wrapped in start/end markers) into
   the data-init proc body. */
i_562401_839829468 = (Tcfilesection529005)0;
res_562482_839829468 = ((NI) 12);
{
while (1) {
Ropeobj178006* LOC56;
Ropeobj178006* LOC57;
if (!(res_562482_839829468 <= ((NI) 16))) goto LA55;
i_562401_839829468 = ((Tcfilesection529005) (res_562482_839829468));
LOC56 = (Ropeobj178006*)0;
LOC56 = gensectionstart_530015_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC56);
add_178482_2381377266(&prc0, (*m0).s[(i_562401_839829468)- 0]);
LOC57 = (Ropeobj178006*)0;
LOC57 = gensectionend_530050_2760143328(i_562401_839829468);
add_178482_2381377266(&prc0, LOC57);
res_562482_839829468 += ((NI) 1);
} LA55: ;
}
}
/* Close the data-init proc and flush everything into file section 11. */
memset((void*)LOC58, 0, sizeof(LOC58));
addf_179205_2381377266(&prc0, ((NimStringDesc*) &T839829468_683), LOC58, 0);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], prc0);
{
NIM_CHAR i_562442_839829468;
Ropeobj178006* el_562443_839829468;
TY529136 HEX3Atmp_562487_839829468;
NIM_CHAR i_562490_839829468;
/* Iterate extension loaders indexed by the characters '0'..'9' (48..57);
   each non-nil entry is wrapped in template T839829468_684 with its digit
   value and appended to section 11. */
i_562442_839829468 = (NIM_CHAR)0;
el_562443_839829468 = (Ropeobj178006*)0;
memset((void*)HEX3Atmp_562487_839829468, 0, sizeof(HEX3Atmp_562487_839829468));
memcpy((void*)HEX3Atmp_562487_839829468, (NIM_CONST void*)(*m0).extensionloaders, sizeof(HEX3Atmp_562487_839829468));
i_562490_839829468 = 48;
{
if (!((NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))) <= (NU8)(57))) goto LA62;
{
while (1) {
i_562442_839829468 = i_562490_839829468;
el_562443_839829468 = HEX3Atmp_562487_839829468[(((NU8)(i_562490_839829468)))- 48];
{
Ropeobj178006* ex0;
TY532811 LOC70;
if (!!((el_562443_839829468 == NIM_NIL))) goto LA68;
memset((void*)LOC70, 0, sizeof(LOC70));
LOC70[0] = rope_178401_2381377266(((NI64) ((NI)(((NI) (((NU8)(i_562442_839829468)))) - ((NI) 48)))));
LOC70[1] = el_562443_839829468;
ex0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_684), LOC70, 2);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 11))- 0], ex0);
}
LA68: ;
{
/* Exit after processing '9' — bound check precedes the increment so the
   char counter never overflows. */
if (!((NU8)(57) <= (NU8)(((NIM_CHAR) (((NU8)(i_562490_839829468))))))) goto LA73;
goto LA64;
}
LA73: ;
i_562490_839829468 += ((NI) 1);
}
} LA64: ;
}
LA62: ;
}
}
N_NIMCALL(void, finishtypedescriptions_535842_839829468)(Tcgen529027* m0) {
	/* Drain the module's pending type stack by emitting a descriptor for
	   every queued type.  The length is re-read on each iteration because
	   gettypedesc may push further types while we drain — presumably the
	   descriptor itself is cached module-side; the return value is unused. */
	NI idx;
	for (idx = ((NI) 0);
	     idx < ((*m0).typestack ? (*m0).typestack->Sup.len : 0);
	     idx += ((NI) 1)) {
		Ropeobj178006* desc = gettypedesc_535673_839829468(m0, (*m0).typestack->data[idx]);
		(void)desc;
	}
}
N_NIMCALL(Ropeobj178006*, getcopyright_561665_839829468)(NimStringDesc* cfile0) {
/* Build the banner comment for a generated C file.  With global option
   bit 4 set, a short one-argument banner (template T839829468_685) is
   produced; otherwise a five-argument banner (T839829468_687) carrying
   the version string, target OS / CPU / C-compiler names (Field0 of the
   respective descriptor tables) and the full compile command for
   `cfile0`. */
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
{
TY178507 LOC5;
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 4))&63U)))!=0)) goto LA3;
memset((void*)LOC5, 0, sizeof(LOC5));
LOC5[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_685), LOC5, 1);
}
goto LA1;
LA3: ;
{
TY536475 LOC7;
NimStringDesc* LOC8;
memset((void*)LOC7, 0, sizeof(LOC7));
LOC7[0] = rope_178277_2381377266(((NimStringDesc*) &T839829468_686));
/* Descriptor tables are 1-based: subtract 1 when indexing with the enum. */
LOC7[1] = rope_178277_2381377266(Os_176068_4151366050[(targetos_176629_4151366050)- 1].Field0);
LOC7[2] = rope_178277_2381377266(Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field0);
LOC7[3] = rope_178277_2381377266(Cc_273413_2528170400[(ccompiler_273431_2528170400)- 1].Field0);
LOC8 = (NimStringDesc*)0;
LOC8 = getcompilecfilecmd_274284_2528170400(cfile0, NIM_FALSE);
LOC7[4] = rope_178277_2381377266(LOC8);
result0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_687), LOC7, 5);
}
LA1: ;
return result0;
}
/* Machine-generated (Nim cgen). Appends a formatted line to *result0 built
 * from template T..._688 plus the platform newline (tnl), substituting the
 * numeric Field1 of the target CPU entry -- presumably the integer size used
 * by the generated int-type defines; confirm against the Nim template. */
static N_INLINE(void, addinttypes_561659_839829468)(Ropeobj178006** result0) {
NimStringDesc* LOC1;
TY178507 LOC2;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + 22);
appendString(LOC1, ((NimStringDesc*) &T839829468_688));
appendString(LOC1, tnl_176644_4151366050);
memset((void*)LOC2, 0, sizeof(LOC2));
LOC2[0] = rope_178401_2381377266(((NI64) (Cpu_176496_4151366050[(targetcpu_176627_4151366050)- 1].Field1)));
addf_179205_2381377266(result0, LOC1, LOC2, 1);
}
/* Machine-generated (Nim cgen). Produces the full file header rope:
 * copyright banner (getcopyright) followed by the int-type defines
 * (addinttypes). */
N_NIMCALL(Ropeobj178006*, getfileheader_561683_839829468)(NimStringDesc* cfile0) {
Ropeobj178006* result0;
result0 = (Ropeobj178006*)0;
result0 = getcopyright_561665_839829468(cfile0);
addinttypes_561659_839829468(&result0);
return result0;
}
/* Machine-generated (Nim cgen). If the thread-var rope (nimtv) is non-nil and
 * either codegen flag 1 of the module state or symbol flag 12 of the module is
 * set, first emits type descriptions for every type in nimtvdeps (results are
 * discarded; called for side effects), then appends template T..._689 filled
 * with nimtv to output section 4. */
N_NIMCALL(void, generatethreadlocalstorage_538717_839829468)(Tcgen529027* m0) {
{
NIM_BOOL LOC3;
NIM_BOOL LOC5;
TY178507 LOC13;
LOC3 = (NIM_BOOL)0;
LOC3 = !((nimtv_538656_839829468 == NIM_NIL));
if (!(LOC3)) goto LA4;
LOC5 = (NIM_BOOL)0;
LOC5 = (((*m0).flags &(1U<<((NU)(((Codegenflag529025) 1))&7U)))!=0);
if (LOC5) goto LA6;
LOC5 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0);
LA6: ;
LOC3 = LOC5;
LA4: ;
if (!LOC3) goto LA7;
{
Ttype292840* t_538761_839829468;
NI i_538768_839829468;
NI L_538770_839829468;
t_538761_839829468 = (Ttype292840*)0;
i_538768_839829468 = ((NI) 0);
L_538770_839829468 = (nimtvdeps_538674_839829468 ? nimtvdeps_538674_839829468->Sup.len : 0);
{
while (1) {
Ropeobj178006* LOC12;
if (!(i_538768_839829468 < L_538770_839829468)) goto LA11;
t_538761_839829468 = nimtvdeps_538674_839829468->data[i_538768_839829468];
LOC12 = (Ropeobj178006*)0;
LOC12 = gettypedesc_535673_839829468(m0, t_538761_839829468);
i_538768_839829468 += ((NI) 1);
} LA11: ;
}
}
memset((void*)LOC13, 0, sizeof(LOC13));
LOC13[0] = nimtv_538656_839829468;
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 4))- 0], ((NimStringDesc*) &T839829468_689), LOC13, 1);
}
LA7: ;
}
/* Machine-generated (Nim cgen). Emits #include lines into output section 1.
 * First appends a separator (T..._690 surrounded by newlines), then walks the
 * module's headerfiles linked list. For each entry:
 *   - first char '#' (35): emitted verbatim after replacing backticks (96)
 *     with double quotes (34);
 *   - first char neither '"' (34) nor '<' (60): formatted via template
 *     T..._691 -- presumably wrapping the name in quotes; confirm template;
 *   - otherwise: formatted via template T..._692. */
N_NIMCALL(void, generateheaders_560104_839829468)(Tcgen529027* m0) {
NimStringDesc* LOC1;
Tstrentry147009* it0;
LOC1 = (NimStringDesc*)0;
LOC1 = rawNewString(tnl_176644_4151366050->Sup.len + tnl_176644_4151366050->Sup.len + 20);
appendString(LOC1, tnl_176644_4151366050);
appendString(LOC1, ((NimStringDesc*) &T839829468_690));
appendString(LOC1, tnl_176644_4151366050);
add_178487_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC1);
it0 = ((Tstrentry147009*) ((*m0).headerfiles.head));
{
while (1) {
if (!!((it0 == NIM_NIL))) goto LA3;
{
NimStringDesc* LOC8;
NimStringDesc* LOC9;
Ropeobj178006* LOC10;
if (!((NU8)((*it0).data->data[((NI) 0)]) == (NU8)(35))) goto LA6;
LOC8 = (NimStringDesc*)0;
LOC9 = (NimStringDesc*)0;
LOC9 = nsuReplaceChar((*it0).data, 96, 34);
LOC8 = rawNewString(LOC9->Sup.len + tnl_176644_4151366050->Sup.len + 0);
appendString(LOC8, LOC9);
appendString(LOC8, tnl_176644_4151366050);
LOC10 = (Ropeobj178006*)0;
LOC10 = rope_178277_2381377266(LOC8);
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], LOC10);
}
goto LA4;
LA6: ;
{
TY178507 LOC14;
if (!!((((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(34)) || ((NU8)((*it0).data->data[((NI) 0)])) == ((NU8)(60))))) goto LA12;
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_691), LOC14, 1);
}
goto LA4;
LA12: ;
{
TY178507 LOC16;
memset((void*)LOC16, 0, sizeof(LOC16));
LOC16[0] = rope_178277_2381377266((*it0).data);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 1))- 0], ((NimStringDesc*) &T839829468_692), LOC16, 1);
}
LA4: ;
it0 = ((Tstrentry147009*) ((*it0).Sup.next));
} LA3: ;
}
}
/* Machine-generated (Nim cgen). Assembles the complete C source for one
 * module as a rope: file header, merge info, thread-local storage, headers,
 * then sections 1..10 each wrapped in gensectionstart/gensectionend markers,
 * and finally section 11 appended unwrapped. */
N_NIMCALL(Ropeobj178006*, genmodule_562491_839829468)(Tcgen529027* m0, NimStringDesc* cfile0) {
Ropeobj178006* result0;
Ropeobj178006* LOC1;
result0 = (Ropeobj178006*)0;
result0 = getfileheader_561683_839829468(cfile0);
LOC1 = (Ropeobj178006*)0;
LOC1 = genmergeinfo_530203_2760143328(m0);
add_178482_2381377266(&result0, LOC1);
generatethreadlocalstorage_538717_839829468(m0);
generateheaders_560104_839829468(m0);
{
Tcfilesection529005 i_562614_839829468;
NI res_562622_839829468;
i_562614_839829468 = (Tcfilesection529005)0;
res_562622_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC5;
Ropeobj178006* LOC6;
if (!(res_562622_839829468 <= ((NI) 10))) goto LA4;
i_562614_839829468 = ((Tcfilesection529005) (res_562622_839829468));
LOC5 = (Ropeobj178006*)0;
LOC5 = gensectionstart_530015_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC5);
add_178482_2381377266(&result0, (*m0).s[(i_562614_839829468)- 0]);
LOC6 = (Ropeobj178006*)0;
LOC6 = gensectionend_530050_2760143328(i_562614_839829468);
add_178482_2381377266(&result0, LOC6);
res_562622_839829468 += ((NI) 1);
} LA4: ;
}
}
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
return result0;
}
/* Machine-generated (Nim cgen). Refreshes a module restored from the compile
 * cache: when mergerequired reports true and symbol flag 12 of the module is
 * clear, the old C file is merged, init code and type descriptions are
 * regenerated, the module is re-emitted to cfile0 and scheduled for
 * compilation. The object file is always scheduled for linking. */
N_NIMCALL(void, updatecachedmodule_563813_839829468)(Tcgen529027* m0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563201_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = mergerequired_530832_2760143328(m0);
if (!(LOC3)) goto LA4;
LOC3 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA4: ;
if (!LOC3) goto LA5;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
LA5: ;
addfiletolink_273872_2528170400(cfilenoext0);
}
/* Machine-generated (Nim cgen). If the thread-var rope (nimtv) is non-nil,
 * appends template T..._694 to output section 10, substituting a prefix
 * string: T..._693 when the current command is not command 2 and symbol
 * flag 27 of the module is set, otherwise the empty string T..._490.
 * NOTE(review): T..._693 is presumably an extern/export qualifier for the
 * emitted size symbol -- confirm against the Nim source of cgen. */
N_NIMCALL(void, generatethreadvarssize_538771_839829468)(Tcgen529027* m0) {
{
NimStringDesc* externc0;
TY178507 LOC12;
if (!!((nimtv_538656_839829468 == NIM_NIL))) goto LA3;
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = !((gcmd_169132_2607990831 == ((Tcommands169076) 2)));
if (!(LOC7)) goto LA8;
LOC7 = (((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 27))&31U)))!=0);
LA8: ;
if (!LOC7) goto LA9;
externc0 = copyString(((NimStringDesc*) &T839829468_693));
}
goto LA5;
LA9: ;
{
externc0 = copyString(((NimStringDesc*) &T839829468_490));
}
LA5: ;
memset((void*)LOC12, 0, sizeof(LOC12));
LOC12[0] = rope_178277_2381377266(externc0);
addf_179205_2381377266(&(*m0).s[(((Tcfilesection529005) 10))- 0], ((NimStringDesc*) &T839829468_694), LOC12, 1);
}
LA3: ;
}
/* Machine-generated (Nim cgen). Decides whether cfile0 must be recompiled.
 * Defaults to true. When global-option bit 1 is clear: if the on-disk file
 * differs (writeropeifnotequal wrote it) -> true immediately; else if the
 * object file exists and is newer than the C file -> false. When bit 1 is
 * set, the rope is always written and the default true is returned. */
N_NIMCALL(NIM_BOOL, shouldrecompile_563621_839829468)(Ropeobj178006* code0, NimStringDesc* cfile0) {
NIM_BOOL result0;
{ result0 = (NIM_BOOL)0;
result0 = NIM_TRUE;
{
NimStringDesc* objfile0;
if (!!(((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0))) goto LA3;
objfile0 = toobjfile_273859_2528170400(cfile0);
{
NIM_BOOL LOC7;
LOC7 = (NIM_BOOL)0;
LOC7 = writeropeifnotequal_179511_2381377266(code0, cfile0);
if (!LOC7) goto LA8;
goto BeforeRet;
}
LA8: ;
{
NIM_BOOL LOC12;
LOC12 = (NIM_BOOL)0;
LOC12 = nosexistsFile(objfile0);
if (!(LOC12)) goto LA13;
LOC12 = nosfileNewer(objfile0, cfile0);
LA13: ;
if (!LOC12) goto LA14;
result0 = NIM_FALSE;
}
LA14: ;
}
goto LA1;
LA3: ;
{
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
}
LA1: ;
}BeforeRet: ;
return result0;
}
/* Machine-generated (Nim cgen). Writes one module's C file and schedules it
 * for compilation/linking. Three mutually exclusive paths:
 *   1) module not from cache, or global-option bit 1 set: regenerate init
 *      code and type descriptions, optionally (symbol flag 12) add the main
 *      module procs and thread-var size, then compile only if
 *      shouldrecompile says so;
 *   2) pending0 and a merge is required and flag 12 is clear: merge the old
 *      file, regenerate, write unconditionally and compile;
 *   3) otherwise: compile only if the object file is missing.
 * The object file is always added to the link list. */
N_NIMCALL(void, writemodule_563637_839829468)(Tcgen529027* m0, NIM_BOOL pending0) {
NimStringDesc* cfile0;
NimStringDesc* cfilenoext0;
cfile0 = getcfile_563201_839829468(m0);
cfilenoext0 = noschangeFileExt(cfile0, ((NimStringDesc*) &T839829468_490));
{
NIM_BOOL LOC3;
Ropeobj178006* code0;
LOC3 = (NIM_BOOL)0;
LOC3 = !((*m0).Sup.fromcache);
if (LOC3) goto LA4;
LOC3 = ((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 1))&63U)))!=0);
LA4: ;
if (!LOC3) goto LA5;
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0)) goto LA9;
add_178482_2381377266(&(*m0).s[(((Tcfilesection529005) 7))- 0], mainmodprocs_529148_3723162438);
generatethreadvarssize_538771_839829468(m0);
}
LA9: ;
code0 = genmodule_562491_839829468(m0, cfile0);
{
NIM_BOOL LOC13;
LOC13 = (NIM_BOOL)0;
LOC13 = shouldrecompile_563621_839829468(code0, cfile0);
if (!LOC13) goto LA14;
addfiletocompile_273863_2528170400(cfile0);
}
LA14: ;
}
goto LA1;
LA5: ;
{
NIM_BOOL LOC17;
NIM_BOOL LOC18;
Ropeobj178006* code0;
LOC17 = (NIM_BOOL)0;
LOC18 = (NIM_BOOL)0;
LOC18 = pending0;
if (!(LOC18)) goto LA19;
LOC18 = mergerequired_530832_2760143328(m0);
LA19: ;
LOC17 = LOC18;
if (!(LOC17)) goto LA20;
LOC17 = !((((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 12))&31U)))!=0));
LA20: ;
if (!LOC17) goto LA21;
mergefiles_531241_2760143328(cfile0, m0);
geninitcode_562286_839829468(m0);
finishtypedescriptions_535842_839829468(m0);
code0 = genmodule_562491_839829468(m0, cfile0);
writerope_178836_2381377266(code0, cfile0, NIM_FALSE);
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA21: ;
{
NimStringDesc* LOC24;
NIM_BOOL LOC25;
LOC24 = (NimStringDesc*)0;
LOC24 = toobjfile_273859_2528170400(cfilenoext0);
LOC25 = (NIM_BOOL)0;
LOC25 = nosexistsFile(LOC24);
if (!!(LOC25)) goto LA26;
addfiletocompile_273863_2528170400(cfile0);
}
goto LA1;
LA26: ;
LA1: ;
addfiletolink_273872_2528170400(cfilenoext0);
}
/* Machine-generated (Nim cgen). Emits the generated C header file for the
 * --header option: copyright banner, an include guard derived from the file
 * name (templates T..._695/_696), int-type defines, the module's headers and
 * thread-local storage, sections 1..10 with start/end markers, section 11,
 * an optional extra chunk when global-option bit 8 is set, and the closing
 * templates T..._697/_698 (presumably extern "C" close + #endif guard --
 * confirm against the template strings). Written to (*m0).filename. */
N_NIMCALL(void, writeheader_563149_839829468)(Tcgen529027* m0) {
Ropeobj178006* result0;
Ropeobj178006* guard0;
TY178507 LOC1;
TY128506 LOC2;
TY178507 LOC3;
TY533289 LOC13;
TY178507 LOC14;
result0 = getcopyright_561665_839829468((*m0).filename);
memset((void*)LOC1, 0, sizeof(LOC1));
memset((void*)(&LOC2), 0, sizeof(LOC2));
nossplitFile((*m0).filename, (&LOC2));
LOC1[0] = rope_178277_2381377266(LOC2.Field1);
guard0 = HEX25_178905_2381377266(((NimStringDesc*) &T839829468_695), LOC1, 1);
memset((void*)LOC3, 0, sizeof(LOC3));
LOC3[0] = guard0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_696), LOC3, 1);
addinttypes_561659_839829468(&result0);
generateheaders_560104_839829468(m0);
generatethreadlocalstorage_538717_839829468(m0);
{
Tcfilesection529005 i_563171_839829468;
NI res_563197_839829468;
i_563171_839829468 = (Tcfilesection529005)0;
res_563197_839829468 = ((NI) 1);
{
while (1) {
Ropeobj178006* LOC7;
Ropeobj178006* LOC8;
if (!(res_563197_839829468 <= ((NI) 10))) goto LA6;
i_563171_839829468 = ((Tcfilesection529005) (res_563197_839829468));
LOC7 = (Ropeobj178006*)0;
LOC7 = gensectionstart_530015_2760143328(i_563171_839829468);
add_178482_2381377266(&result0, LOC7);
add_178482_2381377266(&result0, (*m0).s[(i_563171_839829468)- 0]);
LOC8 = (Ropeobj178006*)0;
LOC8 = gensectionend_530050_2760143328(i_563171_839829468);
add_178482_2381377266(&result0, LOC8);
res_563197_839829468 += ((NI) 1);
} LA6: ;
}
}
add_178482_2381377266(&result0, (*m0).s[(((Tcfilesection529005) 11))- 0]);
{
if (!((gglobaloptions_169130_2607990831 &((NU64)1<<((NU)(((Tglobaloption169013) 8))&63U)))!=0)) goto LA11;
add_178487_2381377266(&result0, ((NimStringDesc*) &T839829468_22));
}
LA11: ;
memset((void*)LOC13, 0, sizeof(LOC13));
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_697), LOC13, 0);
memset((void*)LOC14, 0, sizeof(LOC14));
LOC14[0] = guard0;
addf_179205_2381377266(&result0, ((NimStringDesc*) &T839829468_698), LOC14, 1);
writerope_178836_2381377266(result0, (*m0).filename, NIM_FALSE);
}
/* Machine-generated (Nim cgen). Top-level driver that flushes all generated
 * modules to disk at the end of compilation:
 *   1) finish the generated header module if one exists;
 *   2) while forwarded procs remain, finish every non-nil, non-cached module
 *      (finishing modules can create more forwarded procs, hence the loop);
 *   3) for every non-nil module, either refresh it from the cache
 *      (updatecachedmodule) or write it out (writemodule with pending=true);
 *   4) write the file mapping and, if present, the generated header. */
N_NIMCALL(void, cgenwritemodules_563902_839829468)(void) {
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA3;
finishmodule_563420_839829468(generatedheader_532201_839829468);
}
LA3: ;
{
while (1) {
if (!(((NI) 0) < gforwardedprocscounter_529171_3723162438)) goto LA6;
{
Tcgen529027* m_563916_839829468;
m_563916_839829468 = (Tcgen529027*)0;
{
NI i_563935_839829468;
NI HEX3Atmp_563937_839829468;
NI res_563939_839829468;
i_563935_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (NI)0;
HEX3Atmp_563937_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563939_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563939_839829468 <= HEX3Atmp_563937_839829468)) goto LA10;
i_563935_839829468 = res_563939_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563935_839829468] == NIM_NIL))) goto LA13;
m_563916_839829468 = gmodules_529170_3723162438->data[i_563935_839829468];
{
if (!!((*m_563916_839829468).Sup.fromcache)) goto LA17;
finishmodule_563420_839829468(m_563916_839829468);
}
LA17: ;
}
LA13: ;
res_563939_839829468 += ((NI) 1);
} LA10: ;
}
}
}
} LA6: ;
}
{
Tcgen529027* m_563917_839829468;
m_563917_839829468 = (Tcgen529027*)0;
{
NI i_563946_839829468;
NI HEX3Atmp_563948_839829468;
NI res_563950_839829468;
i_563946_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (NI)0;
HEX3Atmp_563948_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563950_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563950_839829468 <= HEX3Atmp_563948_839829468)) goto LA22;
i_563946_839829468 = res_563950_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563946_839829468] == NIM_NIL))) goto LA25;
m_563917_839829468 = gmodules_529170_3723162438->data[i_563946_839829468];
{
if (!(*m_563917_839829468).Sup.fromcache) goto LA29;
updatecachedmodule_563813_839829468(m_563917_839829468);
}
goto LA27;
LA29: ;
{
writemodule_563637_839829468(m_563917_839829468, NIM_TRUE);
}
LA27: ;
}
LA25: ;
res_563950_839829468 += ((NI) 1);
} LA22: ;
}
}
}
writemapping_274789_2528170400(gmapping_529152_3723162438);
{
if (!!((generatedheader_532201_839829468 == NIM_NIL))) goto LA34;
writeheader_563149_839829468(generatedheader_532201_839829468);
}
LA34: ;
}
/* Machine-generated (Nim cgen). Sets every rope in the section array
 * arr0[0..17] to nil via the GC-aware assignment helper. */
N_NIMCALL(void, nullify_562833_839829468)(Ropeobj178006** arr0) {
{
Tcfilesection529005 i_562848_839829468;
NI res_562853_839829468;
i_562848_839829468 = (Tcfilesection529005)0;
res_562853_839829468 = ((NI) 0);
{
while (1) {
if (!(res_562853_839829468 <= ((NI) 17))) goto LA3;
i_562848_839829468 = ((Tcfilesection529005) (res_562853_839829468));
unsureAsgnRef((void**) (&arr0[(i_562848_839829468)- 0]), NIM_NIL);
res_562853_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Machine-generated (Nim cgen). Sets to nil every rope in an array indexed
 * by the characters '0'..'9' (ASCII 48..57), offset so '0' maps to index 0. */
N_NIMCALL(void, nullify_562858_839829468)(Ropeobj178006** arr0) {
{
NIM_CHAR i_563014_839829468;
NI res_563019_839829468;
i_563014_839829468 = (NIM_CHAR)0;
res_563019_839829468 = ((NI) 48);
{
while (1) {
if (!(res_563019_839829468 <= ((NI) 57))) goto LA3;
i_563014_839829468 = ((NIM_CHAR) (res_563019_839829468));
unsureAsgnRef((void**) (&arr0[(((NU8)(i_563014_839829468)))- 48]), NIM_NIL);
res_563019_839829468 += ((NI) 1);
} LA3: ;
}
}
}
/* Machine-generated (Nim cgen). Reinitializes all per-module codegen state:
 * header list, declared-proto set, forward-type cache, init/preinit/postinit
 * procs, data cache, type/forwarded-proc seqs, temp names, codegen flag 0
 * (mirroring symbol flag 13 of the module), all rope sections, counters and
 * extension loaders. Finally marks the module as restored-from-cache. */
N_NIMCALL(void, resetmodule_562763_839829468)(Tcgen529027* m0) {
initlinkedlist_147031_3771138726((&(*m0).headerfiles));
initintset_268885_2627731572((&(*m0).declaredprotos));
initidtable_296019_850551059((&(*m0).forwtypecache));
asgnRef((void**) (&(*m0).initproc), newproc_529206_3723162438(NIM_NIL, m0));
(*(*m0).initproc).options = initprocoptions_562635_839829468(m0);
asgnRef((void**) (&(*m0).preinitproc), newpreinitproc_562625_839829468(m0));
asgnRef((void**) (&(*m0).postinitproc), newpostinitproc_562630_839829468(m0));
initnodetable_296085_850551059((&(*m0).datacache));
if ((*m0).typestack) nimGCunrefNoCycle((*m0).typestack);
(*m0).typestack = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
if ((*m0).forwardedprocs) nimGCunrefNoCycle((*m0).forwardedprocs);
(*m0).forwardedprocs = (Tsymseq292804*) newSeqRC1((&NTI292804), 0);
asgnRefNoCycle((void**) (&(*m0).typenodesname), gettempname_533598_839829468(m0));
asgnRefNoCycle((void**) (&(*m0).nimtypesname), gettempname_533598_839829468(m0));
{
if (!(((*(*m0).module).flags &(1U<<((NU)(((Tsymflag292184) 13))&31U)))!=0)) goto LA3;
(*m0).flags |= ((NU8)1)<<((((Codegenflag529025) 0))%(sizeof(NU8)*8));
}
goto LA1;
LA3: ;
{
(*m0).flags &= ~(((NU8)1) << ((((Codegenflag529025) 0)) % (sizeof(NU8)*8)));
}
LA1: ;
nullify_562833_839829468((*m0).s);
(*m0).typenodes = ((NI) 0);
(*m0).nimtypes = ((NI) 0);
nullify_562858_839829468((*m0).extensionloaders);
(*m0).Sup.fromcache = NIM_TRUE;
}
/* Machine-generated (Nim cgen). Calls resetmodule on every non-nil entry of
 * the global gmodules seq. */
N_NIMCALL(void, resetcgenmodules_563024_839829468)(void) {
{
Tcgen529027* m_563026_839829468;
m_563026_839829468 = (Tcgen529027*)0;
{
NI i_563031_839829468;
NI HEX3Atmp_563033_839829468;
NI res_563035_839829468;
i_563031_839829468 = (NI)0;
HEX3Atmp_563033_839829468 = (NI)0;
HEX3Atmp_563033_839829468 = (gmodules_529170_3723162438 ? (gmodules_529170_3723162438->Sup.len-1) : -1);
res_563035_839829468 = ((NI) 0);
{
while (1) {
if (!(res_563035_839829468 <= HEX3Atmp_563033_839829468)) goto LA4;
i_563031_839829468 = res_563035_839829468;
{
if (!!((gmodules_529170_3723162438->data[i_563031_839829468] == NIM_NIL))) goto LA7;
m_563026_839829468 = gmodules_529170_3723162438->data[i_563031_839829468];
resetmodule_562763_839829468(m_563026_839829468);
}
LA7: ;
res_563035_839829468 += ((NI) 1);
} LA4: ;
}
}
}
}
/* Machine-generated (Nim cgen). Module init for compiler/cgen: registers GC
 * markers for module-level globals, seeds the indent rope, allocates the
 * empty nimtvdeps seq, resets/initializes the nimtvdeclared int set and
 * zeroes the breakpoint counter. */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenInit000)(void) {
nimRegisterGlobalMarker(T839829468_2);
nimRegisterGlobalMarker(T839829468_3);
nimRegisterGlobalMarker(T839829468_5);
nimRegisterGlobalMarker(T839829468_6);
nimRegisterGlobalMarker(T839829468_7);
nimRegisterGlobalMarker(T839829468_8);
asgnRefNoCycle((void**) (&indent_532655_839829468), rope_178277_2381377266(((NimStringDesc*) &T839829468_4)));
if (nimtvdeps_538674_839829468) nimGCunrefNoCycle(nimtvdeps_538674_839829468);
nimtvdeps_538674_839829468 = (Ttypeseq292836*) newSeqRC1((&NTI292836), 0);
chckNil((void*)(&nimtvdeclared_538675_839829468));
genericReset((void*)(&nimtvdeclared_538675_839829468), (&NTI268030));
initintset_268885_2627731572((&nimtvdeclared_538675_839829468));
breakpointid_548860_839829468 = ((NI) 0);
}
/* Machine-generated (Nim cgen). Data-section initializer; intentionally
 * empty for this module. */
NIM_EXTERNC N_NOINLINE(void, compiler_cgenDatInit000)(void) {
}
|
matvec.h | #ifndef MATVEC_H
#define MATVEC_H
#include <stdlib.h> // malloc and free.
#include <stdio.h> // printf
#include <omp.h>
// The function as given in the exercise.
/* Dense matrix-vector product: x = A * y, where A is an m-by-n row-major
 * matrix, y is the length-n input vector and x is the length-m output. */
void dense_mat_vec(int m, int n, double *x, double *A, double *y)
{
    for (int row = 0; row < m; row++) {
        const double *a_row = &A[row * n];
        double acc = 0.;
        for (int col = 0; col < n; col++)
            acc += a_row[col] * y[col];
        x[row] = acc;
    }
}
void dense_mat_vec_omp(int m, int n, double *x, double *A, double *y)
{
int i, j;
#ifdef _OPENMP
#pragma omp parallel for private(j) // So simple!
#endif
for (i=0; i<m; i++){
double tmp = 0.;
for (j=0; j<n; j++)
tmp += A[i*n+j] * y[j];
x[i] = tmp;
}
}
#endif
|
channel_shuffle.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_CHANNEL_SHUFFLE_H_
#define MACE_KERNELS_CHANNEL_SHUFFLE_H_
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/kernels/kernel.h"
namespace mace {
namespace kernels {
// CPU implementation of channel shuffle (as used after grouped convolutions).
// For each output channel c, data is copied from input channel
// (g * channels_per_group + idx), where g = c % groups_ and idx = c / groups_,
// i.e. channels are interleaved across the `groups_` groups.
// Assumes NCHW layout: dims are (batch, channels, height, width) per the
// dim(0..3) reads below. NOTE(review): channels is presumably required to be
// divisible by groups_ -- not checked here; confirm against the op builder.
template<DeviceType D, typename T>
struct ChannelShuffleFunctor : OpKernel {
ChannelShuffleFunctor(OpKernelContext *context, const int groups)
: OpKernel(context), groups_(groups) {}
// Shuffles `input` into `output` (resized to match input). `future` is unused.
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future) {
MACE_UNUSED(future);
MACE_RETURN_IF_ERROR(output->ResizeLike(input));
Tensor::MappingGuard logits_guard(input);
Tensor::MappingGuard output_guard(output);
const T *input_ptr = input->data<T>();
T *output_ptr = output->mutable_data<T>();
index_t batch = input->dim(0);
index_t channels = input->dim(1);
index_t height = input->dim(2);
index_t width = input->dim(3);
index_t image_size = height * width;
index_t batch_size = channels * image_size;
index_t channels_per_group = channels / groups_;
#pragma omp parallel for collapse(2)
for (index_t b = 0; b < batch; ++b) {
for (index_t c = 0; c < channels; ++c) {
const T *input_base = input_ptr + b * batch_size;
T *output_base = output_ptr + b * batch_size;
// Source channel for output channel c.
index_t g = c % groups_;
index_t idx = c / groups_;
for (index_t hw = 0; hw < height * width; ++hw) {
output_base[c * image_size + hw] = input_base[
(g * channels_per_group + idx) * image_size + hw];
}
}
}
return MACE_SUCCESS;
}
const int groups_;  // number of channel groups to shuffle across
};
#ifdef MACE_ENABLE_OPENCL
// Abstract interface for the OpenCL (GPU) channel-shuffle kernel; concrete
// implementations are registered elsewhere and invoked via Compute().
class OpenCLChannelShuffleKernel {
public:
virtual MaceStatus Compute(
OpKernelContext *context,
const Tensor *input,
Tensor *output,
StatsFuture *future) = 0;
MACE_VIRTUAL_EMPTY_DESTRUCTOR(OpenCLChannelShuffleKernel);
};
// GPU specialization: delegates to an OpenCLChannelShuffleKernel instance.
// operator() is declared here and defined in the corresponding .cc file.
template<typename T>
struct ChannelShuffleFunctor<DeviceType::GPU, T> : OpKernel {
ChannelShuffleFunctor(OpKernelContext *context, const int groups);
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future);
std::unique_ptr<OpenCLChannelShuffleKernel> kernel_;  // owned OpenCL kernel
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_CHANNEL_SHUFFLE_H_
|
plot.h | #ifndef OPENMC_PLOT_H
#define OPENMC_PLOT_H
#include <unordered_map>
#include <sstream>
#include "pugixml.hpp"
#include "xtensor/xarray.hpp"
#include "hdf5.h"
#include "openmc/position.h"
#include "openmc/constants.h"
#include "openmc/cell.h"
#include "openmc/geometry.h"
#include "openmc/particle.h"
#include "openmc/xml_interface.h"
namespace openmc {
//===============================================================================
// Global variables
//===============================================================================
class Plot;
namespace model {
extern std::vector<Plot> plots; //!< Plot instance container
extern std::unordered_map<int, int> plot_map; //!< map of plot ids to index
} // namespace model
//===============================================================================
// RGBColor holds color information for plotted objects
//===============================================================================
// RGBColor holds one 8-bit-per-channel color for plotted objects.
struct RGBColor {
  // Constructors
  RGBColor() = default;
  RGBColor(const int v[3]) : RGBColor(v[0], v[1], v[2]) {}
  RGBColor(int r, int g, int b) : red(r), green(g), blue(b) {}
  RGBColor(const std::vector<int>& v)
  {
    if (v.size() != 3) {
      throw std::out_of_range("Incorrect vector size for RGBColor.");
    }
    red = v[0];
    green = v[1];
    blue = v[2];
  }
  // Members
  uint8_t red{0}, green{0}, blue{0};
};
typedef xt::xtensor<RGBColor, 2> ImageData;
// Per-pixel cell/material id map for a plot; constructor and set_value are
// defined in the corresponding .cpp.
struct IdData {
// Constructor
IdData(size_t h_res, size_t v_res);
// Methods
// Records ids at pixel (y, x) from the particle's coordinate level.
void set_value(size_t y, size_t x, const Particle& p, int level);
// Members
xt::xtensor<int32_t, 3> data_; //!< 2D array of cell & material ids
};
// Per-pixel temperature/density map for a plot; constructor and set_value
// are defined in the corresponding .cpp.
struct PropertyData {
// Constructor
PropertyData(size_t h_res, size_t v_res);
// Methods
// Records properties at pixel (y, x) from the particle's coordinate level.
void set_value(size_t y, size_t x, const Particle& p, int level);
// Members
xt::xtensor<double, 3> data_; //!< 2D array of temperature & density data
};
// Kind of plot output: a 2D slice image or a 3D voxel file.
enum class PlotType {
slice = 1,
voxel = 2
};
// Coordinate plane a slice plot is taken in.
enum class PlotBasis {
xy = 1,
xz = 2,
yz = 3
};
// Whether plot pixels are colored by cell id or by material id.
enum class PlotColorBy {
cells = 0,
mats = 1
};
//===============================================================================
// Plot class
//===============================================================================
// Common geometric description of a plot; get_map() rasterizes the geometry
// into a T (IdData or PropertyData) using these members.
class PlotBase {
public:
template<class T> T get_map() const;
// Members
public:
Position origin_; //!< Plot origin in geometry
Position width_; //!< Plot width in geometry
PlotBasis basis_; //!< Plot basis (XY/XZ/YZ)
std::array<size_t, 3> pixels_; //!< Plot size in pixels
int level_; //!< Plot universe level
};
// Rasterize the plot region into a data map of type T (IdData/PropertyData).
// A particle is placed at the center of each pixel in the chosen basis plane
// and the containing cell is looked up; found pixels are recorded via
// T::set_value. Pixels whose position is not inside any cell are left at the
// map's initial value. The pixel grid is traversed in parallel, one particle
// per thread.
template<class T>
T PlotBase::get_map() const {
  size_t width = pixels_[0];
  size_t height = pixels_[1];
  // get pixel size
  double in_pixel = (width_[0])/static_cast<double>(width);
  double out_pixel = (width_[1])/static_cast<double>(height);
  // size data array
  T data(width, height);
  // Setup basis indices and initial position centered on pixel.
  // Initialize to the XY defaults so the indices are never read
  // uninitialized: on non-GNU compilers the switch below has no default
  // arm (the __builtin_unreachable() is #ifdef __GNUC__-guarded), so an
  // unexpected basis_ value would otherwise leave in_i/out_i indeterminate.
  int in_i = 0;
  int out_i = 1;
  Position xyz = origin_;
  switch(basis_) {
  case PlotBasis::xy :
    in_i = 0;
    out_i = 1;
    break;
  case PlotBasis::xz :
    in_i = 0;
    out_i = 2;
    break;
  case PlotBasis::yz :
    in_i = 1;
    out_i = 2;
    break;
#ifdef __GNUC__
  default:
    __builtin_unreachable();
#endif
  }
  // set initial position (top-left pixel center)
  xyz[in_i] = origin_[in_i] - width_[0] / 2. + in_pixel / 2.;
  xyz[out_i] = origin_[out_i] + width_[1] / 2. - out_pixel / 2.;
  // arbitrary direction
  Direction dir = {0.7071, 0.7071, 0.0};
#pragma omp parallel
  {
    Particle p;
    p.r() = xyz;
    p.u() = dir;
    p.coord_[0].universe = model::root_universe;
    int level = level_;
    int j{};
#pragma omp for
    for (int y = 0; y < height; y++) {
      p.r()[out_i] = xyz[out_i] - out_pixel * y;
      for (int x = 0; x < width; x++) {
        p.r()[in_i] = xyz[in_i] + in_pixel * x;
        p.n_coord_ = 1;
        // local variables
        bool found_cell = find_cell(&p, 0);
        // record at the requested universe level, or the deepest level found
        j = p.n_coord_ - 1;
        if (level >=0) {j = level + 1;}
        if (found_cell) {
          data.set_value(y, x, p, j);
        }
      } // inner for
    } // outer for
  } // omp parallel
  return data;
}
// A single plot specification parsed from a <plot> node in plots.xml.
// The private set_* helpers each read one aspect of the XML node; they are
// defined in the corresponding .cpp.
class Plot : public PlotBase {
public:
// Constructor
Plot(pugi::xml_node plot);
// Methods
private:
void set_id(pugi::xml_node plot_node);
void set_type(pugi::xml_node plot_node);
void set_output_path(pugi::xml_node plot_node);
void set_bg_color(pugi::xml_node plot_node);
void set_basis(pugi::xml_node plot_node);
void set_origin(pugi::xml_node plot_node);
void set_width(pugi::xml_node plot_node);
void set_universe(pugi::xml_node plot_node);
void set_default_colors(pugi::xml_node plot_node);
void set_user_colors(pugi::xml_node plot_node);
void set_meshlines(pugi::xml_node plot_node);
void set_mask(pugi::xml_node plot_node);
// Members
public:
int id_; //!< Plot ID
PlotType type_; //!< Plot type (Slice/Voxel)
PlotColorBy color_by_; //!< Plot coloring (cell/material)
int meshlines_width_; //!< Width of lines added to the plot
int index_meshlines_mesh_; //!< Index of the mesh to draw on the plot
RGBColor meshlines_color_; //!< Color of meshlines on the plot
RGBColor not_found_; //!< Plot background color
std::vector<RGBColor> colors_; //!< Plot colors
std::string path_plot_; //!< Plot output filename
};
//===============================================================================
// Non-member functions
//===============================================================================
//! Add mesh lines to image data of a plot object
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void draw_mesh_lines(Plot pl, ImageData& data);
//! Write a ppm image to file using a plot object's image data
//! \param[in] plot object
//! \param[out] image data associated with the plot object
void output_ppm(Plot pl, const ImageData& data);
//! Initialize a voxel file
//! \param[in] id of an open hdf5 file
//! \param[in] dimensions of the voxel file (dx, dy, dz)
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to memory space of voxel data
void voxel_init(hid_t file_id, const hsize_t* dims, hid_t* dspace,
hid_t* dset, hid_t* memspace);
//! Write a section of the voxel data to hdf5
//! \param[in] voxel slice
//! \param[out] dataspace pointer to voxel data
//! \param[out] dataset pointer to voxel data
//! \param[out] pointer to data to write
void voxel_write_slice(int x, hid_t dspace, hid_t dset,
hid_t memspace, void* buf);
//! Close voxel file entities
//! \param[in] data space to close
//! \param[in] dataset to close
//! \param[in] memory space to close
void voxel_finalize(hid_t dspace, hid_t dset, hid_t memspace);
//===============================================================================
// External functions
//===============================================================================
//! Read plot specifications from a plots.xml file
void read_plots_xml();
//! Create a ppm image for a plot object
//! \param[in] plot object
void create_ppm(Plot pl);
//! Create an hdf5 voxel file for a plot object
//! \param[in] plot object
void create_voxel(Plot pl);
//! Create a randomly generated RGB color
//! \return RGBColor with random value
RGBColor random_color();
} // namespace openmc
#endif // OPENMC_PLOT_H
|
pad.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_PAD_H_
#define MACE_KERNELS_PAD_H_
#include <algorithm>
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
namespace kernels {
// State shared by all Pad functor specializations: the per-dimension pad
// amounts (two entries per tensor dimension) and the value used to fill the
// padded region.
struct PadFunctorBase {
  PadFunctorBase(const std::vector<int> &paddings, const float constant_value)
      : paddings_(paddings),
        constant_value_(constant_value) {}

  std::vector<int> paddings_;  // [before, after] per dimension
  float constant_value_;       // fill value for padded elements
};
// CPU pad implementation. Expects paddings_ to contain 2 * dim_size entries,
// laid out as [before, after] per dimension (checked below via MACE_CHECK);
// out_offset reads the "before" entries paddings_[0], [2], [4], [6] for the
// four dims of an NCHW tensor. The output is first filled entirely with
// constant_value_, then each input row is memcpy'd to its shifted position.
template<DeviceType D, typename T>
struct PadFunctor : public PadFunctorBase {
PadFunctor(const std::vector<int> &paddings,
const float constant_value)
: PadFunctorBase(paddings, constant_value) {}
// Pads `input` into `output`. `future` is unused on CPU.
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future) {
MACE_UNUSED(future);
MACE_CHECK(
this->paddings_.size() == static_cast<size_t>(input->dim_size()) * 2);
auto input_shape = input->shape();
MACE_RETURN_IF_ERROR(output->Resize({input_shape[0] + this->paddings_[0]
+ this->paddings_[1],
input_shape[1] + this->paddings_[2]
+ this->paddings_[3],
input_shape[2] + this->paddings_[4]
+ this->paddings_[5],
input_shape[3] + this->paddings_[6]
+ this->paddings_[7]}));
Tensor::MappingGuard input_guard(input);
Tensor::MappingGuard output_guard(output);
auto input_ptr = input->data<T>();
T *output_ptr = output->mutable_data<T>();
// Pre-fill everything with the constant; the copy below overwrites the
// interior region only.
std::fill(output_ptr, output_ptr + output->size(), this->constant_value_);
const index_t batch = input->dim(0);
const index_t channel = input->dim(1);
const index_t height = input->dim(2);
const index_t width = input->dim(3);
#pragma omp parallel for collapse(3)
for (index_t b = 0; b < batch; ++b) {
for (index_t c = 0; c < channel; ++c) {
for (index_t h = 0; h < height; ++h) {
const index_t in_offset = (((b * channel + c) * height) + h) * width;
const index_t out_offset = (((b + this->paddings_[0]) * output->dim(1)
+ (c + this->paddings_[2])) * output->dim(2)
+ (h + this->paddings_[4])) * output->dim(3)
+ this->paddings_[6];
memcpy(output_ptr + out_offset,
input_ptr + in_offset,
width * sizeof(T));
}
}
}
return MACE_SUCCESS;
}
};
#ifdef MACE_ENABLE_OPENCL
// GPU specialization: operator() is defined in the corresponding OpenCL .cc;
// this struct caches the compiled kernel and its tuning state between calls.
template <typename T>
struct PadFunctor<DeviceType::GPU, T> : PadFunctorBase {
PadFunctor(const std::vector<int> &paddings,
const float constant_value)
: PadFunctorBase(paddings, constant_value) {}
MaceStatus operator()(const Tensor *input,
Tensor *output,
StatsFuture *future);
cl::Kernel kernel_;                        // compiled OpenCL kernel
uint32_t kwg_size_;                        // max workgroup size for kernel_
std::unique_ptr<BufferBase> kernel_error_; // device-side error flag buffer
std::vector<index_t> input_shape_;         // shape the kernel was built for
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_PAD_H_
|
3d25pt_var.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for struct timeval values and return 1 if the
 * difference is negative, 0 otherwise. Following the classic glibc manual
 * example, *y is normalized in place so that the final subtraction yields a
 * non-negative tv_usec. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow a second (or more) from y when x has fewer microseconds. */
  if (x->tv_usec < y->tv_usec) {
    int borrow_sec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * borrow_sec;
    y->tv_sec += borrow_sec;
  }

  /* Carry excess microseconds (> 1s) from the usec field into seconds. */
  if (x->tv_usec - y->tv_usec > 1000000) {
    int carry_sec = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry_sec;
    y->tv_sec -= carry_sec;
  }

  /* tv_usec is now certainly positive. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;

  /* Negative iff x's (adjusted) seconds are smaller. */
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver for the order-1, 3D 25-point stencil with
 * axis-symmetric variable coefficients (PLUTO-tiled, CLooG-generated).
 *
 * Usage: prog Nx Ny Nz Nt -- each spatial size gets +8 ghost layers.
 * Runs the kernel TESTS times and reports per-run and best wall-clock time.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  /* Default to empty problem sizes so that missing command-line arguments
   * skip the computation instead of reading indeterminate values (the
   * original left Nx..Nt uninitialized when argc was too small). */
  int Nx = 0, Ny = 0, Nz = 0, Nt = 0;
  if (argc > 3) {
    Nx = atoi(argv[1])+8;
    Ny = atoi(argv[2])+8;
    Nz = atoi(argv[3])+8;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
  // allocate the arrays: A double-buffers the grid in time,
  // coef holds the 13 stencil coefficient fields
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  double ****coef = (double ****) malloc(sizeof(double***)*13);
  for(m=0; m<13;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  // tile size information, including extra element to decide the list length
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  // The list is modified here before source-to-source transformations
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 24;
  tile_size[1] = 24;
  tile_size[2] = 8;
  tile_size[3] = 128;
  tile_size[4] = -1;
  // for timekeeping
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;
  // initialize arrays with reproducible pseudo-random data (fixed seed)
  srand(42);
  for (i = 1; i < Nz; i++) {
    for (j = 1; j < Ny; j++) {
      for (k = 1; k < Nx; k++) {
        A[0][i][j][k] = 1.0 * (rand() % BASE);
      }
    }
  }
  for (m=0; m<13; m++) {
    for (i=1; i<Nz; i++) {
      for (j=1; j<Ny; j++) {
        for (k=1; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif
  int num_threads = 1;  // may be referenced by PRINT_RESULTS
#if defined(_OPENMP)
  num_threads = omp_get_max_threads();
#endif
  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    // serial execution - Addition: 6 && Multiplication: 2
    /* Copyright (C) 1991-2014 Free Software Foundation, Inc.
       This file is part of the GNU C Library.
       The GNU C Library is free software; you can redistribute it and/or
       modify it under the terms of the GNU Lesser General Public
       License as published by the Free Software Foundation; either
       version 2.1 of the License, or (at your option) any later version.
       The GNU C Library is distributed in the hope that it will be useful,
       but WITHOUT ANY WARRANTY; without even the implied warranty of
       MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
       Lesser General Public License for more details.
       You should have received a copy of the GNU Lesser General Public
       License along with the GNU C Library; if not, see
       <http://www.gnu.org/licenses/>. */
    /* This header is separate from features.h so that the compiler can
       include it implicitly at the start of every compilation.  It must
       not itself include <features.h> or any other header that includes
       <features.h> because the implicit include comes before any feature
       test macros that may be defined in a source file before it first
       explicitly includes a system header.  GCC knows the name of this
       header in order to preinclude it. */
    /* glibc's intent is to support the IEC 559 math functionality, real
       and complex.  If the GCC (4.9 and later) predefined macros
       specifying compiler intent are available, use them to determine
       whether the overall intent is to support these features; otherwise,
       presume an older compiler has intent to support these features and
       define these macros by default. */
    /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) /
       Unicode 6.0. */
    /* We do not support C11 <threads.h>. */
    int t1, t2, t3, t4, t5, t6, t7, t8;
    int lb, ub, lbp, ubp, lb2, ub2;
    register int lbv, ubv;
    /* Start of CLooG code -- machine-generated time-tiled loop nest,
     * kept byte-identical apart from indentation. */
    if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) {
      for (t1=-1;t1<=floord(Nt-1,3);t1++) {
        lbp=max(ceild(t1,2),ceild(6*t1-Nt+2,6));
        ubp=min(floord(4*Nt+Nz-9,24),floord(12*t1+Nz+6,24));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
        for (t2=lbp;t2<=ubp;t2++) {
          for (t3=max(max(max(0,ceild(3*t1,2)),ceild(24*t2-Nz+5,8)),3*t1-3*t2+1);t3<=min(min(min(floord(4*Nt+Ny-9,8),floord(12*t1+Ny+15,8)),floord(24*t2+Ny+11,8)),floord(24*t1-24*t2+Nz+Ny+13,8));t3++) {
            for (t4=max(max(max(max(0,ceild(3*t1-3*t2-14,16)),ceild(3*t1-30,32)),ceild(24*t2-Nz-115,128)),ceild(8*t3-Ny-115,128));t4<=min(min(min(min(floord(4*Nt+Nx-9,128),floord(12*t1+Nx+15,128)),floord(24*t2+Nx+11,128)),floord(8*t3+Nx-5,128)),floord(24*t1-24*t2+Nz+Nx+13,128));t4++) {
              for (t5=max(max(max(max(max(0,ceild(24*t2-Nz+5,4)),ceild(8*t3-Ny+5,4)),ceild(128*t4-Nx+5,4)),3*t1),6*t1-6*t2+1);t5<=min(min(min(min(min(floord(24*t1-24*t2+Nz+18,4),2*t3),Nt-1),3*t1+5),6*t2+4),32*t4+30);t5++) {
                for (t6=max(max(24*t2,4*t5+4),-24*t1+24*t2+8*t5-23);t6<=min(min(24*t2+23,-24*t1+24*t2+8*t5),4*t5+Nz-5);t6++) {
                  for (t7=max(8*t3,4*t5+4);t7<=min(8*t3+7,4*t5+Ny-5);t7++) {
                    lbv=max(128*t4,4*t5+4);
                    ubv=min(128*t4+127,4*t5+Nx-5);
#pragma ivdep
#pragma vector always
                    for (t8=lbv;t8<=ubv;t8++) {
                      A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)])))
                        + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
    /* End of CLooG code */
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = min(min_tdiff, tdiff);
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);          // fix: top-level time-buffer array was leaked
  for(m=0; m<13;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);       // fix: top-level coefficient array was leaked
  free(tile_size);  // fix: tile-size list was leaked
  return 0;
}
|
steppmethods.c | // Sammlung verwendeter Funktionen
#include "global.h"
#include "targets.h"
#include <gsl/gsl_cblas.h>
#include <gsl/gsl_rng.h>
#include <gsl/gsl_randist.h>
#include <gsl/gsl_sf_gamma.h>
#include <math.h>
#include <stdio.h>
#include "utility.h"
#include <string.h>
#include <time.h>
#include <omp.h>
/* Performs a single Velocity-Verlet update step (no obstacle).
 *
 * N           : total phase-space dimension; x, v, a each hold N/2 entries
 *               (heavy particle first, then OSSZI bath oscillators with DIM
 *               components each) -- layout per the indexing below.
 * x, v, a     : positions, velocities, accelerations (updated in place).
 * t           : current time, advanced by MAXSTEPSIZE.
 * derivmethod : callback computing the phase-space derivative.
 */
void VVerlet_Step(const int N, double x[N/2] , double v[N/2], double a[N/2], double *t,
void (*derivmethod) (double *y, double *ans, double t,int N)) {
  double temp_vec[N], abl[N];
  /* Heavy particle: first half-update, outside the parallel region. */
  for (int j = 0; j < DIM; j++)
  {
    v[j] = v[j] + 0.5 * a[j] * MAXSTEPSIZE;
    x[j] = x[j] + v[j] * MAXSTEPSIZE;
    temp_vec[j] = x[j];               /* temp_vec feeds the derivative call */
    temp_vec[DIM + j] = v[j] * mass;
  }
  /* Parallelize over the bath oscillators.  Loop indices are declared
   * locally: the original shared a function-scope j across threads inside
   * this parallel-for, which is a data race. */
#pragma omp parallel for
  for (int i = 0; i < OSSZI; i++)
  {
    for (int j = 0; j < DIM; j++)
    {
      v[DIM + j + i*DIM] = v[DIM + j + i*DIM] + 0.5 * a[DIM + j + i*DIM] * MAXSTEPSIZE;
      x[DIM + j + i*DIM] = x[DIM + j + i*DIM] + v[DIM + j + i*DIM] * MAXSTEPSIZE;
      temp_vec[2*DIM + i * 2*DIM + j] = x[DIM + j + i*DIM]; /* for the derivative */
      temp_vec[3*DIM + i * 2*DIM + j] = v[DIM + j + i*DIM];
    }
  }
  derivmethod(temp_vec, abl, *t, N);  /* derivative at time t */
  /* Convert back to x-v form for the acceleration; second half-update. */
  for (int j = 0; j < DIM; j++)
  {
    a[j] = abl[DIM + j] / mass;
    v[j] = v[j] + 0.5 * a[j] * MAXSTEPSIZE;
  }
#pragma omp parallel for
  for (int i = 0; i < OSSZI; i++)
  {
    for (int j = 0; j < DIM; j++)
    {
      a[DIM +j + i*DIM] = abl[3*DIM + j + i * 2*DIM]/massq[i];
      v[DIM +j + i*DIM] = v[DIM +j + i*DIM]
        + 0.5 * a[DIM +j + i*DIM] * MAXSTEPSIZE;
    }
  }
  Update_Lattice_Position(lattice_position, x, LATTICE_SPACING);
  *t = *t + MAXSTEPSIZE;
}
/* Velocity-Verlet step with the bath derivative computed inline (no
 * obstacle).  Same data layout as VVerlet_Step; advances *t by MAXSTEPSIZE.
 * The derivmethod parameter is unused here (kept for a uniform interface).
 */
void VVerlet_Step_deriv(const int N, double x[N/2] , double v[N/2], double a[N/2], double *t,
void (*derivmethod) (double *y, double *ans, double t,int N)) {
  SETNUMTHREADS
  double sum[DIM];
#pragma omp parallel  /* bath-particle work is parallelized */
  {
    int ID = omp_get_thread_num();
    int MAX_THREADS = omp_get_num_threads();
    /* Zero the heavy-particle force accumulator.  All loop indices below
     * are thread-local: the original reused a shared function-scope j
     * inside this parallel region, which is a data race. */
    if (MAX_THREADS < DIM)     /* not enough threads: each thread zeros all */
    {
      for (int j = 0; j < DIM; j++)
      {
        sum[j] = 0.0f;
      }
    } else if (ID < DIM)       /* one heavy-particle dimension per thread */
    {
      sum[ID] = 0.0f;
    }
#pragma omp for
    for (int i = 0; i < N/2; i++)       /* first half-update */
    {
      double v_temp = v[i] = v[i] + 0.5 * a[i] * MAXSTEPSIZE; /* v' = v + a/2*dt */
      x[i] = x[i] + v_temp * MAXSTEPSIZE;                     /* x' = x + v'*dt */
    }
#pragma omp for  /* bath oscillators: force + second half-update */
    for (int i = 0; i < OSSZI; i++)
    {
      double coup = coupling[i];
      double om = ommega[i];
      for (int j = 0; j < DIM; j++)
      {
        /* dp_q/dt = -w^2 * q + gamma * x */
        double a_temp = a[DIM + j + i*DIM] = - om * om * x[DIM + j + i*DIM]
          + coup * x[j];
        v[DIM +j + i*DIM] = v[DIM +j + i*DIM]
          + 0.5 * a_temp * MAXSTEPSIZE;
      }
    }
    /* Per-thread partial sums with Kahan compensation.
     * NOTE(review): the {0.0,0.0} initializers assume DIM == 2 -- confirm
     * before changing DIM. */
    double private_sum[DIM];
    double private_c[DIM] = {0.0,0.0};
    double private_t[DIM] = {0.0,0.0};
    double private_y[DIM] = {0.0,0.0};
    for (int j = 0; j < DIM; j++)
    {
      private_sum[j] = 0.0;
    }
#pragma omp for  /* sum gamma_i * q_i - gamma_i^2/omega_i^2 * x */
    for (int i = 0; i < OSSZI; i++)
    {
      double coup = coupling[i];
      double om = ommega[i];
      for (int j = 0; j < DIM; j++)
      { /* Kahan summation scheme */
        private_y[j] = coup * x[DIM + j + i*DIM]
          - pow(coup,2.0)/pow(om,2.0) * x[j] - private_c[j];
        private_t[j] = private_sum[j] + private_y[j];
        private_c[j] = (private_t[j] - private_sum[j]) - private_y[j];
        private_sum[j] = private_t[j];
      }
    }
    for (int j = 0; j < DIM; j++)
    {
#pragma omp atomic  /* combine thread partial sums */
      sum[j] = sum[j] + private_sum[j];
    }
#pragma omp barrier  /* all partial sums in before reading sum[] */
    if (MAX_THREADS < DIM)
    {
      for (int j = 0; j < DIM; j++)
      {
        a[j] = sum[j] /mass ;  /* momentum derivative */
      }
    } else if (ID < DIM)
    {
      a[ID] = sum[ID] /mass ;
    }
  }
  for (int j = 0; j < DIM; j++)  /* heavy particle: second half-update */
  {
    v[j] = v[j] + 0.5 * a[j] * MAXSTEPSIZE;
  }
  *t = *t + MAXSTEPSIZE;
}
/* Velocity-Verlet step with hard-sphere targets: the step is shortened to
 * the first surface contact found within MAXSTEPSIZE and the heavy
 * particle's velocity is reflected about the surface normal.
 * Data layout as in VVerlet_Step; derivmethod is unused here.
 */
void VVerlet_Step_hardsphere_reflect(const int N, double x[] , double v[], double a[], double *t,
void (*derivmethod) (double *y, double *ans, double t,int N)) {
  SETNUMTHREADS
  double sum[DIM];            /* heavy-particle force accumulator */
  double target_vec[20];      /* target centers, DIM coordinates each */
  int reflec_flag = 0;        /* 1 once a contact time was found */
  int target_nr_hit = 0;      /* index of the target actually hit */
  double contact_stepsize;    /* effective step, at most MAXSTEPSIZE */
  int number_targets;
  /* Per-target contact times.  Sized to match target_vec's capacity; the
   * original used 2*DIM entries, which TARGET_METHOD could overrun --
   * NOTE(review): confirm the true maximum number of targets per cell. */
  double lower_t_global[20];
  for (int n = 0; n < 20; n++)
  {
    lower_t_global[n] = MAXSTEPSIZE; /* <= stepsize; minimum search below */
  }
  /* --- determine time to collision ----------------------------------- */
  contact_stepsize = MAXSTEPSIZE;
  Update_Lattice_Position(lattice_position, x, LATTICE_SPACING);
  if ( (number_targets = TARGET_METHOD(lattice_position, target_vec, /* find targets */
                                       LATTICE_SPACING, ESCAPE_CELLS)
       ) > 0
     )
  {
    /* One independent root search per target.  All inner loop indices are
     * thread-local: the original shared the function-scope i across
     * threads inside this parallel-for (a data race). */
#pragma omp parallel for
    for (int target_i = 0; target_i < number_targets; target_i++)
    {
      double lower_t = 0.0;          /* lower search bound */
      double upper_t = MAXSTEPSIZE;  /* upper search bound */
      int signold; int signnew;
      double steps = 500.0;
      int root_flag = 1;   /* 1 if a root was found (and on the first pass) */
      int root_loop = 0;   /* current refinement pass */
      int root_loop_max = 3;
      while ((root_flag == 1) && (root_loop < root_loop_max))
      {
        root_flag = 0;
        root_loop += 1;
        /* sign of f at the lower bound, f = sum (x-R)^2 - r^2 */
        double eval_value = 0.0;
        for (int d = 0; d < DIM; d++)
        {
          double v_temp = v[d] + 0.5 * a[d] * lower_t;
          double x_temp = x[d] + v_temp * lower_t;
          eval_value += pow( (x_temp - target_vec[d + DIM * target_i]) , 2.0);
        }
        eval_value = eval_value - pow(TARGET_LENGTH , 2.0);
        signold = (eval_value > 0) ? 1 : -1;
        double dt = (upper_t - lower_t)/ steps;
        /* scan the interval until the first sign change */
        for (double t_check = lower_t; t_check < upper_t; t_check += dt)
        {
          eval_value = 0.0;
          for (int d = 0; d < DIM; d++)
          {
            double v_temp = v[d] + 0.5 * a[d] * t_check;
            double x_temp = x[d] + v_temp * t_check;
            eval_value += pow( (x_temp - target_vec[d + DIM * target_i]) , 2.0);
          }
          eval_value = eval_value - pow(TARGET_LENGTH , 2.0);
          signnew = (eval_value > 0) ? 1 : -1;
          if (signnew != signold)   /* first sign change: tighten interval */
          {
            lower_t = t_check - dt;
            steps = 10000.0;        /* refine ~4 orders of magnitude per pass */
            upper_t = t_check;
            root_flag = 1;          /* redo loop to refine the root */
            lower_t_global[target_i] = lower_t;
            break;
          }
          signold = signnew;
        }
      }
    }
  } /* --- collision detection end ------------------------------------- */
  double minimum_t = MAXSTEPSIZE;
  for (int n = 0; n < number_targets; n++)  /* smallest reflection time */
  {
    if (lower_t_global[n] < minimum_t)
    {
      reflec_flag = 1;
      target_nr_hit = n;
      minimum_t = lower_t_global[n];
    }
  }
  if (reflec_flag == 1)
  {
    if (TARGET_FLAG)   /* record the first contact only once */
    {
      time_first_contact += *t + minimum_t;
      TARGET_FLAG = 0;
    }
    /* Use the lower bound so the particle stays JUST outside the surface;
     * otherwise the next zero search converges badly. */
    contact_stepsize = minimum_t;
  } else
  {
    contact_stepsize = MAXSTEPSIZE;
  }
#pragma omp parallel  /* bath-particle work parallelized */
  {
    int ID = omp_get_thread_num();
    int MAX_THREADS = omp_get_num_threads();
    /* Zero the force accumulator; loop indices thread-local (the original
     * shared a function-scope j here -- a data race). */
    if (MAX_THREADS < DIM)
    {
      for (int j = 0; j < DIM; j++)
      {
        sum[j] = 0.0f;
      }
    } else if (ID < DIM)
    {
      sum[ID] = 0.0f;
    }
#pragma omp for
    for (int i = 0; i < N/2; i++)  /* first half-update */
    {
      double v_temp = v[i] = v[i] + 0.5 * a[i] * contact_stepsize;
      x[i] = x[i] + v_temp * contact_stepsize;
    }
#pragma omp for  /* bath oscillators: force + second half-update */
    for (int i = 0; i < OSSZI; i++)
    {
      double coup = coupling[i];
      double om = ommega[i];
      for (int j = 0; j < DIM; j++)
      {
        /* dp_q/dt = -w^2 * q + gamma * x */
        double a_temp = a[DIM + j + i*DIM] = - om * om * x[DIM + j + i*DIM]
          + coup * x[j];
        v[DIM +j + i*DIM] = v[DIM +j + i*DIM]
          + 0.5 * a_temp * contact_stepsize;
      }
    }
    /* Per-thread Kahan partial sums.  NOTE(review): the {0.0,0.0}
     * initializers assume DIM == 2. */
    double private_sum[DIM];
    double private_c[DIM] = {0.0,0.0};
    double private_t[DIM] = {0.0,0.0};
    double private_y[DIM] = {0.0,0.0};
    for (int j = 0; j < DIM; j++)
    {
      private_sum[j] = 0.0;
    }
#pragma omp for  /* sum gamma_i * q_i - gamma_i^2/omega_i^2 * x */
    for (int i = 0; i < OSSZI; i++)
    {
      double coup = coupling[i];
      double om = ommega[i];
      for (int j = 0; j < DIM; j++)
      { /* Kahan summation scheme */
        private_y[j] = coup * x[DIM + j + i*DIM]
          - pow(coup,2.0)/pow(om,2.0) * x[j] - private_c[j];
        private_t[j] = private_sum[j] + private_y[j];
        private_c[j] = (private_t[j] - private_sum[j]) - private_y[j];
        private_sum[j] = private_t[j];
      }
    }
    for (int j = 0; j < DIM; j++)
    {
#pragma omp atomic  /* combine thread partial sums */
      sum[j] = sum[j] + private_sum[j];
    }
#pragma omp barrier
    if (MAX_THREADS < DIM)
    {
      for (int j = 0; j < DIM; j++)
      {
        a[j] = sum[j] /mass ;  /* momentum derivative */
      }
    } else if (ID < DIM)
    {
      a[ID] = sum[ID] /mass ;
    }
  }
  for (int j = 0; j < DIM; j++)  /* heavy particle: second half-update */
  {
    v[j] = v[j] + 0.5 * a[j] * contact_stepsize;
  }
  *t = *t + contact_stepsize;
  double norm = 0.0;
  for (int j = 0; j < DIM; j++)  /* distance to the hit target's center */
  {
    norm += pow(target_vec[j + DIM * target_nr_hit] - x[j],2.0);
  }
  norm = sqrt(norm);
  if (number_targets)  /* only meaningful for nonempty cells */
  {
    if (norm < TARGET_LENGTH * 0.98)  /* particle penetrated the sphere */
    {
      FLAG_DUMP_VALUE = 1;
      printf("\nsphere entered at t=%2.2e, lattice point %d %d", *t, lattice_position[0], lattice_position[1]);
    }
  }
  if ((reflec_flag))  /* reflect on the surface normal; assumes the particle
                       * sits on the surface (prevents inner re-reflection) */
  {
    double surface_normal[DIM];
    double vec_norm = 0.0;
    double scalar_prod = 0.0;
    for (int j = 0; j < DIM; j++)  /* vector towards the target center */
    {
      surface_normal[j] = target_vec[j + DIM * target_nr_hit] - x[j];
    }
    for (int j = 0; j < DIM; j++)
    {
      vec_norm += surface_normal[j] * surface_normal[j];
    }
    vec_norm = sqrt(vec_norm);     /* normalize, pointing outwards */
    for (int j = 0; j < DIM; j++)
    {
      surface_normal[j] = surface_normal[j]/vec_norm;
    }
    for (int j = 0; j < DIM; j++)  /* component of v along the normal */
    {
      scalar_prod += surface_normal[j] * v[j];
    }
    for (int j = 0; j < DIM; j++)  /* reflect that component */
    {
      v[j] = v[j] - 2.0 * scalar_prod * surface_normal[j];
    }
  }
}
/* Like VVerlet_Step_hardsphere_reflect, but on contact the normal velocity
 * component is reflected for ALL particles, not only the heavy one.
 * Data layout as in VVerlet_Step; derivmethod is unused here.
 */
void VVerlet_Step_hardsphere_reflect_all(const int N, double x[] , double v[], double a[], double *t,
void (*derivmethod) (double *y, double *ans, double t,int N)) {
  SETNUMTHREADS
  double sum[DIM];            /* heavy-particle force accumulator */
  double target_vec[20];      /* target centers, DIM coordinates each */
  int reflec_flag = 0;        /* 1 once a contact time was found */
  int target_nr_hit = 0;      /* index of the target actually hit */
  double contact_stepsize;    /* effective step, at most MAXSTEPSIZE */
  int number_targets;
  /* Per-target contact times.  Sized to match target_vec's capacity; the
   * original used 2*DIM entries, which TARGET_METHOD could overrun --
   * NOTE(review): confirm the true maximum number of targets per cell. */
  double lower_t_global[20];
  for (int n = 0; n < 20; n++)
  {
    lower_t_global[n] = MAXSTEPSIZE; /* <= stepsize; minimum search below */
  }
  /* --- determine time to collision ----------------------------------- */
  contact_stepsize = MAXSTEPSIZE;
  Update_Lattice_Position(lattice_position, x, LATTICE_SPACING);
  if ( (number_targets = TARGET_METHOD(lattice_position, target_vec, /* find targets */
                                       LATTICE_SPACING, ESCAPE_CELLS)
       ) > 0
     )
  {
    /* One independent root search per target.  Inner loop indices are
     * thread-local: the original shared the function-scope i across
     * threads inside this parallel-for (a data race). */
#pragma omp parallel for
    for (int target_i = 0; target_i < number_targets; target_i++)
    {
      double lower_t = 0.0;          /* lower search bound */
      double upper_t = MAXSTEPSIZE;  /* upper search bound */
      int signold; int signnew;
      double steps = 500.0;
      int root_flag = 1;   /* 1 if a root was found (and on the first pass) */
      int root_loop = 0;   /* current refinement pass */
      int root_loop_max = 3;
      while ((root_flag == 1) && (root_loop < root_loop_max))
      {
        root_flag = 0;
        root_loop += 1;
        /* sign of f at the lower bound, f = sum (x-R)^2 - r^2 */
        double eval_value = 0.0;
        for (int d = 0; d < DIM; d++)
        {
          double v_temp = v[d] + 0.5 * a[d] * lower_t;
          double x_temp = x[d] + v_temp * lower_t;
          eval_value += pow( (x_temp - target_vec[d + DIM * target_i]) , 2.0);
        }
        eval_value = eval_value - pow(TARGET_LENGTH , 2.0);
        signold = (eval_value > 0) ? 1 : -1;
        double dt = (upper_t - lower_t)/ steps;
        /* scan the interval until the first sign change */
        for (double t_check = lower_t; t_check < upper_t; t_check += dt)
        {
          eval_value = 0.0;
          for (int d = 0; d < DIM; d++)
          {
            double v_temp = v[d] + 0.5 * a[d] * t_check;
            double x_temp = x[d] + v_temp * t_check;
            eval_value += pow( (x_temp - target_vec[d + DIM * target_i]) , 2.0);
          }
          eval_value = eval_value - pow(TARGET_LENGTH , 2.0);
          signnew = (eval_value > 0) ? 1 : -1;
          if (signnew != signold)   /* first sign change: tighten interval */
          {
            lower_t = t_check - dt;
            steps = 10000.0;        /* refine ~4 orders of magnitude per pass */
            upper_t = t_check;
            root_flag = 1;          /* redo loop to refine the root */
            lower_t_global[target_i] = lower_t;
            break;
          }
          signold = signnew;
        }
      }
    }
  } /* --- collision detection end ------------------------------------- */
  double minimum_t = MAXSTEPSIZE;
  for (int n = 0; n < number_targets; n++)  /* smallest reflection time */
  {
    if (lower_t_global[n] < minimum_t)
    {
      reflec_flag = 1;
      target_nr_hit = n;
      minimum_t = lower_t_global[n];
    }
  }
  if (reflec_flag == 1)
  {
    if (TARGET_FLAG)   /* record the first contact only once */
    {
      time_first_contact += *t + minimum_t;
      TARGET_FLAG = 0;
    }
    /* Use the lower bound so the particle stays JUST outside the surface;
     * otherwise the next zero search converges badly. */
    contact_stepsize = minimum_t;
  } else
  {
    contact_stepsize = MAXSTEPSIZE;
  }
#pragma omp parallel  /* bath-particle work parallelized */
  {
    int ID = omp_get_thread_num();
    int MAX_THREADS = omp_get_num_threads();
    /* Zero the force accumulator; loop indices thread-local (the original
     * shared a function-scope j here -- a data race). */
    if (MAX_THREADS < DIM)
    {
      for (int j = 0; j < DIM; j++)
      {
        sum[j] = 0.0f;
      }
    } else if (ID < DIM)
    {
      sum[ID] = 0.0f;
    }
#pragma omp for
    for (int i = 0; i < N/2; i++)  /* first half-update */
    {
      double v_temp = v[i] = v[i] + 0.5 * a[i] * contact_stepsize;
      x[i] = x[i] + v_temp * contact_stepsize;
    }
#pragma omp for  /* bath oscillators: force + second half-update */
    for (int i = 0; i < OSSZI; i++)
    {
      double coup = coupling[i];
      double om = ommega[i];
      for (int j = 0; j < DIM; j++)
      {
        /* dp_q/dt = -w^2 * q + gamma * x */
        double a_temp = a[DIM + j + i*DIM] = - om * om * x[DIM + j + i*DIM]
          + coup * x[j];
        v[DIM +j + i*DIM] = v[DIM +j + i*DIM]
          + 0.5 * a_temp * contact_stepsize;
      }
    }
    /* Per-thread Kahan partial sums.  NOTE(review): the {0.0,0.0}
     * initializers assume DIM == 2. */
    double private_sum[DIM];
    double private_c[DIM] = {0.0,0.0};
    double private_t[DIM] = {0.0,0.0};
    double private_y[DIM] = {0.0,0.0};
    for (int j = 0; j < DIM; j++)
    {
      private_sum[j] = 0.0;
    }
#pragma omp for  /* sum gamma_i * q_i - gamma_i^2/omega_i^2 * x */
    for (int i = 0; i < OSSZI; i++)
    {
      double coup = coupling[i];
      double om = ommega[i];
      for (int j = 0; j < DIM; j++)
      { /* Kahan summation scheme */
        private_y[j] = coup * x[DIM + j + i*DIM]
          - pow(coup,2.0)/pow(om,2.0) * x[j] - private_c[j];
        private_t[j] = private_sum[j] + private_y[j];
        private_c[j] = (private_t[j] - private_sum[j]) - private_y[j];
        private_sum[j] = private_t[j];
      }
    }
    for (int j = 0; j < DIM; j++)
    {
#pragma omp atomic  /* combine thread partial sums */
      sum[j] = sum[j] + private_sum[j];
    }
#pragma omp barrier
    if (MAX_THREADS < DIM)
    {
      for (int j = 0; j < DIM; j++)
      {
        a[j] = sum[j] /mass ;  /* momentum derivative */
      }
    } else if (ID < DIM)
    {
      a[ID] = sum[ID] /mass ;
    }
  }
  for (int j = 0; j < DIM; j++)  /* heavy particle: second half-update */
  {
    v[j] = v[j] + 0.5 * a[j] * contact_stepsize;
  }
  *t = *t + contact_stepsize;
  double norm = 0.0;
  for (int j = 0; j < DIM; j++)  /* distance to the hit target's center */
  {
    norm += pow(target_vec[j + DIM * target_nr_hit] - x[j],2.0);
  }
  norm = sqrt(norm);
  if (number_targets)  /* only meaningful for nonempty cells */
  {
    if (norm < TARGET_LENGTH * 0.98)  /* particle penetrated the sphere */
    {
      FLAG_DUMP_VALUE = 1;
      printf("\nsphere entered at t=%2.2e, lattice point %d %d", *t, lattice_position[0], lattice_position[1]);
    }
  }
  if ((reflec_flag))  /* reflect on the surface normal; assumes the particle
                       * sits on the surface (prevents inner re-reflection) */
  {
    double surface_normal[DIM];
    double vec_norm = 0.0;
    double scalar_prod = 0.0;
    for (int j = 0; j < DIM; j++)  /* vector towards the target center */
    {
      surface_normal[j] = target_vec[j + DIM * target_nr_hit] - x[j];
    }
    for (int j = 0; j < DIM; j++)
    {
      vec_norm += surface_normal[j] * surface_normal[j];
    }
    vec_norm = sqrt(vec_norm);     /* normalize, pointing outwards */
    for (int j = 0; j < DIM; j++)
    {
      surface_normal[j] = surface_normal[j]/vec_norm;
    }
    for (int j = 0; j < DIM; j++)  /* component of v along the normal */
    {
      scalar_prod += surface_normal[j] * v[j];
    }
    /* Reflect that component for ALL particles.  Loop indices are
     * thread-local (the original shared j across threads -- a race).
     * NOTE(review): this covers v[0 .. OSSZI*DIM-1], i.e. the heavy
     * particle plus the first OSSZI-1 oscillators -- confirm intended. */
#pragma omp parallel for
    for (int i = 0; i < OSSZI; i++)
    {
      for (int j = 0; j < DIM; j++)
      {
        v[i*DIM + j] = v[i*DIM + j] - 2.0 * scalar_prod * surface_normal[j];
      }
    }
  }
}
/* Velocity-Verlet step using the fully mobile Kupferman bath with coupling
 * U = sum gamma_i (q_i - X)^2 (no obstacle).  Advances *t by MAXSTEPSIZE
 * and mirrors the heavy particle's state into VIRTUAL_X / VIRTUAL_V.
 * The derivmethod parameter is unused here (uniform interface).
 */
void VVerlet_Step_deriv_Kupf(const int N, double x[N/2] , double v[N/2], double a[N/2], double *t,
void (*derivmethod) (double *y, double *ans, double t,int N)) {
  SETNUMTHREADS
  double sum[DIM];
#pragma omp parallel  /* bath-particle work is parallelized */
  {
    int ID = omp_get_thread_num();
    int MAX_THREADS = omp_get_num_threads();
    /* Zero the heavy-particle force accumulator.  All loop indices below
     * are thread-local: the original shared a function-scope j inside
     * this parallel region, which is a data race. */
    if (MAX_THREADS < DIM)     /* not enough threads: each thread zeros all */
    {
      for (int j = 0; j < DIM; j++)
      {
        sum[j] = 0.0f;
      }
    } else if (ID < DIM)       /* one heavy-particle dimension per thread */
    {
      sum[ID] = 0.0f;
    }
#pragma omp for
    for (int i = 0; i < N/2; i++)       /* first half-update */
    {
      double v_temp = v[i] = v[i] + 0.5 * a[i] * MAXSTEPSIZE; /* v' = v + a/2*dt */
      x[i] = x[i] + v_temp * MAXSTEPSIZE;                     /* x' = x + v'*dt */
    }
#pragma omp for  /* bath oscillators: force + second half-update */
    for (int i = 0; i < OSSZI; i++)
    {
      double coup = coupling[i];  /* (unused ommega[i] load removed) */
      for (int j = 0; j < DIM; j++)
      {
        /* dp_q/dt = -gamma * (q - X) */
        double a_temp = a[DIM + j + i*DIM] = -coup * (x[DIM + j + i*DIM] - x[j])/massq[i];
        v[DIM +j + i*DIM] = v[DIM +j + i*DIM]
          + 0.5 * a_temp * MAXSTEPSIZE;
      }
    }
    /* Per-thread partial sums with Kahan compensation.
     * NOTE(review): the {0.0,0.0} initializers assume DIM == 2. */
    double private_sum[DIM];
    double private_c[DIM] = {0.0,0.0};
    double private_t[DIM] = {0.0,0.0};
    double private_y[DIM] = {0.0,0.0};
    for (int j = 0; j < DIM; j++)
    {
      private_sum[j] = 0.0;
    }
#pragma omp for  /* sum gamma_i * (q_i - X) */
    for (int i = 0; i < OSSZI; i++)
    {
      double coup = coupling[i];
      for (int j = 0; j < DIM; j++)
      { /* Kahan summation scheme */
        private_y[j] = coup * (x[DIM + j + i*DIM] - x[j]) - private_c[j];
        private_t[j] = private_sum[j] + private_y[j];
        private_c[j] = (private_t[j] - private_sum[j]) - private_y[j];
        private_sum[j] = private_t[j];
      }
    }
    for (int j = 0; j < DIM; j++)
    {
#pragma omp atomic  /* combine thread partial sums */
      sum[j] = sum[j] + private_sum[j];
    }
#pragma omp barrier  /* all partial sums in before reading sum[] */
    if (MAX_THREADS < DIM)
    {
      for (int j = 0; j < DIM; j++)
      {
        a[j] = sum[j] /mass ;  /* momentum derivative */
      }
    } else if (ID < DIM)
    {
      a[ID] = sum[ID] /mass ;
    }
  }
  for (int j = 0; j < DIM; j++)  /* heavy particle: second half-update */
  {
    v[j] = v[j] + 0.5 * a[j] * MAXSTEPSIZE;
    VIRTUAL_V[j] = v[j];         /* mirror state for the virtual particle */
    VIRTUAL_X[j] = x[j];
  }
  *t = *t + MAXSTEPSIZE;
}
void VVerlet_Step_hardsphere_reflect_Kupf(const int N, double x[] , double v[], double a[], double *t, //mach einzelnen update Schritt nach Velocity-Verlet ohne Hinderniss
void (*derivmethod) (double *y, double *ans, double t,int N)) {
int i,j;
SETNUMTHREADS
double sum[DIM]; // variable stepsize maximum MAXSTEPSIZE
double target_vec[20];
int reflec_flag = 0; // set to 1of target if root found
int target_nr_hit = 0; // number of actual target hit
double contact_stepsize = LASTSTEPSIZE;
// upper search bound
int number_targets;
double lower_t_global[2*DIM]; // array containing contact times for all targets in cell
int reflec_flag_global[2*DIM]; // global array tro indicate reflection on target
for (i=0; i < 2*DIM; i++)
{
lower_t_global[i] = LASTSTEPSIZE; // all times a equal or smaller stepsize, so search for minimum later
reflec_flag_global[i] = 0;
}
// determine time to collsion --------------------------------------------------------------------------------------------
contact_stepsize = LASTSTEPSIZE;
if(VIRTUAL_FLAG)
{
Update_Lattice_Position(lattice_position, VIRTUAL_X, LATTICE_SPACING);
}else
{
Update_Lattice_Position(lattice_position, x, LATTICE_SPACING);
}
if ( (number_targets = TARGET_METHOD(lattice_position, target_vec, // Find Targets
LATTICE_SPACING, ESCAPE_CELLS)\
)>0
)
{
double N_CHECK = 1000.0;
#pragma omp parallel for
for (int target_i = 0; target_i < number_targets; target_i++) // check for all targets in increments of LASTSTEPSIZE/N_CHECK for contact
{
double dh = LASTSTEPSIZE / N_CHECK;
for (double h_check = 0.0; h_check < LASTSTEPSIZE; h_check += dh)
{
double r_check = 0.0;
for (int j = 0; j < DIM; j++)
{
double v_temp = v[j] + 0.5 * a[j] * h_check;
double x_temp = x[j] + v_temp * h_check;
if(VIRTUAL_FLAG)
{
v_temp = VIRTUAL_V[j] + 0.5 * a[j] * h_check;
x_temp = VIRTUAL_X[j] + v_temp * h_check;
}
r_check += pow( (x_temp - target_vec[j + target_i * DIM] ) ,2.0 );
}
r_check = sqrt(r_check);
if (r_check < TARGET_LENGTH)
{
reflec_flag = 1;
target_nr_hit = target_i;
contact_stepsize = h_check - dh;
if (TARGET_FLAG) // check for first contact
{
time_first_contact += *t + contact_stepsize;
TARGET_FLAG = 0;// no need to test again after 1st contact
}
break;
break;
}
}
}
}
//-------------------------------------------------------------------------------------------------------------------
#pragma omp parallel // Values for Bath particles parralized//
{
int ID = omp_get_thread_num();
int MAX_THREADS = omp_get_num_threads();
if (MAX_THREADS < DIM) // Catch case : not enough threads
{ for (j= 0; j<DIM; j++)
{
sum[j] = 0.0f; // prepare sum for heavy particle
}
}else if (ID < DIM) // heavy particle dimensions go to threads
{
sum[ID] = 0.0f; // prepare sum for heavy particle
}
#pragma omp for
for (i= 0 ; i < N/2; i++) // Erstes Halbupdate ////
{
double v_temp = v[i] = v[i] + 0.5 * a[i] * contact_stepsize; //vnext = v + a/2 *dt
x[i] = x[i] + v_temp * contact_stepsize; //xnext = x + vnext *dt
}
if(VIRTUAL_FLAG)
{
#pragma omp for
for (i= 0 ; i < DIM; i++) // Erstes Halbupdate ////
{
double v_temp = VIRTUAL_V[i] = VIRTUAL_V[i] + 0.5 * a[i] * contact_stepsize; //vnext = v + a/2 *dt
VIRTUAL_X[i] = VIRTUAL_X[i] + v_temp * contact_stepsize; //xnext = x + vnext *dt
}
}
#pragma omp for // Values for Bath particles parralized
for (i = 0 ; i < OSSZI; i++)
{
double coup = coupling[i];
double om = ommega[i];
for(j = 0; j < DIM; j++)
{ // Ableitung /
double a_temp = a[DIM + j + i*DIM] = -coup * (x[DIM + j + i*DIM] - x[j])/massq[i]; // dp_q/dt = -w^2 * q+ gamma * x
v[DIM +j + i*DIM] = v[DIM +j + i*DIM]\
+ 0.5 * a_temp * contact_stepsize; // Zweites Halbupdate ////
}
}
double private_sum[DIM];
double private_c[DIM] = {0.0,0.0}; // Kahan correction
double private_t[DIM] = {0.0,0.0};
double private_y[DIM] = {0.0,0.0};
for(j = 0; j < DIM; j++) // Values for heavy particle
{
private_sum[j] = 0.0; // ech thread calcs partial sum
}
#pragma omp for // calc sum gamma_i *q_i - gamma_i^2/ommega_i^2 * x
for (i = 0 ; i < OSSZI; i++)
{
double coup = coupling[i];
double om = ommega[i];
for (j=0; j<DIM; j++)
{ // Kahan summation scheme
private_y[j] = coup * (x[DIM + j + i*DIM] - x[j]) - private_c[j];
private_t[j] = private_sum[j] + private_y[j];
private_c[j] = (private_t[j] - private_sum[j]) - private_y[j];
private_sum[j] = private_t[j];
}
}
for(j=0; j<DIM; j++)
{
#pragma omp atomic //addiere threadsummen auf
sum[j] =sum[j] + private_sum[j];
}
#pragma omp barrier
if ( MAX_THREADS < DIM) // Catch case : not enough threads
{ for (j= 0; j<DIM; j++)
{
a[j] = sum[j] /mass ; //p-Ableitung
}
}else if (ID < DIM) // heavy particle dimensions go to threads
{
a[ID] = sum[ID] /mass ; //p-Ableitung
}
}
for(j = 0; j< DIM; j++)
{
v[j] = v[j] + 0.5 * a[j] * contact_stepsize;
}
if(VIRTUAL_FLAG)
{
for(j = 0; j< DIM; j++)
{
VIRTUAL_V[j] = VIRTUAL_V[j] + 0.5 * a[j] * contact_stepsize;
}
}
*t = *t + contact_stepsize;
double norm = 0.0;
for(j = 0; j< DIM; j++)
{
if(VIRTUAL_FLAG)
{
norm += pow(target_vec[j + DIM * target_nr_hit] - VIRTUAL_X[j],2.0);
}else
{
norm += pow(target_vec[j + DIM * target_nr_hit] - x[j],2.0);
}
}
norm = sqrt(norm);
if (number_targets)
{
if(norm < TARGET_LENGTH * 0.98) // catch case particle entered sphere, only nonempty cells
{
FLAG_DUMP_VALUE = 1;
printf("\nsphere entered at t=%2.2e, lattice point %d %d", *t, lattice_position[0], lattice_position[1]);
}
}
if ((reflec_flag)) // reflect on surface normal. Assume particle is on the surface
// prevent case of inner reflection in case particle still got in
{
double surface_normal[DIM];
double vec_norm = 0.0;
double scalar_prod = 0.0;
// find vector towards target
for(j = 0; j< DIM; j++)
{
if(VIRTUAL_FLAG)
{
surface_normal[j] = target_vec[j + DIM * target_nr_hit] - VIRTUAL_X[j];
}else
{
surface_normal[j] = target_vec[j + DIM * target_nr_hit] - x[j];
}
}
for(j = 0; j< DIM; j++)
{
vec_norm += surface_normal[j] * surface_normal[j];
}
// point surface vector outwards and normalize
vec_norm = sqrt(vec_norm);
for(j = 0; j< DIM; j++)
{
surface_normal[j] = surface_normal[j]/vec_norm;
}
// find part of v in surface direction
for(j = 0; j< DIM; j++)
{
if(VIRTUAL_FLAG)
{
scalar_prod += surface_normal[j] * VIRTUAL_V[j];
}else
{
scalar_prod += surface_normal[j] * v[j];
}
}
// reflect that part
for(j = 0; j< DIM; j++)
{
if(VIRTUAL_FLAG)
{
VIRTUAL_V[j] = VIRTUAL_V[j] - 2.0 * scalar_prod * surface_normal[j];
}else
{
v[j] = v[j] - 2.0 * scalar_prod * surface_normal[j];
}
}
}
if ((reflec_flag))
{
LASTSTEPSIZE = LASTSTEPSIZE - contact_stepsize; // do remaining step time
}
else
{
LASTSTEPSIZE = MAXSTEPSIZE;
}
}
// One velocity-Verlet step for a heavy particle coupled to a fully mobile
// Kupferman bath (coupling U = sum_i gamma_i (q_i - X)^2), confined to a 1-D
// box of width LATTICE_SPACING centred at 0.  The time until the next wall
// contact is estimated by solving x + v h + (a/2) h^2 = -+L/2; if a contact
// falls inside the step, the step is shortened to 99.5% of the contact time,
// the reflection is applied, and the leftover time is carried in the global
// LASTSTEPSIZE for the next call.
//
// Parameters:
//   N           - total state size; x, v and a each hold N/2 entries
//                 (index 0: heavy particle, indices 1..OSSZI: bath oscillators)
//   x, v, a     - positions, velocities, accelerations (updated in place)
//   t           - simulation time, advanced by the step actually taken
//   derivmethod - unused here; kept so the signature matches the other
//                 VVerlet_Step_deriv_* integrators
//
// Globals read/written: LASTSTEPSIZE, MAXSTEPSIZE, VIRTUAL_FLAG, VIRTUAL_X,
// VIRTUAL_V, OSSZI, coupling[], ommega[], massq[], mass, REFLECTALL_FLAG,
// LATTICE_SPACING.
void VVerlet_Step_deriv_Box(const int N, double x[N/2] , double v[N/2], double a[N/2], double *t,
                            void (*derivmethod) (double *y, double *ans, double t,int N)) {
    // performs a single update step inside a box of length LATTICE_SPACING
    // use fully mobile Kupferman bath that is coupling U = sum gamma_i (q_i-X)^2
    int i,j;                                 // NOTE(review): j is declared but never used here
    SETNUMTHREADS
    double sum = 0.0;                        // accumulates the bath force on the heavy particle
    double contact_stepsize = LASTSTEPSIZE;  // step actually taken; shortened on wall contact
    int reflec_flag = 0;                     // set when a wall contact occurs within this step
    // find contact time ------------------------------------
    double aa,b,c;                           // coefficients of aa*h^2 + b*h + c = 0
    aa=b=c=0;
    int REFLEC_WALL = 1;
    if (REFLEC_WALL)
    { // debugging purposes, free evolution if false
        for (int i_bound = -1; i_bound < 2; i_bound += 2)  // both walls: i_bound = -1, +1
        {
            double root1 = 0.0; double root2 = 0.0;
            if(VIRTUAL_FLAG)
            {
                // virtual tracer particle follows the same acceleration a[0]
                c = VIRTUAL_X[0] + i_bound * LATTICE_SPACING/2.0;  // signed distance to the wall
                b = VIRTUAL_V[0];
                aa = 0.5 * a[0];
            }else
            {
                c = x[0] + i_bound * LATTICE_SPACING/2.0;
                b = v[0];
                aa = 0.5 * a[0];
            }
            // Numerically stable quadratic formula, case-split on the sign of b
            // to avoid cancellation.  A negative discriminant makes sqrt() return
            // NaN; NaN roots fail both range tests below and are ignored.
            if(b >= 0) // minimize roundoff err by case split
            {
                root1 = (-b - sqrt(b*b - 4.0 * aa * c)) / (2.0 * aa);
                root2 = 2.0 * c /(-b - sqrt(b*b - 4.0 * aa * c) );
            }else
            {
                root1 = 2.0 * c /(-b + sqrt(b*b - 4.0 * aa * c) );
                root2 = (-b + sqrt(b*b - 4.0 * aa * c)) / (2.0 * aa);
            }
            // keep the earliest positive contact time, stopping 0.5% short of
            // the wall so the particle remains inside the box
            if( (root1 > 0) && (root1 < contact_stepsize) )
            {
                contact_stepsize = 0.995 * root1;
                reflec_flag = 1;
            }
            if( (root2 > 0) && (root2 < contact_stepsize) )
            {
                contact_stepsize = 0.995 * root2;
                reflec_flag = 1;
            }
        }
    }
    // contact time end ||||||||||||||||||||||||||||||||||||<
    #pragma omp parallel for
    for (i= 0 ; i < N/2; i++) // first half-update
    {
        double v_temp = v[i] = v[i] + 0.5 * a[i] * contact_stepsize; // vnext = v + a/2 * dt
        x[i] = x[i] + v_temp * contact_stepsize;                     // xnext = x + vnext * dt
    }
    if(VIRTUAL_FLAG)
    {
        double v_temp = VIRTUAL_V[0] = VIRTUAL_V[0] + 0.5 * a[0] * contact_stepsize; // same half-update for the virtual particle
        VIRTUAL_X[0] = VIRTUAL_X[0] + v_temp * contact_stepsize;
    }
    // bath accelerations and second half-update of the bath velocities
    for (i = 0 ; i < OSSZI; i++)
    {
        double coup = coupling[i];
        double om = ommega[i];   // NOTE(review): om is unused in this loop
        double a_temp = a[1 + i] = -coup * (x[1 + i] - x[0])/massq[i]; // dp_q/dt = -gamma * (q - X) / m_q
        v[1 + i] = v[1 + i] + 0.5 * a_temp * contact_stepsize;         // second half-update
    }
    #pragma omp parallel for reduction(+:sum) // compute the sum gamma_i * (q_i - X)
    for (i = 0 ; i < OSSZI; i++)
    {
        double coup = coupling[i];
        sum += coup * (x[1 + i] - x[0]);
    }
    a[0] = sum /mass;                                 // heavy-particle acceleration
    v[0] = v[0] + 0.5 * a[0] * contact_stepsize;      // second half-update (heavy)
    if(VIRTUAL_FLAG)
    {
        VIRTUAL_V[0] = VIRTUAL_V[0] + 0.5 * a[0] * contact_stepsize;
    }
    *t = *t + contact_stepsize;
    if(reflec_flag)
    {
        LASTSTEPSIZE = LASTSTEPSIZE - contact_stepsize; // leftover step time is done on later calls
        if(REFLECTALL_FLAG) // reflect whole bath if flag is set
        {
            for (i = 0 ; i < OSSZI; i++)
            {
                double v_rel = v[1 + i] - v[0];
                v[1 + i] = - v[0] + v_rel; // keep relative velocity to the heavy particle unchanged by its reflection
            }
        }
        if(VIRTUAL_FLAG)
        {
            // NOTE(review): with VIRTUAL_FLAG set only the virtual particle is
            // reflected, while the bath above was adjusted against v[0] --
            // confirm this asymmetry is intended.
            VIRTUAL_V[0] = -VIRTUAL_V[0]; // reflect heavy
        }else
        {
            v[0] = -v[0]; // reflect heavy
        }
    }
    else
    {
        LASTSTEPSIZE = MAXSTEPSIZE; // no contact: next call takes a full step
    }
}
// One velocity-Verlet step for the heavy particle plus its fully mobile
// Kupferman bath (coupling U = sum_i gamma_i (q_i - X)^2).  Instead of hard
// reflecting walls (see VVerlet_Step_deriv_Box), the box is closed by linear
// potential ramps of height 2*KBOLTZ*TEMP over a width L_BALL at each wall,
// realized as a constant extra force +-height_ramp/L_BALL on the heavy
// particle while it is inside a ramp region.
//
// Parameters:
//   N           - total state size; x, v and a each hold N/2 entries
//                 (index 0: heavy particle, indices 1..OSSZI: bath oscillators)
//   x, v, a     - positions, velocities, accelerations (updated in place)
//   t           - simulation time, advanced by the step
//   derivmethod - unused here; kept for signature compatibility with the
//                 other VVerlet_Step_deriv_* integrators
//
// NOTE(review): reflec_flag and j are never used, and unlike the reflecting
// variant this function never updates LASTSTEPSIZE -- confirm that is intended.
void VVerlet_Step_deriv_Box_Ramp(const int N, double x[N/2] , double v[N/2], double a[N/2], double *t,
                                 void (*derivmethod) (double *y, double *ans, double t,int N)) {
    // performs a single update step inside a box of length LATTICE_SPACING
    // use fully mobile Kupferman bath that is coupling U = sum gamma_i (q_i-X)^2
    int i,j;
    SETNUMTHREADS
    double sum = 0.0;                        // accumulates the bath force on the heavy particle
    double contact_stepsize = LASTSTEPSIZE;  // full step; no contact-time shortening here
    int reflec_flag = 0;
    #pragma omp parallel for
    for (i= 0 ; i < N/2; i++) // first half-update
    {
        double v_temp = v[i] = v[i] + 0.5 * a[i] * contact_stepsize; // vnext = v + a/2 * dt
        x[i] = x[i] + v_temp * contact_stepsize;                     // xnext = x + vnext * dt
    }
    // bath accelerations and second half-update of the bath velocities
    for (i = 0 ; i < OSSZI; i++)
    {
        double coup = coupling[i];
        double om = ommega[i];   // NOTE(review): om is unused in this loop
        double a_temp = a[1 + i] = -coup * (x[1 + i] - x[0])/massq[i]; // dp_q/dt = -gamma * (q - X) / m_q
        v[1 + i] = v[1 + i] + 0.5 * a_temp * contact_stepsize;         // second half-update
    }
    #pragma omp parallel for reduction(+:sum) // compute the sum gamma_i * (q_i - X)
    for (i = 0 ; i < OSSZI; i++)
    {
        double coup = coupling[i];
        sum += coup * (x[1 + i] - x[0]);
    }
    a[0] = sum /mass;
    if(x[0] < -(LATTICE_SPACING/2.0 - L_BALL) ) // left wall: ramp pushes right
    {
        double height_ramp = 2*KBOLTZ*TEMP;     // ramp height ~ twice the thermal energy
        a[0] += height_ramp/L_BALL/mass;
    }
    if(x[0] > (LATTICE_SPACING/2.0 - L_BALL) )  // right wall: ramp pushes left
    {
        double height_ramp = 2*KBOLTZ*TEMP;
        a[0] += -height_ramp/L_BALL/mass;
    }
    v[0] = v[0] + 0.5 * a[0] * contact_stepsize; // second half-update (heavy)
    *t = *t + contact_stepsize;
}
|
GB_binop__isne_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_01__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_02__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_03__isne_uint32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__isne_uint32)
// A*D function (colscale): GB (_AxD__isne_uint32)
// D*A function (rowscale): GB (_DxB__isne_uint32)
// C+=B function (dense accum): GB (_Cdense_accumB__isne_uint32)
// C+=b function (dense accum): GB (_Cdense_accumb__isne_uint32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isne_uint32)
// C=scalar+B GB (_bind1st__isne_uint32)
// C=scalar+B' GB (_bind1st_tran__isne_uint32)
// C=A+scalar GB (_bind2nd__isne_uint32)
// C=A'+scalar GB (_bind2nd_tran__isne_uint32)
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij != bij)
// Type and operator macros consumed by the templates #included below.  For
// ISNE on uint32_t: z = (x != y), yielding 0 or 1 stored as uint32_t.
#define GB_ATYPE \
    uint32_t

#define GB_BTYPE \
    uint32_t

#define GB_CTYPE \
    uint32_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    uint32_t aij = GBX (Ax, pA, A_iso)

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint32_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint32_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x != y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ISNE || GxB_NO_UINT32 || GxB_NO_ISNE_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Compiled out (#if 0): per the comment below, this kernel only exists for the
// listed accumulator operators, and ISNE is not one of them.
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Thin wrapper: all work happens in the included template, which is
// specialized by the uint32_t/ISNE macros defined above.
GrB_Info GB (_Cdense_ewise3_noaccum__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// B is pre-sliced into B_ntasks tasks (B_ek_slicing) for B_nthreads threads;
// the slicing is consumed by the included template.
GrB_Info GB (_Cdense_accumB__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

// NOTE: this file is auto-generated; the removal of the unreachable duplicate
// "return (GrB_SUCCESS) ;" (formerly after the inner block) should also be
// mirrored in the Generator/ template it comes from.
GrB_Info GB (_Cdense_accumb__isne_uint32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,   // the scalar b, passed as untyped bytes
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    {
        // get the scalar b for C += b, of type uint32_t
        uint32_t bwork = (*((uint32_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    // Cx is C's value array, filled by the template via the macros bound above
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    // Cx is C's value array, filled by the template via the macros bound above
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// The C_to_M/C_to_A/C_to_B maps and the TaskList come from the symbolic phase;
// this function runs only the numeric phase (GB_PHASE_2_OF_2 above).
GrB_Info GB (_AaddB__isne_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    // workspace handles; presumably allocated inside the template if needed,
    // and released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_01__isne_uint32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    // GB_BINOP_FLIP is 0 for ISNE (commutative), so only the #else branch below
    // is compiled; the #if branch is kept for generators of non-commutative ops.
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
    #undef  GB_FLIPPED
    #define GB_FLIPPED 0
    #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__isne_uint32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller falls back to the generic kernel
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = (x != Bx [p]) for every entry p present in B.
GrB_Info GB (_bind1st__isne_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator compiled out; caller uses the generic kernel
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // only positions present in B (per its bitmap) produce an output
        if (GBB (Bb, p))
        {
            uint32_t b_val = GBX (Bx, p, false) ;
            Cx [p] = (x != b_val) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Computes Cx [p] = (Ax [p] != y) for every entry p present in A.
GrB_Info GB (_bind2nd__isne_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;     // operator compiled out; caller uses the generic kernel
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // only positions present in A (per its bitmap) produce an output
        if (GBB (Ab, p))
        {
            uint32_t a_val = GBX (Ax, p, false) ;
            Cx [p] = (a_val != y) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (x != aij) ;                      \
}

GrB_Info GB (_bind1st_tran__isne_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller uses the generic kernel
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent templates (redundant for this type,
    // since both definitions are uint32_t, but emitted by the generator)
    #undef  GB_ATYPE
    #define GB_ATYPE \
    uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)                       \
{                                               \
    uint32_t aij = GBX (Ax, pA, false) ;        \
    Cx [pC] = (aij != y) ;                      \
}

GrB_Info GB (_bind2nd_tran__isne_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;   // operator compiled out; caller uses the generic kernel
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
Matrix_mul_mp.c | //row major matrix multiplication
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <time.h>
#define n 350
// Row-major dense matrix multiplication M3 = M1 * M2 (n x n), timed with OpenMP.
//
// Fixes over the previous revision:
//  - M3 is initialized before being accumulated into (it was read while
//    uninitialized: undefined behavior);
//  - the operand M1 is no longer zeroed inside the multiply loop (every
//    product term was being forced to use a clobbered M1);
//  - the duplicated, shadowing `k` loop (inner bound 100 instead of n) is
//    collapsed into a single inner-product loop over k = 0..n-1;
//  - timing variables are double, matching omp_get_wtime()'s return type;
//  - the unused omp_rank variable (written by all threads, a data race) is gone.
int main()
{
    int i, j;
    // Aggregate initializers set only element [0][0]; all others start at 0.
    double M1[n][n] = {984.46}, M2[n][n] = {634.52}, M3[n][n];
    double startTime, endTime, execTime;

    startTime = omp_get_wtime();
    // i is the parallel loop variable (private by OpenMP rules); j must be
    // explicitly private since it is declared outside the parallel region.
    #pragma omp parallel for private(j) shared(M1, M2, M3)
    for (i = 0; i < n; i++)
    {
        for (j = 0; j < n; j++)
        {
            double acc = 0.0;   // accumulate locally, store once
            for (int k = 0; k < n; k++)
            {
                acc += M1[i][k] * M2[k][j];
            }
            M3[i][j] = acc;
        }
    }
    endTime = omp_get_wtime();
    execTime = endTime - startTime;
    printf("%f \n", execTime);
    return (0);
}
Domain.h | /************************************************************************
* MechSys - Open Library for Mechanical Systems *
* Copyright (C) 2016 Sergio Galindo *
* *
* This program is free software: you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation, either version 3 of the License, or *
* any later version. *
* *
* This program is distributed in the hope that it will be useful, *
* but WITHOUT ANY WARRANTY; without even the implied warranty of *
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
* GNU General Public License for more details. *
* *
* You should have received a copy of the GNU General Public License *
* along with this program. If not, see <http://www.gnu.org/licenses/> *
************************************************************************/
#ifndef MECHSYS_FLBM_DOMAIN_H
#define MECHSYS_FLBM_DOMAIN_H
// Hdf5
#ifdef USE_HDF5
#include <hdf5.h>
#include <hdf5_hl.h>
#endif
#ifdef USE_OCL
#include <mechsys/oclaux/cl.hpp>
#endif
// Std lib
#ifdef USE_OMP
#include <omp.h>
#endif
//STD
#include<iostream>
// Mechsys
#include <mechsys/linalg/matvec.h>
#include <mechsys/util/util.h>
#include <mechsys/util/numstreams.h>
#include <mechsys/util/stopwatch.h>
#include <mechsys/util/numstreams.h>
// Identifiers for the supported lattice Boltzmann velocity sets
// (DnQm: n spatial dimensions, m discrete velocities per cell).
enum LBMethod
{
    D2Q5,     ///< 2D 5 velocities
    D2Q9,     ///< 2D 9 velocities
    D3Q15,    ///< 3D 15 velocities
    D3Q19,    ///< 3D 19 velocities
    //D3Q27   ///< 3D 27 velocities
};
namespace FLBM
{
#ifdef USE_OCL
// Plain-data mirror of the lattice constants, uploaded to the OpenCL device
// in a single buffer (see blbmaux in Domain).
typedef struct lbm_aux
{
    size_t     Nl;          ///< Number of Lattices
    size_t     Nneigh;      ///< Number of Neighbors
    size_t     NCPairs;     ///< Number of cell pairs
    size_t     Nx;          ///< Integer vector with the dimensions of the LBM domain
    size_t     Ny;          ///< Integer vector with the dimensions of the LBM domain
    size_t     Nz;          ///< Integer vector with the dimensions of the LBM domain
    size_t     Op[27];      ///< Array with the opposite directions for bounce back calculation
    cl_double3 C[27];       ///< Collection of discrete velocity vectors
    double     EEk[27];     ///< Dyadic product of discrete velocities for LES calculation
    double     W[27];       ///< Collection of discrete weights
    double     Tau[2];      ///< Collection of characteristic collision times
    double     G[2];        ///< Collection of cohesive constants for multiphase simulation
    double     Gs[2];       ///< Collection of cohesive constants for multiphase simulation
    double     Rhoref[2];   ///< Collection of cohesive constants for multiphase simulation
    double     Psi[2];      ///< Collection of cohesive constants for multiphase simulation
    double     Gmix;        ///< Repulsion constant for multicomponent simulation
    double     Cs;          ///< Lattice speed
    double     Sc;          ///< Smagorinsky constant
} d_lbm_aux;
#endif
// Flattens 3-D cell coordinates iv into a linear index for a cubic lattice of
// dimensions Dim, with the x coordinate varying fastest.
inline size_t Pt2idx(iVec3_t iv, iVec3_t & Dim)
{
    // Horner form of iv(0) + iv(1)*Dim(0) + iv(2)*Dim(0)*Dim(1)
    return iv(0) + Dim(0)*(iv(1) + Dim(1)*iv(2));
}
// Inverse of Pt2idx: recovers the 3-D cell coordinates iv from the linear
// index n for a lattice of dimensions Dim.
inline void idx2Pt(size_t n, iVec3_t & iv, iVec3_t & Dim)
{
    size_t const nx = Dim(0);
    size_t const ny = Dim(1);
    iv(0) = n % nx;
    iv(1) = (n / nx) % ny;
    iv(2) = n / (nx * ny);
}
// Lattice Boltzmann fluid domain: owns the distribution functions, the
// macroscopic fields (density, velocity), the lattice constants for the chosen
// DnQm scheme, and the solver driver.  Supports multiple fluids (lattices),
// OpenMP threading and an optional OpenCL backend.
class Domain
{
public:
    static const double   WEIGHTSD2Q5   [ 5]; ///< Weights for the equilibrium distribution functions (D2Q5)
    static const double   WEIGHTSD2Q9   [ 9]; ///< Weights for the equilibrium distribution functions (D2Q9)
    static const double   WEIGHTSD3Q15  [15]; ///< Weights for the equilibrium distribution functions (D3Q15)
    static const double   WEIGHTSD3Q19  [19]; ///< Weights for the equilibrium distribution functions (D3Q19)
    //static const double   WEIGHTSD3Q27  [27]; ///< Weights for the equilibrium distribution functions (D3Q27)
    static const Vec3_t   LVELOCD2Q5    [ 5]; ///< Local velocities (D2Q5)
    static const Vec3_t   LVELOCD2Q9    [ 9]; ///< Local velocities (D2Q9)
    static const Vec3_t   LVELOCD3Q15   [15]; ///< Local velocities (D3Q15)
    static const Vec3_t   LVELOCD3Q19   [19]; ///< Local velocities (D3Q19)
    //static const Vec3_t   LVELOCD3Q27   [27]; ///< Local velocities (D3Q27)
    static const size_t   OPPOSITED2Q5  [ 5]; ///< Opposite directions (D2Q5)
    static const size_t   OPPOSITED2Q9  [ 9]; ///< Opposite directions (D2Q9)
    static const size_t   OPPOSITED3Q15 [15]; ///< Opposite directions (D3Q15)
    static const size_t   OPPOSITED3Q19 [19]; ///< Opposite directions (D3Q19)
    //static const size_t   OPPOSITED3Q27 [27]; ///< Opposite directions (D3Q27)

    //typedefs
    typedef void (*ptDFun_t) (Domain & Dom, void * UserData); ///< User callback invoked by Solve (setup/report hooks)

    //Constructors
    Domain (LBMethod      Method,  ///< Type of array, for example D2Q9
            Array<double> nu,      ///< Viscosity for each fluid
            iVec3_t       Ndim,    ///< Cell divisions per side
            double        dx,      ///< Space spacing
            double        dt);     ///< Time step

    //Special constructor with only one component, the parameters are the same as above
    Domain (LBMethod      Method,  ///< Type of array, for example D2Q9
            double        nu,      ///< Viscosity for each fluid
            iVec3_t       Ndim,    ///< Cell divisions per side
            double        dx,      ///< Space spacing
            double        dt);     ///< Time step

    //Methods
    void ApplyForcesSC();   ///< Apply the molecular forces for the single component case
    void ApplyForcesMP();   ///< Apply the molecular forces for the multiphase case
    void ApplyForcesSCMP(); ///< Apply the molecular forces for the both previous cases
    void CollideSC();       ///< The collide step of LBM for single component simulations
    void CollideMP();       ///< The collide step of LBM for multi phase simulations
    void StreamSC();        ///< The stream step of LBM SC
    void StreamMP();        ///< The stream step of LBM MP
    void Initialize(size_t k, iVec3_t idx, double Rho, Vec3_t & Vel); ///< Initialize each cell with a given density and velocity
    double Feq(size_t k, double Rho, Vec3_t & Vel);                   ///< The equilibrium function
    void Solve(double Tf, double dtOut, ptDFun_t ptSetup=NULL, ptDFun_t ptReport=NULL,
               char const * FileKey=NULL, bool RenderVideo=true, size_t Nproc=1); ///< Solve the Domain dynamics

    //Writing Methods
    void WriteXDMF (char const * FileKey); ///< Write the domain data in xdmf file

    //Methods for OpenCL
    #ifdef USE_OCL
    void UpLoadDevice (); ///< Upload the buffers into the coprocessor device
    void DnLoadDevice (); ///< Download the buffers into the coprocessor device
    void ApplyForceCL (); ///< Apply forces in the coprocessor
    void CollideCL    (); ///< Apply collision operator in the coprocessor
    void StreamCL     (); ///< Apply streaming operator in the coprocessor
    #endif

    #ifdef USE_OMP
    omp_lock_t lck; ///< to protect variables in multithreading
    #endif

    //Data
    double *****  F;           ///< The array containing the individual functions with the order of the lattice, the x,y,z coordinates and the order of the function.
    double *****  Ftemp;       ///< A similar array to hold provisional data
    bool   ****   IsSolid;     ///< An array of bools with an identifier to see if the cell is a solid cell
    Vec3_t ****   Vel;         ///< The fluid velocities
    Vec3_t ****   BForce;      ///< Body Force for each cell
    double ****   Rho;         ///< The fluid densities
    double *      Tau;         ///< The characteristic time of the lattice
    double *      G;           ///< The attractive constant for multiphase simulations
    double *      Gs;          ///< The attractive constant for solid phase
    double *      Psi;         ///< Parameters for the Shan Chen pseudo potential
    double *      Rhoref;      ///< Parameters for the Shan Chen pseudo potential
    double        Gmix;        ///< The mixing constant for multicomponent simulations
    size_t const * Op;         ///< An array containing the indexes of the opposite direction for bounce back conditions
    double const * W;          ///< An array with the direction weights
    double *      EEk;         ///< Dyadic product of the velocity vectors
    Vec3_t const * C;          ///< The array of lattice velocities
    size_t        Nneigh;      ///< Number of Neighbors, depends on the scheme
    double        dt;          ///< Time Step
    double        dx;          ///< Grid size
    double        Cs;          ///< Lattice Velocity
    bool          IsFirstTime; ///< Bool variable checking if it is the first time function Setup is called
    iVec3_t       Ndim;        ///< Lattice Dimensions
    size_t        Ncells;      ///< Number of cells
    size_t        Nproc;       ///< Number of processors for openmp
    size_t        idx_out;     ///< The discrete time step for output
    String        FileKey;     ///< File Key for output files
    void *        UserData;    ///< User Data
    size_t        Step;        ///< Length of averaging cube to save data
    double        Time;        ///< Simulation time variable
    size_t        Nl;          ///< Number of lattices (fluids)
    double        Sc;          ///< Smagorinsky constant

    //Array for pair calculation
    size_t        NCellPairs;  ///< Number of cell pairs
    iVec3_t  *    CellPairs;   ///< Pairs of cells for molecular force calculation

    #ifdef USE_OCL
    cl::CommandQueue CL_Queue;   ///< CL Queue for coprocessor commands
    cl::Context      CL_Context; ///< CL Context for coprocessor commands
    cl::Program      CL_Program; ///< CL program object containing the kernel routines
    cl::Device       CL_Device;  ///< Identity of the accelerating device
    cl::Buffer     * bF;         ///< Buffer with the distribution functions
    cl::Buffer     * bFtemp;     ///< Buffer with the distribution functions temporal
    cl::Buffer     * bIsSolid;   ///< Buffer with the solid bool information
    cl::Buffer     * bBForce;    ///< Buffer with the body forces
    cl::Buffer     * bVel;       ///< Buffer with the cell velocities
    cl::Buffer     * bRho;       ///< Buffer with the cell densities
    cl::Buffer     * bCellPair;  ///< Buffer with the pair cell information for force calculation
    cl::Buffer       blbmaux;    ///< Buffer with the structure containing generic lbm information
    size_t           N_Groups;   ///< Number of work groups that the GPU can allocate
    #endif
};
inline Domain::Domain(LBMethod TheMethod, Array<double> nu, iVec3_t TheNdim, double Thedx, double Thedt)
{
    // Multi-fluid constructor: builds one lattice (distribution arrays,
    // macroscopic fields and interaction parameters) per entry of `nu`, the
    // array of kinematic viscosities. `TheNdim` is the grid size, `Thedx` the
    // lattice spacing and `Thedt` the time step.
    Util::Stopwatch stopwatch;
    printf("\n%s--- Initializing LBM Domain --------------------------------------------%s\n",TERM_CLR1,TERM_RST);
    // Sanity checks: at least one fluid, and a velocity set consistent with the
    // grid dimensionality. NOTE(review): project convention throws Fatal by
    // pointer (`throw new Fatal`), so catch sites must catch `Fatal*`.
    if (nu.Size()==0) throw new Fatal("LBM::Domain: Declare at leat one fluid please");
    if (TheNdim(2) >1&&(TheMethod==D2Q9 ||TheMethod==D2Q5 )) throw new Fatal("LBM::Domain: D2Q9 scheme does not allow for a third dimension, please set Ndim(2)=1 or change to D3Q15");
    if (TheNdim(2)==1&&(TheMethod==D3Q15||TheMethod==D3Q19)) throw new Fatal("LBM::Domain: Ndim(2) is 1. Either change the method to D2Q9 or increase the z-dimension");
    // Select the velocity set: number of directions, weights, lattice
    // velocities and the opposite-direction table (for bounce-back).
    if (TheMethod==D2Q5)
    {
        Nneigh = 5;
        W = WEIGHTSD2Q5;
        C = LVELOCD2Q5;
        Op = OPPOSITED2Q5;
    }
    if (TheMethod==D2Q9)
    {
        Nneigh = 9;
        W = WEIGHTSD2Q9;
        C = LVELOCD2Q9;
        Op = OPPOSITED2Q9;
    }
    if (TheMethod==D3Q15)
    {
        Nneigh = 15;
        W = WEIGHTSD3Q15;
        C = LVELOCD3Q15;
        Op = OPPOSITED3Q15;
    }
    if (TheMethod==D3Q19)
    {
        Nneigh = 19;
        W = WEIGHTSD3Q19;
        C = LVELOCD3Q19;
        Op = OPPOSITED3Q19;
    }
    Time = 0.0;
    dt = Thedt;
    dx = Thedx;
    Cs = dx/dt;
    Step = 1;
    Sc = 0.17;
    Nl = nu.Size();
    Ndim = TheNdim;
    Ncells = Ndim(0)*Ndim(1)*Ndim(2);
    IsFirstTime = true;
    // Per-lattice scalar parameters.
    Tau = new double [Nl];
    G = new double [Nl];
    Gs = new double [Nl];
    Rhoref = new double [Nl];
    Psi = new double [Nl];
    Gmix= 0.0;
    // Per-lattice field arrays, allocated as nested [lattice][x][y][z](dir)
    // jagged arrays.
    F = new double **** [Nl];
    Ftemp = new double **** [Nl];
    Vel = new Vec3_t *** [Nl];
    BForce = new Vec3_t *** [Nl];
    Rho = new double *** [Nl];
    IsSolid = new bool *** [Nl];
    for (size_t i=0;i<Nl;i++)
    {
        // BGK relaxation time from the kinematic viscosity: tau = 3 nu dt/dx^2 + 1/2.
        Tau [i] = 3.0*nu[i]*dt/(dx*dx)+0.5;
        G [i] = 0.0;
        Gs [i] = 0.0;
        Rhoref [i] = 200.0;
        Psi [i] = 4.0;
        F [i] = new double *** [Ndim(0)];
        Ftemp [i] = new double *** [Ndim(0)];
        Vel [i] = new Vec3_t ** [Ndim(0)];
        BForce [i] = new Vec3_t ** [Ndim(0)];
        Rho [i] = new double ** [Ndim(0)];
        IsSolid [i] = new bool ** [Ndim(0)];
        for (size_t nx=0;nx<Ndim(0);nx++)
        {
            F [i][nx] = new double ** [Ndim(1)];
            Ftemp [i][nx] = new double ** [Ndim(1)];
            Vel [i][nx] = new Vec3_t * [Ndim(1)];
            BForce [i][nx] = new Vec3_t * [Ndim(1)];
            Rho [i][nx] = new double * [Ndim(1)];
            IsSolid [i][nx] = new bool * [Ndim(1)];
            for (size_t ny=0;ny<Ndim(1);ny++)
            {
                F [i][nx][ny] = new double * [Ndim(2)];
                Ftemp [i][nx][ny] = new double * [Ndim(2)];
                Vel [i][nx][ny] = new Vec3_t [Ndim(2)];
                BForce [i][nx][ny] = new Vec3_t [Ndim(2)];
                Rho [i][nx][ny] = new double [Ndim(2)];
                IsSolid [i][nx][ny] = new bool [Ndim(2)];
                for (size_t nz=0;nz<Ndim(2);nz++)
                {
                    F [i][nx][ny][nz] = new double [Nneigh];
                    Ftemp[i][nx][ny][nz] = new double [Nneigh];
                    IsSolid[i][nx][ny][nz] = false;
                    for (size_t nn=0;nn<Nneigh;nn++)
                    {
                        F [i][nx][ny][nz][nn] = 0.0;
                        Ftemp[i][nx][ny][nz][nn] = 0.0;
                    }
                }
            }
        }
    }
    // Precompute |c_k,n * c_k,m| summed over the dyadic components, used by the
    // Smagorinsky turbulence correction in CollideSC.
    EEk = new double [Nneigh];
    for (size_t k=0;k<Nneigh;k++)
    {
        EEk[k] = 0.0;
        for (size_t n=0;n<3;n++)
        for (size_t m=0;m<3;m++)
        {
            EEk[k] += fabs(C[k][n]*C[k][m]);
        }
    }
    printf("%s Num of cells = %zd%s\n",TERM_CLR2,Nl*Ncells,TERM_RST);
    #ifdef USE_OMP
    omp_init_lock(&lck);
    #endif
}
inline Domain::Domain(LBMethod TheMethod, double Thenu, iVec3_t TheNdim, double Thedx, double Thedt)
{
    // Single-fluid convenience constructor. NOTE(review): this duplicates the
    // multi-fluid constructor body with Nl fixed to 1 — keep the two in sync
    // when modifying either.
    Array<double> nu(1);
    nu[0] = Thenu;
    Util::Stopwatch stopwatch;
    printf("\n%s--- Initializing LBM Domain --------------------------------------------%s\n",TERM_CLR1,TERM_RST);
    // Sanity checks: velocity set must match the grid dimensionality.
    if (nu.Size()==0) throw new Fatal("LBM::Domain: Declare at leat one fluid please");
    if (TheNdim(2) >1&&(TheMethod==D2Q9 ||TheMethod==D2Q5 )) throw new Fatal("LBM::Domain: D2Q9 scheme does not allow for a third dimension, please set Ndim(2)=1 or change to D3Q15");
    if (TheNdim(2)==1&&(TheMethod==D3Q15||TheMethod==D3Q19)) throw new Fatal("LBM::Domain: Ndim(2) is 1. Either change the method to D2Q9 or increase the z-dimension");
    // Select the velocity set: directions, weights, velocities, bounce-back table.
    if (TheMethod==D2Q5)
    {
        Nneigh = 5;
        W = WEIGHTSD2Q5;
        C = LVELOCD2Q5;
        Op = OPPOSITED2Q5;
    }
    if (TheMethod==D2Q9)
    {
        Nneigh = 9;
        W = WEIGHTSD2Q9;
        C = LVELOCD2Q9;
        Op = OPPOSITED2Q9;
    }
    if (TheMethod==D3Q15)
    {
        Nneigh = 15;
        W = WEIGHTSD3Q15;
        C = LVELOCD3Q15;
        Op = OPPOSITED3Q15;
    }
    if (TheMethod==D3Q19)
    {
        Nneigh = 19;
        W = WEIGHTSD3Q19;
        C = LVELOCD3Q19;
        Op = OPPOSITED3Q19;
    }
    Time = 0.0;
    dt = Thedt;
    dx = Thedx;
    Cs = dx/dt;
    Step = 1;
    Sc = 0.17;
    Nl = 1;
    Ndim = TheNdim;
    Ncells = Ndim(0)*Ndim(1)*Ndim(2);
    IsFirstTime = true;
    // Per-lattice scalar parameters (arrays of size 1 here).
    Tau = new double [Nl];
    G = new double [Nl];
    Gs = new double [Nl];
    Rhoref = new double [Nl];
    Psi = new double [Nl];
    Gmix = 0.0;
    // Field arrays, allocated as nested [lattice][x][y][z](dir) jagged arrays.
    F = new double **** [Nl];
    Ftemp = new double **** [Nl];
    Vel = new Vec3_t *** [Nl];
    BForce = new Vec3_t *** [Nl];
    Rho = new double *** [Nl];
    IsSolid = new bool *** [Nl];
    for (size_t i=0;i<Nl;i++)
    {
        // BGK relaxation time: tau = 3 nu dt/dx^2 + 1/2.
        Tau [i] = 3.0*nu[i]*dt/(dx*dx)+0.5;
        G [i] = 0.0;
        Gs [i] = 0.0;
        Rhoref [i] = 200.0;
        Psi [i] = 4.0;
        F [i] = new double *** [Ndim(0)];
        Ftemp [i] = new double *** [Ndim(0)];
        Vel [i] = new Vec3_t ** [Ndim(0)];
        BForce [i] = new Vec3_t ** [Ndim(0)];
        Rho [i] = new double ** [Ndim(0)];
        IsSolid [i] = new bool ** [Ndim(0)];
        for (size_t nx=0;nx<Ndim(0);nx++)
        {
            F [i][nx] = new double ** [Ndim(1)];
            Ftemp [i][nx] = new double ** [Ndim(1)];
            Vel [i][nx] = new Vec3_t * [Ndim(1)];
            BForce [i][nx] = new Vec3_t * [Ndim(1)];
            Rho [i][nx] = new double * [Ndim(1)];
            IsSolid [i][nx] = new bool * [Ndim(1)];
            for (size_t ny=0;ny<Ndim(1);ny++)
            {
                F [i][nx][ny] = new double * [Ndim(2)];
                Ftemp [i][nx][ny] = new double * [Ndim(2)];
                Vel [i][nx][ny] = new Vec3_t [Ndim(2)];
                BForce [i][nx][ny] = new Vec3_t [Ndim(2)];
                Rho [i][nx][ny] = new double [Ndim(2)];
                IsSolid [i][nx][ny] = new bool [Ndim(2)];
                for (size_t nz=0;nz<Ndim(2);nz++)
                {
                    F [i][nx][ny][nz] = new double [Nneigh];
                    Ftemp[i][nx][ny][nz] = new double [Nneigh];
                    IsSolid[i][nx][ny][nz] = false;
                    for (size_t nn=0;nn<Nneigh;nn++)
                    {
                        F [i][nx][ny][nz][nn] = 0.0;
                        Ftemp[i][nx][ny][nz][nn] = 0.0;
                    }
                }
            }
        }
    }
    // Precompute the dyadic-product magnitudes used by the Smagorinsky term.
    EEk = new double [Nneigh];
    for (size_t k=0;k<Nneigh;k++)
    {
        EEk[k] = 0.0;
        for (size_t n=0;n<3;n++)
        for (size_t m=0;m<3;m++)
        {
            EEk[k] += fabs(C[k][n]*C[k][m]);
        }
    }
    printf("%s Num of cells = %zd%s\n",TERM_CLR2,Nl*Ncells,TERM_RST);
    #ifdef USE_OMP
    omp_init_lock(&lck);
    #endif
}
inline void Domain::WriteXDMF(char const * FileKey)
{
    // Writes one output frame: an HDF5 file (<FileKey>.h5) with per-lattice
    // density, velocity, solid fraction (Gamma) and full distribution state,
    // plus an XDMF descriptor (<FileKey>.xmf) so the data can be visualized.
    // NOTE(review): the parameter shadows the member `String FileKey` — only
    // the parameter is used here; confirm that is intended.
    String fn(FileKey);
    fn.append(".h5");
    hid_t file_id;
    file_id = H5Fcreate(fn.CStr(), H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    // Coarsened output dimensions: macroscopic fields are averaged over cubes
    // of side Step.
    size_t Nx = Ndim[0]/Step;
    size_t Ny = Ndim[1]/Step;
    size_t Nz = Ndim[2]/Step;
    for (size_t j=0;j<Nl;j++)
    {
        // Creating data sets
        double * Density = new double[ Nx*Ny*Nz];
        double * Gamma = new double[ Nx*Ny*Nz];
        double * Vvec = new double[3*Nx*Ny*Nz];
        // BUGFIX: Sstate is filled (and written) at FULL resolution below, so
        // it must be sized Nneigh*Ndim(0)*Ndim(1)*Ndim(2). The previous
        // allocation of Nneigh*Nx*Ny*Nz overflowed the heap whenever Step>1.
        double * Sstate = new double[Nneigh*Ndim(0)*Ndim(1)*Ndim(2)];
        size_t i=0;
        // Average density, solid fraction and velocity over each Step^3 cube.
        for (size_t m=0;m<Ndim(2);m+=Step)
        for (size_t l=0;l<Ndim(1);l+=Step)
        for (size_t n=0;n<Ndim(0);n+=Step)
        {
            double rho = 0.0;
            double gamma = 0.0;
            Vec3_t vel = OrthoSys::O;
            for (size_t ni=0;ni<Step;ni++)
            for (size_t li=0;li<Step;li++)
            for (size_t mi=0;mi<Step;mi++)
            {
                rho += Rho [j][n+ni][l+li][m+mi];
                gamma += IsSolid[j][n+ni][l+li][m+mi] ? 1.0: 0.0;
                vel += Vel [j][n+ni][l+li][m+mi];
            }
            rho /= Step*Step*Step;
            gamma/= Step*Step*Step;
            vel /= Step*Step*Step;
            Density [i] = (double) rho;
            Gamma [i] = (double) gamma;
            Vvec[3*i ] = (double) vel(0);
            Vvec[3*i+1] = (double) vel(1);
            Vvec[3*i+2] = (double) vel(2);
            i++;
        }
        // The full distribution state is stored at full resolution (no
        // averaging) so a run can be restarted from it.
        size_t i_state=0;
        for (size_t m=0;m<Ndim(2);m+=1)
        for (size_t l=0;l<Ndim(1);l+=1)
        for (size_t n=0;n<Ndim(0);n+=1)
        {
            for (size_t k=0;k<Nneigh;k+=1)
            {
                Sstate[i_state] = (double) F[j][n][l][m][k];
                i_state++;
            }
        }
        //Writing data to h5 file
        hsize_t dims[1];
        dims[0] = Nx*Ny*Nz;
        String dsname;
        dsname.Printf("Density_%d",j);
        H5LTmake_dataset_double(file_id,dsname.CStr(),1,dims,Density );
        if (j==0)
        {
            dsname.Printf("Gamma");
            H5LTmake_dataset_double(file_id,dsname.CStr(),1,dims,Gamma );
        }
        dims[0] = 3*Nx*Ny*Nz;
        dsname.Printf("Velocity_%d",j);
        H5LTmake_dataset_double(file_id,dsname.CStr(),1,dims,Vvec );
        // write full state of simulation
        // BUGFIX: dataset extent must match the full-resolution fill above.
        dims[0] = Nneigh*Ndim(0)*Ndim(1)*Ndim(2);
        dsname.Printf("State_%d",j);
        H5LTmake_dataset_double(file_id,dsname.CStr(),1,dims,Sstate );
        dims[0] = 1;
        int N[1];
        N[0] = Nx;
        dsname.Printf("Nx");
        H5LTmake_dataset_int(file_id,dsname.CStr(),1,dims,N);
        dims[0] = 1;
        N[0] = Ny;
        dsname.Printf("Ny");
        H5LTmake_dataset_int(file_id,dsname.CStr(),1,dims,N);
        dims[0] = 1;
        N[0] = Nz;
        dsname.Printf("Nz");
        H5LTmake_dataset_int(file_id,dsname.CStr(),1,dims,N);
        delete [] Density ;
        delete [] Gamma ;
        delete [] Vvec ;
        delete [] Sstate ;
    }
    //Closing the file
    H5Fflush(file_id,H5F_SCOPE_GLOBAL);
    H5Fclose(file_id);
    // Writing xmf file
    std::ostringstream oss;
    if (Ndim(2)==1)
    {
        // 2D descriptor. NOTE(review): the DataItem dimensions below use the
        // full Ndim while the datasets are coarsened to Nx*Ny*Nz; these only
        // agree when Step==1 — confirm Step>1 is unsupported in 2D.
        oss << "<?xml version=\"1.0\" ?>\n";
        oss << "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n";
        oss << "<Xdmf Version=\"2.0\">\n";
        oss << " <Domain>\n";
        oss << " <Grid Name=\"mesh1\" GridType=\"Uniform\">\n";
        oss << " <Topology TopologyType=\"2DCoRectMesh\" Dimensions=\"" << Ndim(1) << " " << Ndim(0) << "\"/>\n";
        oss << " <Geometry GeometryType=\"ORIGIN_DXDY\">\n";
        oss << " <DataItem Format=\"XML\" NumberType=\"Float\" Dimensions=\"2\"> 0.0 0.0\n";
        oss << " </DataItem>\n";
        oss << " <DataItem Format=\"XML\" NumberType=\"Float\" Dimensions=\"2\"> 1.0 1.0\n";
        oss << " </DataItem>\n";
        oss << " </Geometry>\n";
        for (size_t j=0;j<Nl;j++)
        {
            oss << " <Attribute Name=\"Density_" << j << "\" AttributeType=\"Scalar\" Center=\"Node\">\n";
            oss << " <DataItem Dimensions=\"" << Ndim(0) << " " << Ndim(1) << " " << Ndim(2) << "\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
            oss << " " << fn.CStr() <<":/Density_" << j << "\n";
            oss << " </DataItem>\n";
            oss << " </Attribute>\n";
            oss << " <Attribute Name=\"Velocity_" << j << "\" AttributeType=\"Vector\" Center=\"Node\">\n";
            oss << " <DataItem Dimensions=\"" << Ndim(0) << " " << Ndim(1) << " " << Ndim(2) << " 3\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
            oss << " " << fn.CStr() <<":/Velocity_" << j << "\n";
            oss << " </DataItem>\n";
            oss << " </Attribute>\n";
        }
        oss << " <Attribute Name=\"Gamma\" AttributeType=\"Scalar\" Center=\"Node\">\n";
        oss << " <DataItem Dimensions=\"" << Ndim(0) << " " << Ndim(1) << " " << Ndim(2) << "\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
        oss << " " << fn.CStr() <<":/Gamma\n";
        oss << " </DataItem>\n";
        oss << " </Attribute>\n";
        oss << " </Grid>\n";
        oss << " </Domain>\n";
        oss << "</Xdmf>\n";
    }
    else
    {
        // 3D descriptor: dimensions use the coarsened grid (Nz Ny Nx ordering,
        // as required by XDMF), with node spacing Step*dx.
        oss << "<?xml version=\"1.0\" ?>\n";
        oss << "<!DOCTYPE Xdmf SYSTEM \"Xdmf.dtd\" []>\n";
        oss << "<Xdmf Version=\"2.0\">\n";
        oss << " <Domain>\n";
        oss << " <Grid Name=\"LBM_Mesh\" GridType=\"Uniform\">\n";
        oss << " <Topology TopologyType=\"3DCoRectMesh\" Dimensions=\"" << Nz << " " << Ny << " " << Nx << "\"/>\n";
        oss << " <Geometry GeometryType=\"ORIGIN_DXDYDZ\">\n";
        oss << " <DataItem Format=\"XML\" NumberType=\"Float\" Dimensions=\"3\"> 0.0 0.0 0.0\n";
        oss << " </DataItem>\n";
        oss << " <DataItem Format=\"XML\" NumberType=\"Float\" Dimensions=\"3\"> " << Step*dx << " " << Step*dx << " " << Step*dx << "\n";
        oss << " </DataItem>\n";
        oss << " </Geometry>\n";
        for (size_t j=0;j<Nl;j++)
        {
            oss << " <Attribute Name=\"Density_" << j << "\" AttributeType=\"Scalar\" Center=\"Node\">\n";
            oss << " <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << "\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
            oss << " " << fn.CStr() <<":/Density_" << j << "\n";
            oss << " </DataItem>\n";
            oss << " </Attribute>\n";
            oss << " <Attribute Name=\"Velocity_" << j << "\" AttributeType=\"Vector\" Center=\"Node\">\n";
            oss << " <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << " 3\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
            oss << " " << fn.CStr() <<":/Velocity_" << j << "\n";
            oss << " </DataItem>\n";
            oss << " </Attribute>\n";
        }
        oss << " <Attribute Name=\"Gamma\" AttributeType=\"Scalar\" Center=\"Node\">\n";
        oss << " <DataItem Dimensions=\"" << Nz << " " << Ny << " " << Nx << "\" NumberType=\"Float\" Precision=\"4\" Format=\"HDF\">\n";
        oss << " " << fn.CStr() <<":/Gamma\n";
        oss << " </DataItem>\n";
        oss << " </Attribute>\n";
        oss << " </Grid>\n";
        oss << " </Domain>\n";
        oss << "</Xdmf>\n";
    }
    fn = FileKey;
    fn.append(".xmf");
    std::ofstream of(fn.CStr(), std::ios::out);
    of << oss.str();
    of.close();
}
inline double Domain::Feq(size_t k, double Rho, Vec3_t & V)
{
    // Discrete equilibrium distribution for direction k at density Rho and
    // velocity V: the usual second-order expansion in V/Cs, weighted by W[k].
    double const vc  = dot(V,C[k]);
    double const v2  = dot(V,V);
    double const cs2 = Cs*Cs;
    return W[k]*Rho*(1.0 + 3.0*vc/Cs + 4.5*vc*vc/cs2 - 1.5*v2/cs2);
}
inline void Domain::Initialize(size_t il, iVec3_t idx, double TheRho, Vec3_t & TheVel)
{
    // Initializes cell `idx` of lattice `il`: clears the body force, sets every
    // distribution function to its equilibrium value for (TheRho, TheVel), and
    // stores the macroscopic fields (zeroed on solid cells).
    size_t const cx = idx(0);
    size_t const cy = idx(1);
    size_t const cz = idx(2);
    BForce[il][cx][cy][cz] = OrthoSys::O;
    for (size_t k=0;k<Nneigh;k++) F[il][cx][cy][cz][k] = Feq(k,TheRho,TheVel);
    if (IsSolid[il][cx][cy][cz])
    {
        // Solid cells carry no macroscopic state.
        Vel[il][cx][cy][cz] = OrthoSys::O;
        Rho[il][cx][cy][cz] = 0.0;
    }
    else
    {
        Vel[il][cx][cy][cz] = TheVel;
        Rho[il][cx][cy][cz] = TheRho;
    }
}
inline void Domain::ApplyForcesSC()
{
    // Shan-Chen single-component interaction forces: for every precomputed cell
    // pair, accumulate equal-and-opposite pseudopotential forces into BForce.
    // NOTE(review): distinct pairs can share a cell, so under OpenMP the
    // `+=`/`-=` updates below are a potential data race — confirm whether
    // atomics or per-thread accumulation is needed.
    #ifdef USE_OMP
    #pragma omp parallel for schedule(static) num_threads(Nproc)
    #endif
    for (size_t n=0;n<NCellPairs;n++)
    {
        iVec3_t idxc,idxn;
        size_t k = CellPairs[n](2); // lattice direction connecting the pair
        idx2Pt(CellPairs[n](0),idxc,Ndim);
        idx2Pt(CellPairs[n](1),idxn,Ndim);
        size_t ixc = idxc(0);
        size_t iyc = idxc(1);
        size_t izc = idxc(2);
        size_t ixn = idxn(0);
        size_t iyn = idxn(1);
        size_t izn = idxn(2);
        double psic = 0.0;
        double psin = 0.0;
        // Pseudopotential psi = Psi*exp(-Rhoref/rho); zero on solid cells.
        IsSolid[0][ixc][iyc][izc] ? psic = 0.0 : psic = Psi[0]*exp(-Rhoref[0]/Rho[0][ixc][iyc][izc]);
        IsSolid[0][ixn][iyn][izn] ? psin = 0.0 : psin = Psi[0]*exp(-Rhoref[0]/Rho[0][ixn][iyn][izn]);
        // Newton's third law: the pair exchanges equal and opposite forces.
        Vec3_t bforce = -G[0]*W[k]*C[k]*psic*psin;
        BForce[0][ixc][iyc][izc] += bforce;
        BForce[0][ixn][iyn][izn] -= bforce;
    }
}
inline void Domain::ApplyForcesMP()
{
    // Multiphase (two-lattice) interaction forces: couples lattice 0 and
    // lattice 1 across each cell pair with strength Gmix, replaced by the
    // solid-fluid strength Gs[.] when one side of the pair is a solid cell.
    // NOTE(review): as in ApplyForcesSC, concurrent `+=` on shared cells under
    // OpenMP is a potential data race — verify.
    #ifdef USE_OMP
    #pragma omp parallel for schedule(static) num_threads(Nproc)
    #endif
    for (size_t n=0;n<NCellPairs;n++)
    {
        iVec3_t idxc,idxn;
        size_t k = CellPairs[n](2);
        idx2Pt(CellPairs[n](0),idxc,Ndim);
        idx2Pt(CellPairs[n](1),idxn,Ndim);
        size_t ixc = idxc(0);
        size_t iyc = idxc(1);
        size_t izc = idxc(2);
        size_t ixn = idxn(0);
        size_t iyn = idxn(1);
        size_t izn = idxn(2);
        double psic = 0.0;
        double psin = 0.0;
        double Gt = Gmix;
        // Lattice 0 at the cell vs lattice 1 at the neighbor; solids use psi=1
        // and the wall-adhesion strength Gs of the opposing fluid.
        IsSolid[0][ixc][iyc][izc] ? psic = 1.0, Gt = Gs[1] : psic = Rho[0][ixc][iyc][izc];
        IsSolid[1][ixn][iyn][izn] ? psin = 1.0, Gt = Gs[0] : psin = Rho[1][ixn][iyn][izn];
        Vec3_t bforce = -Gt*W[k]*C[k]*psic*psin;
        BForce[0][ixc][iyc][izc] += bforce;
        BForce[1][ixn][iyn][izn] -= bforce;
        // Mirror interaction: lattice 1 at the cell vs lattice 0 at the neighbor.
        Gt = Gmix;
        IsSolid[1][ixc][iyc][izc] ? psic = 1.0, Gt = Gs[0] : psic = Rho[1][ixc][iyc][izc];
        IsSolid[0][ixn][iyn][izn] ? psin = 1.0, Gt = Gs[1] : psin = Rho[0][ixn][iyn][izn];
        bforce = -Gt*W[k]*C[k]*psic*psin;
        BForce[1][ixc][iyc][izc] += bforce;
        BForce[0][ixn][iyn][izn] -= bforce;
    }
}
inline void Domain::ApplyForcesSCMP()
{
    // Combined Shan-Chen + multiphase forces: per-lattice self-interaction
    // (pseudopotential, strength G[il]) plus the cross-lattice coupling used by
    // ApplyForcesMP (strength Gmix / Gs on solids).
    // NOTE(review): concurrent `+=` on shared cells under OpenMP is a potential
    // data race — verify.
    #ifdef USE_OMP
    #pragma omp parallel for schedule(static) num_threads(Nproc)
    #endif
    for (size_t n=0;n<NCellPairs;n++)
    {
        iVec3_t idxc,idxn;
        size_t k = CellPairs[n](2);
        idx2Pt(CellPairs[n](0),idxc,Ndim);
        idx2Pt(CellPairs[n](1),idxn,Ndim);
        size_t ixc = idxc(0);
        size_t iyc = idxc(1);
        size_t izc = idxc(2);
        size_t ixn = idxn(0);
        size_t iyn = idxn(1);
        size_t izn = idxn(2);
        double psic = 0.0;
        double psin = 0.0;
        // Self-interaction of lattice 0.
        IsSolid[0][ixc][iyc][izc] ? psic = 0.0 : psic = Psi[0]*exp(-Rhoref[0]/Rho[0][ixc][iyc][izc]);
        IsSolid[0][ixn][iyn][izn] ? psin = 0.0 : psin = Psi[0]*exp(-Rhoref[0]/Rho[0][ixn][iyn][izn]);
        Vec3_t bforce = -G[0]*W[k]*C[k]*psic*psin;
        BForce[0][ixc][iyc][izc] += bforce;
        BForce[0][ixn][iyn][izn] -= bforce;
        // Self-interaction of lattice 1.
        IsSolid[1][ixc][iyc][izc] ? psic = 0.0 : psic = Psi[1]*exp(-Rhoref[1]/Rho[1][ixc][iyc][izc]);
        IsSolid[1][ixn][iyn][izn] ? psin = 0.0 : psin = Psi[1]*exp(-Rhoref[1]/Rho[1][ixn][iyn][izn]);
        bforce = -G[1]*W[k]*C[k]*psic*psin;
        BForce[1][ixc][iyc][izc] += bforce;
        BForce[1][ixn][iyn][izn] -= bforce;
        // Cross coupling 0<->1 (see ApplyForcesMP).
        double Gt = Gmix;
        IsSolid[0][ixc][iyc][izc] ? psic = 1.0, Gt = Gs[1] : psic = Rho[0][ixc][iyc][izc];
        IsSolid[1][ixn][iyn][izn] ? psin = 1.0, Gt = Gs[0] : psin = Rho[1][ixn][iyn][izn];
        bforce = -Gt*W[k]*C[k]*psic*psin;
        BForce[0][ixc][iyc][izc] += bforce;
        BForce[1][ixn][iyn][izn] -= bforce;
        Gt = Gmix;
        IsSolid[1][ixc][iyc][izc] ? psic = 1.0, Gt = Gs[0] : psic = Rho[1][ixc][iyc][izc];
        IsSolid[0][ixn][iyn][izn] ? psin = 1.0, Gt = Gs[1] : psin = Rho[0][ixn][iyn][izn];
        bforce = -Gt*W[k]*C[k]*psic*psin;
        BForce[1][ixc][iyc][izc] += bforce;
        BForce[0][ixn][iyn][izn] -= bforce;
    }
}
inline void Domain::CollideSC()
{
    // BGK collision for the single-component solver, with a Smagorinsky
    // subgrid correction of the relaxation time and a positivity limiter on
    // the post-collision populations. Solid cells perform bounce-back instead.
    size_t nx = Ndim(0);
    size_t ny = Ndim(1);
    size_t nz = Ndim(2);
    #ifdef USE_OMP
    #pragma omp parallel for schedule(static) num_threads(Nproc)
    #endif
    for (size_t ix=0;ix<nx;ix++)
    for (size_t iy=0;iy<ny;iy++)
    for (size_t iz=0;iz<nz;iz++)
    {
        if (!IsSolid[0][ix][iy][iz])
        {
            // NOTE: variable-length array (compiler extension); Nneigh <= 19.
            double NonEq[Nneigh];
            double Q = 0.0;
            double tau = Tau[0];
            double rho = Rho[0][ix][iy][iz];
            // Velocity shifted by the body-force contribution dt*F/rho.
            Vec3_t vel = Vel[0][ix][iy][iz]+dt*BForce[0][ix][iy][iz]/rho;
            double VdotV = dot(vel,vel);
            // Non-equilibrium part per direction, and Q for the Smagorinsky term.
            for (size_t k=0;k<Nneigh;k++)
            {
                double VdotC = dot(vel,C[k]);
                double Feq = W[k]*rho*(1.0 + 3.0*VdotC/Cs + 4.5*VdotC*VdotC/(Cs*Cs) - 1.5*VdotV/(Cs*Cs));
                NonEq[k] = F[0][ix][iy][iz][k] - Feq;
                Q += NonEq[k]*NonEq[k]*EEk[k];
            }
            // Effective relaxation time including the eddy-viscosity correction.
            Q = sqrt(2.0*Q);
            tau = 0.5*(tau+sqrt(tau*tau + 6.0*Q*Sc/rho));
            // Limiter: shrink the relaxation step by `alpha` until no
            // population goes negative (re-checking all k after each shrink).
            bool valid = true;
            double alpha = 1.0;
            while (valid)
            {
                valid = false;
                for (size_t k=0;k<Nneigh;k++)
                {
                    Ftemp[0][ix][iy][iz][k] = F[0][ix][iy][iz][k] - alpha*NonEq[k]/tau;
                    if (Ftemp[0][ix][iy][iz][k]<-1.0e-12)
                    {
                        //std::cout << Ftemp[0][ix][iy][iz][k] << std::endl;
                        double temp = tau*F[0][ix][iy][iz][k]/NonEq[k];
                        if (temp<alpha) alpha = temp;
                        valid = true;
                    }
                    if (std::isnan(Ftemp[0][ix][iy][iz][k]))
                    {
                        std::cout << "CollideSC: Nan found, resetting" << std::endl;
                        std::cout << " " << alpha << " " << iVec3_t(ix,iy,iz) << " " << k << " " << std::endl;
                        throw new Fatal("Domain::CollideSC: Distribution funcitons gave nan value, check parameters");
                    }
                }
            }
        }
        else
        {
            // Solid cell: full-way bounce-back using the opposite-direction table.
            for (size_t k=0;k<Nneigh;k++)
            {
                Ftemp[0][ix][iy][iz][k] = F[0][ix][iy][iz][Op[k]];
            }
        }
    }
    // Swap current and temporary distribution arrays.
    double ***** tmp = F;
    F = Ftemp;
    Ftemp = tmp;
}
inline void Domain::CollideMP ()
{
    // BGK collision for the multiphase solver: all lattices relax toward a
    // common mixture velocity (density/Tau-weighted), each with its own Tau
    // and body-force shift. Solid cells perform bounce-back.
    size_t nx = Ndim(0);
    size_t ny = Ndim(1);
    size_t nz = Ndim(2);
    #ifdef USE_OMP
    #pragma omp parallel for schedule(static) num_threads(Nproc)
    #endif
    for (size_t ix=0;ix<nx;ix++)
    for (size_t iy=0;iy<ny;iy++)
    for (size_t iz=0;iz<nz;iz++)
    {
        // Mixture velocity: sum(rho_il*vel_il/Tau_il) / sum(rho_il/Tau_il).
        Vec3_t Vmix = OrthoSys::O;
        double den = 0.0;
        for (size_t il=0;il<Nl;il++)
        {
            Vmix += Rho[il][ix][iy][iz]*Vel[il][ix][iy][iz]/Tau[il];
            den += Rho[il][ix][iy][iz]/Tau[il];
        }
        Vmix /= den;
        for (size_t il=0;il<Nl;il++)
        {
            if (!IsSolid[il][ix][iy][iz])
            {
                double rho = Rho[il][ix][iy][iz];
                // Velocity shifted by Tau[il]*F/rho (Shan-Chen forcing scheme).
                Vec3_t vel = Vmix + Tau[il]*BForce[il][ix][iy][iz]/rho;
                double VdotV = dot(vel,vel);
                // Positivity limiter, same scheme as CollideSC.
                bool valid = true;
                double alpha = 1.0;
                while (valid)
                {
                    valid = false;
                    for (size_t k=0;k<Nneigh;k++)
                    {
                        double VdotC = dot(vel,C[k]);
                        double Feq = W[k]*rho*(1.0 + 3.0*VdotC/Cs + 4.5*VdotC*VdotC/(Cs*Cs) - 1.5*VdotV/(Cs*Cs));
                        Ftemp[il][ix][iy][iz][k] = F[il][ix][iy][iz][k] - alpha*(F[il][ix][iy][iz][k] - Feq)/Tau[il];
                        if (Ftemp[il][ix][iy][iz][k]<-1.0e-12)
                        {
                            //std::cout << Ftemp[0][ix][iy][iz][k] << std::endl;
                            double temp = Tau[il]*F[il][ix][iy][iz][k]/(F[il][ix][iy][iz][k] - Feq);
                            if (temp<alpha) alpha = temp;
                            valid = true;
                        }
                        if (std::isnan(Ftemp[il][ix][iy][iz][k]))
                        {
                            std::cout << "CollideMP: Nan found, resetting" << std::endl;
                            std::cout << " " << alpha << " " << iVec3_t(ix,iy,iz) << " " << k << " " << std::endl;
                            throw new Fatal("Domain::CollideMP: Distribution funcitons gave nan value, check parameters");
                        }
                    }
                }
            }
            else
            {
                // Solid cell: full-way bounce-back.
                for (size_t k=0;k<Nneigh;k++)
                {
                    Ftemp[il][ix][iy][iz][k] = F[il][ix][iy][iz][Op[k]];
                }
            }
        }
    }
    // Swap current and temporary distribution arrays.
    double ***** tmp = F;
    F = Ftemp;
    Ftemp = tmp;
}
inline void Domain::StreamSC()
{
size_t nx = Ndim(0);
size_t ny = Ndim(1);
size_t nz = Ndim(2);
#ifdef USE_OMP
#pragma omp parallel for schedule(static) num_threads(Nproc)
#endif
for (size_t ix=0;ix<nx;ix++)
for (size_t iy=0;iy<ny;iy++)
for (size_t iz=0;iz<nz;iz++)
{
for (size_t k=0;k<Nneigh;k++)
{
size_t nix = (size_t)((int)ix + (int)C[k](0) + (int)Ndim(0))%Ndim(0);
size_t niy = (size_t)((int)iy + (int)C[k](1) + (int)Ndim(1))%Ndim(1);
size_t niz = (size_t)((int)iz + (int)C[k](2) + (int)Ndim(2))%Ndim(2);
Ftemp[0][nix][niy][niz][k] = F[0][ix][iy][iz][k];
}
}
double ***** tmp = F;
F = Ftemp;
Ftemp = tmp;
#ifdef USE_OMP
#pragma omp parallel for schedule(static) num_threads(Nproc)
#endif
for (size_t ix=0;ix<nx;ix++)
for (size_t iy=0;iy<ny;iy++)
for (size_t iz=0;iz<nz;iz++)
{
BForce[0][ix][iy][iz] = OrthoSys::O;
Vel [0][ix][iy][iz] = OrthoSys::O;
Rho [0][ix][iy][iz] = 0.0;
if (!IsSolid[0][ix][iy][iz])
{
for (size_t k=0;k<Nneigh;k++)
{
Rho[0][ix][iy][iz] += F[0][ix][iy][iz][k];
Vel[0][ix][iy][iz] += F[0][ix][iy][iz][k]*C[k];
}
Vel[0][ix][iy][iz] /= Rho[0][ix][iy][iz];
}
}
}
inline void Domain::StreamMP()
{
size_t nx = Ndim(0);
size_t ny = Ndim(1);
size_t nz = Ndim(2);
for (size_t il=0;il<Nl;il++)
{
#ifdef USE_OMP
#pragma omp parallel for schedule(static) num_threads(Nproc)
#endif
for (size_t ix=0;ix<nx;ix++)
for (size_t iy=0;iy<ny;iy++)
for (size_t iz=0;iz<nz;iz++)
{
for (size_t k=0;k<Nneigh;k++)
{
size_t nix = (size_t)((int)ix + (int)C[k](0) + (int)Ndim(0))%Ndim(0);
size_t niy = (size_t)((int)iy + (int)C[k](1) + (int)Ndim(1))%Ndim(1);
size_t niz = (size_t)((int)iz + (int)C[k](2) + (int)Ndim(2))%Ndim(2);
Ftemp[il][nix][niy][niz][k] = F[il][ix][iy][iz][k];
}
}
}
double ***** tmp = F;
F = Ftemp;
Ftemp = tmp;
#ifdef USE_OMP
#pragma omp parallel for schedule(static) num_threads(Nproc)
#endif
for (size_t ix=0;ix<nx;ix++)
for (size_t iy=0;iy<ny;iy++)
for (size_t iz=0;iz<nz;iz++)
{
for (size_t il=0;il<Nl;il++)
{
BForce[il][ix][iy][iz] = OrthoSys::O;
Vel [il][ix][iy][iz] = OrthoSys::O;
Rho [il][ix][iy][iz] = 0.0;
if (!IsSolid[il][ix][iy][iz])
{
for (size_t k=0;k<Nneigh;k++)
{
Rho[il][ix][iy][iz] += F[il][ix][iy][iz][k];
Vel[il][ix][iy][iz] += F[il][ix][iy][iz][k]*C[k];
}
Vel[il][ix][iy][iz] /= Rho[il][ix][iy][iz];
}
}
}
}
#ifdef USE_OCL
inline void Domain::UpLoadDevice()
{
    // Copies the whole simulation state to the OpenCL device: allocates one
    // buffer per lattice for F, Ftemp, IsSolid, BForce, Vel and Rho, packs the
    // jagged host arrays into flat z-major arrays, and uploads the generic
    // lattice metadata (lbm_aux) shared by all kernels.
    bF = new cl::Buffer [Nl];
    bFtemp = new cl::Buffer [Nl];
    bIsSolid = new cl::Buffer [Nl];
    bBForce = new cl::Buffer [Nl];
    bVel = new cl::Buffer [Nl];
    bRho = new cl::Buffer [Nl];
    blbmaux = cl::Buffer(CL_Context,CL_MEM_READ_WRITE,sizeof(lbm_aux ) );
    // Fill the metadata struct mirrored on the device.
    lbm_aux lbmaux[1];
    lbmaux[0].Nx = Ndim(0);
    lbmaux[0].Ny = Ndim(1);
    lbmaux[0].Nz = Ndim(2);
    lbmaux[0].Nneigh = Nneigh;
    lbmaux[0].NCPairs = NCellPairs;
    lbmaux[0].Nl = Nl;
    lbmaux[0].Gmix = Gmix;
    lbmaux[0].Cs = Cs;
    lbmaux[0].Sc = Sc;
    for (size_t nn=0;nn<Nneigh;nn++)
    {
        lbmaux[0].C [nn].s[0] = C [nn](0);
        lbmaux[0].C [nn].s[1] = C [nn](1);
        lbmaux[0].C [nn].s[2] = C [nn](2);
        lbmaux[0].EEk[nn] = EEk[nn] ;
        lbmaux[0].W [nn] = W [nn] ;
        lbmaux[0].Op [nn] = Op [nn] ;
    }
    for (size_t il=0;il<Nl;il++)
    {
        lbmaux[0].Tau [il] = Tau [il];
        lbmaux[0].G [il] = G [il];
        lbmaux[0].Gs [il] = Gs [il];
        lbmaux[0].Rhoref [il] = Rhoref [il];
        lbmaux[0].Psi [il] = Psi [il];
        bF [il] = cl::Buffer(CL_Context,CL_MEM_READ_WRITE,sizeof(double )*Ncells*Nneigh);
        bFtemp [il] = cl::Buffer(CL_Context,CL_MEM_READ_WRITE,sizeof(double )*Ncells*Nneigh);
        bIsSolid [il] = cl::Buffer(CL_Context,CL_MEM_READ_WRITE,sizeof(bool )*Ncells );
        bBForce [il] = cl::Buffer(CL_Context,CL_MEM_READ_WRITE,sizeof(cl_double3)*Ncells );
        bVel [il] = cl::Buffer(CL_Context,CL_MEM_READ_WRITE,sizeof(cl_double3)*Ncells );
        bRho [il] = cl::Buffer(CL_Context,CL_MEM_READ_WRITE,sizeof(double )*Ncells );
        // Staging arrays: flatten [x][y][z] into linear index Nm (z slowest in
        // the loop nest below) and the per-direction data into Nn.
        bool *IsSolidCL;
        double *FCL,*FtempCL,*RhoCL;
        cl_double3 *VelCL,*BForceCL;
        FCL = new double [Ncells*Nneigh];
        FtempCL = new double [Ncells*Nneigh];
        IsSolidCL = new bool [Ncells ];
        RhoCL = new double [Ncells ];
        VelCL = new cl_double3[Ncells ];
        BForceCL = new cl_double3[Ncells ];
        size_t Nn = 0;
        size_t Nm = 0;
        for (size_t nz=0;nz<Ndim(2);nz++)
        {
            for (size_t ny=0;ny<Ndim(1);ny++)
            {
                for (size_t nx=0;nx<Ndim(0);nx++)
                {
                    IsSolidCL[Nm] = IsSolid [il][nx][ny][nz];
                    RhoCL [Nm] = Rho [il][nx][ny][nz];
                    VelCL [Nm].s[0] = Vel [il][nx][ny][nz][0];
                    VelCL [Nm].s[1] = Vel [il][nx][ny][nz][1];
                    VelCL [Nm].s[2] = Vel [il][nx][ny][nz][2];
                    BForceCL [Nm].s[0] = BForce [il][nx][ny][nz][0];
                    BForceCL [Nm].s[1] = BForce [il][nx][ny][nz][1];
                    BForceCL [Nm].s[2] = BForce [il][nx][ny][nz][2];
                    Nm++;
                    for (size_t nn=0;nn<Nneigh;nn++)
                    {
                        FCL [Nn] = F [il][nx][ny][nz][nn];
                        FtempCL[Nn] = Ftemp[il][nx][ny][nz][nn];
                        Nn++;
                    }
                }
            }
        }
        // Blocking writes (CL_TRUE) so the staging arrays can be freed at once.
        CL_Queue.enqueueWriteBuffer(bF [il],CL_TRUE,0,sizeof(double )*Ncells*Nneigh,FCL );
        CL_Queue.enqueueWriteBuffer(bFtemp [il],CL_TRUE,0,sizeof(double )*Ncells*Nneigh,FtempCL );
        CL_Queue.enqueueWriteBuffer(bIsSolid[il],CL_TRUE,0,sizeof(bool )*Ncells ,IsSolidCL );
        CL_Queue.enqueueWriteBuffer(bBForce [il],CL_TRUE,0,sizeof(cl_double3)*Ncells ,BForceCL );
        CL_Queue.enqueueWriteBuffer(bVel [il],CL_TRUE,0,sizeof(cl_double3)*Ncells ,VelCL );
        CL_Queue.enqueueWriteBuffer(bRho [il],CL_TRUE,0,sizeof(double )*Ncells ,RhoCL );
        delete [] FCL ;
        delete [] FtempCL ;
        delete [] IsSolidCL ;
        delete [] RhoCL ;
        delete [] VelCL ;
        delete [] BForceCL ;
    }
    CL_Queue.enqueueWriteBuffer(blbmaux ,CL_TRUE,0,sizeof(lbm_aux ) ,lbmaux );
    // Run a tiny diagnostic kernel to validate the uploaded metadata.
    cl::Kernel kernel = cl::Kernel(CL_Program,"CheckUpLoad");
    kernel.setArg(0,blbmaux );
    CL_Queue.enqueueNDRangeKernel(kernel,cl::NullRange,cl::NDRange(1),cl::NullRange);
    CL_Queue.finish();
}
inline void Domain::DnLoadDevice()
{
    // Copies the device state back to the host: per lattice, reads Rho, Vel
    // and F (blocking reads) and unpacks them from the flat z-major layout
    // into the jagged host arrays. Ftemp and BForce are not downloaded.
    for (size_t il=0;il<Nl;il++)
    {
        double * RhoCL;
        cl_double3 * VelCL;
        double * FCL;
        RhoCL = new double [Ncells];
        VelCL = new cl_double3[Ncells];
        FCL = new double [Nneigh*Ncells];
        CL_Queue.enqueueReadBuffer(bRho[il],CL_TRUE,0,sizeof(double )*Ncells,RhoCL);
        CL_Queue.enqueueReadBuffer(bVel[il],CL_TRUE,0,sizeof(cl_double3)*Ncells,VelCL);
        CL_Queue.enqueueReadBuffer(bF[il],CL_TRUE,0,sizeof(double )*Ncells*Nneigh,FCL);
        // Nm indexes cells, Nfm indexes cell*direction entries; the loop nest
        // mirrors the packing order used in UpLoadDevice.
        size_t Nm = 0;
        size_t Nfm = 0;
        for (size_t nz=0;nz<Ndim(2);nz++)
        {
            for (size_t ny=0;ny<Ndim(1);ny++)
            {
                for (size_t nx=0;nx<Ndim(0);nx++)
                {
                    Rho[il][nx][ny][nz] = RhoCL[Nm] ;
                    Vel[il][nx][ny][nz][0] = VelCL[Nm].s[0];
                    Vel[il][nx][ny][nz][1] = VelCL[Nm].s[1];
                    Vel[il][nx][ny][nz][2] = VelCL[Nm].s[2];
                    for (size_t nn=0;nn<Nneigh;nn++)
                    {
                        F[il][nx][ny][nz][nn] = FCL[Nfm];
                        Nfm++;
                    }
                    Nm++;
                }
            }
        }
        delete [] RhoCL ;
        delete [] VelCL ;
        delete [] FCL ;
    }
}
inline void Domain::ApplyForceCL()
{
    // GPU counterpart of the ApplyForces* routines: picks the kernel matching
    // the CPU dispatch in Solve (SC for one lattice; SCMP when any
    // self-interaction G is active; plain MP otherwise).
    cl::Kernel kernel;
    if (Nl==1)
    {
        kernel = cl::Kernel(CL_Program,"ApplyForcesSC" );
        kernel.setArg(0,bIsSolid[0]);
        kernel.setArg(1,bBForce [0]);
        kernel.setArg(2,bRho [0]);
        kernel.setArg(3,blbmaux );
        CL_Queue.enqueueNDRangeKernel(kernel,cl::NullRange,cl::NDRange(Ncells),cl::NullRange);
        CL_Queue.finish();
    }
    else if ((fabs(G[0])>1.0e-12)||(fabs(G[1])>1.0e-12))
    {
        kernel = cl::Kernel(CL_Program,"ApplyForcesSCMP");
        kernel.setArg(0,bIsSolid[0]);
        kernel.setArg(1,bIsSolid[1]);
        kernel.setArg(2,bBForce [0]);
        kernel.setArg(3,bBForce [1]);
        kernel.setArg(4,bRho [0]);
        kernel.setArg(5,bRho [1]);
        kernel.setArg(6,blbmaux );
        CL_Queue.enqueueNDRangeKernel(kernel,cl::NullRange,cl::NDRange(Ncells),cl::NullRange);
        CL_Queue.finish();
    }
    else
    {
        kernel = cl::Kernel(CL_Program,"ApplyForcesMP" );
        kernel.setArg(0,bIsSolid[0]);
        kernel.setArg(1,bIsSolid[1]);
        kernel.setArg(2,bBForce [0]);
        kernel.setArg(3,bBForce [1]);
        kernel.setArg(4,bRho [0]);
        kernel.setArg(5,bRho [1]);
        kernel.setArg(6,blbmaux );
        CL_Queue.enqueueNDRangeKernel(kernel,cl::NullRange,cl::NDRange(Ncells),cl::NullRange);
        CL_Queue.finish();
    }
}
inline void Domain::CollideCL()
{
    // GPU collision step: dispatches the single-component or the two-lattice
    // multiphase kernel over all cells.
    cl::Kernel kernel;
    if (Nl==1)
    {
        kernel = cl::Kernel(CL_Program,"CollideSC");
        kernel.setArg(0,bIsSolid[0]);
        kernel.setArg(1,bF [0]);
        kernel.setArg(2,bFtemp [0]);
        kernel.setArg(3,bBForce [0]);
        kernel.setArg(4,bVel [0]);
        kernel.setArg(5,bRho [0]);
        kernel.setArg(6,blbmaux );
    }
    else
    {
        // NOTE(review): assumes exactly two lattices when Nl>1.
        kernel = cl::Kernel(CL_Program,"CollideMP");
        kernel.setArg( 0,bIsSolid[0]);
        kernel.setArg( 1,bIsSolid[1]);
        kernel.setArg( 2,bF [0]);
        kernel.setArg( 3,bF [1]);
        kernel.setArg( 4,bFtemp [0]);
        kernel.setArg( 5,bFtemp [1]);
        kernel.setArg( 6,bBForce [0]);
        kernel.setArg( 7,bBForce [1]);
        kernel.setArg( 8,bVel [0]);
        kernel.setArg( 9,bVel [1]);
        kernel.setArg(10,bRho [0]);
        kernel.setArg(11,bRho [1]);
        kernel.setArg(12,blbmaux );
    }
    CL_Queue.enqueueNDRangeKernel(kernel,cl::NullRange,cl::NDRange(Ncells),cl::NullRange);
    CL_Queue.finish();
}
inline void Domain::StreamCL()
{
    // GPU streaming step, run per lattice: Stream1 propagates the populations,
    // Stream2 rebuilds the macroscopic fields (mirrors StreamSC/StreamMP).
    for (size_t il=0;il<Nl;il++)
    {
        cl::Kernel kernel = cl::Kernel(CL_Program,"Stream1");
        kernel.setArg(0,bF [il]);
        kernel.setArg(1,bFtemp [il]);
        kernel.setArg(2,blbmaux );
        CL_Queue.enqueueNDRangeKernel(kernel,cl::NullRange,cl::NDRange(Ncells),cl::NullRange);
        CL_Queue.finish();
        kernel = cl::Kernel(CL_Program,"Stream2");
        kernel.setArg(0,bIsSolid[il]);
        kernel.setArg(1,bF [il]);
        kernel.setArg(2,bFtemp [il]);
        kernel.setArg(3,bBForce [il]);
        kernel.setArg(4,bVel [il]);
        kernel.setArg(5,bRho [il]);
        kernel.setArg(6,blbmaux );
        CL_Queue.enqueueNDRangeKernel(kernel,cl::NullRange,cl::NDRange(Ncells),cl::NullRange);
        CL_Queue.finish();
    }
}
#endif
inline void Domain::Solve(double Tf, double dtOut, ptDFun_t ptSetup, ptDFun_t ptReport,
                          char const * TheFileKey, bool RenderVideo, size_t TheNproc)
{
    // Main time loop: integrates the LBM dynamics from the current Time up to
    // Tf, writing output every dtOut. `ptSetup` is invoked every step (e.g. to
    // impose boundary conditions) and `ptReport` at every output instant; both
    // receive this Domain and UserData.
    #ifdef USE_OCL
    // OpenCL initialization: pick the first platform/device, build the kernel
    // source from $MECHSYS_ROOT/mechsys/lib/flbm/lbm.cl, and create the queue.
    std::vector<cl::Platform> all_platforms;
    cl::Platform::get(&all_platforms);
    if (all_platforms.size()==0)
    {
        throw new Fatal("FLBM::Domain: There are no GPUs or APUs, please compile with the A_USE_OCL flag turned off");
    }
    cl::Platform default_platform=all_platforms[0];
    std::vector<cl::Device> all_devices;
    default_platform.getDevices(CL_DEVICE_TYPE_ALL, &all_devices);
    if (all_devices.size()==0)
    {
        throw new Fatal("FLBM::Domain: There are no GPUs or APUs, please compile with the A_USE_OCL flag turned off");
    }
    CL_Device = all_devices[0];
    CL_Context = cl::Context(CL_Device);
    cl::Program::Sources sources;
    char* pMECHSYS_ROOT;
    pMECHSYS_ROOT = getenv ("MECHSYS_ROOT");
    if (pMECHSYS_ROOT==NULL) pMECHSYS_ROOT = getenv ("HOME");
    String pCL;
    pCL.Printf("%s/mechsys/lib/flbm/lbm.cl",pMECHSYS_ROOT);
    std::ifstream infile(pCL.CStr(),std::ifstream::in);
    std::string kernel_code((std::istreambuf_iterator<char>(infile)), std::istreambuf_iterator<char>());
    sources.push_back({kernel_code.c_str(),kernel_code.length()});
    CL_Program = cl::Program(CL_Context,sources);
    if(CL_Program.build({CL_Device})!=CL_SUCCESS){
        std::cout<<" Error building: "<<CL_Program.getBuildInfo<CL_PROGRAM_BUILD_LOG>(CL_Device)<<"\n";
        exit(1);
    }
    CL_Queue = cl::CommandQueue(CL_Context,CL_Device);
    N_Groups = CL_Device.getInfo<CL_DEVICE_MAX_WORK_GROUP_SIZE>();
    #endif
    idx_out = 0;
    FileKey.Printf("%s",TheFileKey);
    Nproc = TheNproc;
    Util::Stopwatch stopwatch;
    printf("\n%s--- Solving ---------------------------------------------------------------------%s\n",TERM_CLR1 , TERM_RST);
    printf("%s Time step = %g%s\n" ,TERM_CLR2, dt , TERM_RST);
    for (size_t i=0;i<Nl;i++)
    {
        printf("%s Tau of Lattice %zd = %g%s\n" ,TERM_CLR2, i, Tau[i] , TERM_RST);
    }
    #ifdef USE_OCL
    //printf("%s Using GPU: = %c%s\n" ,TERM_CLR2, Default_Device.getInfo<CL_DEVICE_NAME>(), TERM_RST);
    std::cout
        << TERM_CLR2
        << " Using GPU: = " << CL_Device.getInfo<CL_DEVICE_NAME>() << TERM_RST << std::endl;
    #endif
    {
        // Build the unique cell-pair list used by the interaction-force
        // routines: each pair (nc, nb, k) links cell nc with its periodic
        // neighbor nb along direction k, kept only once (nb>nc).
        // NOTE(review): CellPairs is allocated with `new` on every call and
        // never freed here — calling Solve twice would leak the old array;
        // confirm the destructor handles it.
        Array<iVec3_t> CPP(0);
        size_t nx = Ndim(0);
        size_t ny = Ndim(1);
        size_t nz = Ndim(2);
        for (size_t iz=0;iz<nz;iz++)
        for (size_t iy=0;iy<ny;iy++)
        for (size_t ix=0;ix<nx;ix++)
        {
            size_t nc = Pt2idx(iVec3_t(ix,iy,iz),Ndim);
            for (size_t k=1;k<Nneigh;k++)
            {
                size_t nix = (size_t)((int)ix + (int)C[k](0) + (int)Ndim(0))%Ndim(0);
                size_t niy = (size_t)((int)iy + (int)C[k](1) + (int)Ndim(1))%Ndim(1);
                size_t niz = (size_t)((int)iz + (int)C[k](2) + (int)Ndim(2))%Ndim(2);
                size_t nb = Pt2idx(iVec3_t(nix,niy,niz),Ndim);
                if (nb>nc)
                {
                    CPP.Push(iVec3_t(nc,nb,k));
                }
            }
        }
        NCellPairs = CPP.Size();
        CellPairs = new iVec3_t [NCellPairs];
        for (size_t n=0;n<NCellPairs;n++)
        {
            CellPairs[n] = CPP[n];
        }
    }
    #ifdef USE_OCL
    UpLoadDevice();
    #endif
    // Time-stepping loop: setup callback, periodic output, then one LBM step
    // (forces, collision, streaming) on either the GPU or the CPU.
    double tout = Time;
    while (Time < Tf)
    {
        if (ptSetup!=NULL) (*ptSetup) ((*this), UserData);
        if (Time >= tout)
        {
            #ifdef USE_OCL
            DnLoadDevice();
            #endif
            if (TheFileKey!=NULL)
            {
                String fn;
                fn.Printf ("%s_%04d", TheFileKey, idx_out);
                if ( RenderVideo)
                {
                    #ifdef USE_HDF5
                    WriteXDMF(fn.CStr());
                    #else
                    //WriteVTK (fn.CStr());
                    #endif
                }
                if (ptReport!=NULL) (*ptReport) ((*this), UserData);
            }
            tout += dtOut;
            idx_out++;
        }
        //The LBM dynamics
        #ifdef USE_OCL
        ApplyForceCL();
        CollideCL();
        StreamCL();
        #else
        if (Nl==1)
        {
            // Interaction forces are only needed when Shan-Chen coupling is on.
            if (fabs(G[0])>1.0e-12) ApplyForcesSC();
            CollideSC();
            StreamSC();
        }
        else
        {
            if ((fabs(G[0])>1.0e-12)||(fabs(G[1])>1.0e-12)) ApplyForcesSCMP();
            else ApplyForcesMP();
            CollideMP();
            StreamMP();
        }
        #endif
        Time += dt;
    }
}
// Static lattice tables: weights, opposite-direction maps and velocity sets
// for each supported scheme. Entry k of OPPOSITE* gives the direction whose
// velocity is -C[k] (used by bounce-back).
const double Domain::WEIGHTSD2Q5 [ 5] = { 2./6., 1./6., 1./6., 1./6., 1./6 }; ///< Weights (D2Q5); note `1./6` == `1./6.`
const double Domain::WEIGHTSD2Q9 [ 9] = { 4./9., 1./9., 1./9., 1./9., 1./9., 1./36., 1./36., 1./36., 1./36. }; ///< Weights (D2Q9)
const double Domain::WEIGHTSD3Q15 [15] = { 2./9., 1./9., 1./9., 1./9., 1./9., 1./9., 1./9., 1./72., 1./72. , 1./72., 1./72., 1./72., 1./72., 1./72., 1./72.}; ///< Weights (D3Q15)
const double Domain::WEIGHTSD3Q19 [19] = { 1./3., 1./18., 1./18., 1./18., 1./18., 1./18., 1./18., 1./36., 1./36., 1./36., 1./36., 1./36., 1./36., 1./36., 1./36., 1./36., 1./36., 1./36., 1./36.}; ///< Weights (D3Q19)
const size_t Domain::OPPOSITED2Q5 [ 5] = { 0, 3, 4, 1, 2 }; ///< Opposite directions (D2Q5)
const size_t Domain::OPPOSITED2Q9 [ 9] = { 0, 3, 4, 1, 2, 7, 8, 5, 6 }; ///< Opposite directions (D2Q9)
const size_t Domain::OPPOSITED3Q15 [15] = { 0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13}; ///< Opposite directions (D3Q15)
const size_t Domain::OPPOSITED3Q19 [19] = { 0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17}; ///< Opposite directions (D3Q19)
const Vec3_t Domain::LVELOCD2Q5 [ 5] = { {0,0,0}, {1,0,0}, {0,1,0}, {-1,0,0}, {0,-1,0} }; ///< Lattice velocities (D2Q5)
const Vec3_t Domain::LVELOCD2Q9 [ 9] = { {0,0,0}, {1,0,0}, {0,1,0}, {-1,0,0}, {0,-1,0}, {1,1,0}, {-1,1,0}, {-1,-1,0}, {1,-1,0} }; ///< Lattice velocities (D2Q9)
const Vec3_t Domain::LVELOCD3Q15 [15] = ///< Lattice velocities (D3Q15)
{
    { 0, 0, 0}, { 1, 0, 0}, {-1, 0, 0}, { 0, 1, 0}, { 0,-1, 0},
    { 0, 0, 1}, { 0, 0,-1}, { 1, 1, 1}, {-1,-1,-1}, { 1, 1,-1},
    {-1,-1, 1}, { 1,-1, 1}, {-1, 1,-1}, { 1,-1,-1}, {-1, 1, 1}
};
const Vec3_t Domain::LVELOCD3Q19 [19] = ///< Lattice velocities (D3Q19)
{
    { 0, 0, 0},
    { 1, 0, 0}, {-1, 0, 0}, { 0, 1, 0}, { 0,-1, 0}, { 0, 0, 1}, { 0, 0,-1},
    { 1, 1, 0}, {-1,-1, 0}, { 1,-1, 0}, {-1, 1, 0}, { 1, 0, 1}, {-1, 0,-1},
    { 1, 0,-1}, {-1, 0, 1}, { 0, 1, 1}, { 0,-1,-1}, { 0, 1,-1}, { 0,-1, 1}
};
}
#endif
|
conv1x1s1_sgemm_neon_sgemm.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#include "option.h"
#include "mat.h"
namespace ncnn{
static void conv1x1s1_sgemm_neon_sgemm(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt,
int w, int h, int inch, int outch)
{
const int size = w * h;
Mat tmp = bottom_blob;
const float* bias = _bias;
int nn_outch = 0;
int remain_outch_start = 0;
#if __ARM_NEON && __aarch64__
nn_outch = outch >> 3;
remain_outch_start = nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = pp * 8;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p+1);
float* outptr2 = top_blob.channel(p+2);
float* outptr3 = top_blob.channel(p+3);
float* outptr4 = top_blob.channel(p+4);
float* outptr5 = top_blob.channel(p+5);
float* outptr6 = top_blob.channel(p+6);
float* outptr7 = top_blob.channel(p+7);
const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i+7<size; i+=8)
{
const float* tmpptr = tmp.channel(i/8);
const float* kptr = kernel.channel(p/8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[0] \n"
"dup v18.4s, v0.s[1] \n"
"dup v19.4s, v0.s[1] \n"
"dup v20.4s, v0.s[2] \n"
"dup v21.4s, v0.s[2] \n"
"dup v22.4s, v0.s[3] \n"
"dup v23.4s, v0.s[3] \n"
"dup v24.4s, v1.s[0] \n"
"dup v25.4s, v1.s[0] \n"
"dup v26.4s, v1.s[1] \n"
"dup v27.4s, v1.s[1] \n"
"dup v28.4s, v1.s[2] \n"
"dup v29.4s, v1.s[2] \n"
"dup v30.4s, v1.s[3] \n"
"dup v31.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%8], #64 \n"
"fmla v16.4s, v10.4s, v2.s[0] \n"
"fmla v18.4s, v10.4s, v2.s[1] \n"
"fmla v20.4s, v10.4s, v2.s[2] \n"
"fmla v22.4s, v10.4s, v2.s[3] \n"
"fmla v17.4s, v11.4s, v2.s[0] \n"
"fmla v19.4s, v11.4s, v2.s[1] \n"
"fmla v21.4s, v11.4s, v2.s[2] \n"
"fmla v23.4s, v11.4s, v2.s[3] \n"
"fmla v24.4s, v10.4s, v3.s[0] \n"
"fmla v26.4s, v10.4s, v3.s[1] \n"
"fmla v28.4s, v10.4s, v3.s[2] \n"
"fmla v30.4s, v10.4s, v3.s[3] \n"
"fmla v25.4s, v11.4s, v3.s[0] \n"
"fmla v27.4s, v11.4s, v3.s[1] \n"
"fmla v29.4s, v11.4s, v3.s[2] \n"
"fmla v31.4s, v11.4s, v3.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v12.4s, v4.s[0] \n"
"fmla v18.4s, v12.4s, v4.s[1] \n"
"fmla v20.4s, v12.4s, v4.s[2] \n"
"fmla v22.4s, v12.4s, v4.s[3] \n"
"fmla v17.4s, v13.4s, v4.s[0] \n"
"fmla v19.4s, v13.4s, v4.s[1] \n"
"fmla v21.4s, v13.4s, v4.s[2] \n"
"fmla v23.4s, v13.4s, v4.s[3] \n"
"fmla v24.4s, v12.4s, v5.s[0] \n"
"fmla v26.4s, v12.4s, v5.s[1] \n"
"fmla v28.4s, v12.4s, v5.s[2] \n"
"fmla v30.4s, v12.4s, v5.s[3] \n"
"fmla v25.4s, v13.4s, v5.s[0] \n"
"fmla v27.4s, v13.4s, v5.s[1] \n"
"fmla v29.4s, v13.4s, v5.s[2] \n"
"fmla v31.4s, v13.4s, v5.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v14.4s, v6.s[0] \n"
"fmla v18.4s, v14.4s, v6.s[1] \n"
"fmla v20.4s, v14.4s, v6.s[2] \n"
"fmla v22.4s, v14.4s, v6.s[3] \n"
"fmla v17.4s, v15.4s, v6.s[0] \n"
"fmla v19.4s, v15.4s, v6.s[1] \n"
"fmla v21.4s, v15.4s, v6.s[2] \n"
"fmla v23.4s, v15.4s, v6.s[3] \n"
"fmla v24.4s, v14.4s, v7.s[0] \n"
"fmla v26.4s, v14.4s, v7.s[1] \n"
"fmla v28.4s, v14.4s, v7.s[2] \n"
"fmla v30.4s, v14.4s, v7.s[3] \n"
"fmla v25.4s, v15.4s, v7.s[0] \n"
"fmla v27.4s, v15.4s, v7.s[1] \n"
"fmla v29.4s, v15.4s, v7.s[2] \n"
"fmla v31.4s, v15.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #256] \n"
"ld1 {v8.4s, v9.4s}, [%8], #32 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v18.4s, v8.4s, v0.s[1] \n"
"fmla v20.4s, v8.4s, v0.s[2] \n"
"fmla v22.4s, v8.4s, v0.s[3] \n"
"fmla v17.4s, v9.4s, v0.s[0] \n"
"fmla v19.4s, v9.4s, v0.s[1] \n"
"fmla v21.4s, v9.4s, v0.s[2] \n"
"fmla v23.4s, v9.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v1.s[0] \n"
"fmla v26.4s, v8.4s, v1.s[1] \n"
"fmla v28.4s, v8.4s, v1.s[2] \n"
"fmla v30.4s, v8.4s, v1.s[3] \n"
"fmla v25.4s, v9.4s, v1.s[0] \n"
"fmla v27.4s, v9.4s, v1.s[1] \n"
"fmla v29.4s, v9.4s, v1.s[2] \n"
"fmla v31.4s, v9.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s, v17.4s}, [%0], #32 \n"
"st1 {v18.4s, v19.4s}, [%1], #32 \n"
"st1 {v20.4s, v21.4s}, [%2], #32 \n"
"st1 {v22.4s, v23.4s}, [%3], #32 \n"
"st1 {v24.4s, v25.4s}, [%4], #32 \n"
"st1 {v26.4s, v27.4s}, [%5], #32 \n"
"st1 {v28.4s, v29.4s}, [%6], #32 \n"
"st1 {v30.4s, v31.4s}, [%7], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"
);
}
for (; i+3<size; i+=4)
{
const float* tmpptr = tmp.channel(i/8 + (i%8)/4);
const float* kptr = kernel.channel(p/8);
asm volatile(
"ld1 {v0.4s, v1.4s}, [%20] \n"
"dup v16.4s, v0.s[0] \n"
"dup v17.4s, v0.s[1] \n"
"dup v18.4s, v0.s[2] \n"
"dup v19.4s, v0.s[3] \n"
"dup v20.4s, v1.s[0] \n"
"dup v21.4s, v1.s[1] \n"
"dup v22.4s, v1.s[2] \n"
"dup v23.4s, v1.s[3] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%8, #512] \n"
"ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%8], #64 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"fmla v16.4s, v9.4s, v2.s[0] \n"
"fmla v17.4s, v9.4s, v2.s[1] \n"
"fmla v18.4s, v9.4s, v2.s[2] \n"
"fmla v19.4s, v9.4s, v2.s[3] \n"
"fmla v20.4s, v9.4s, v3.s[0] \n"
"fmla v21.4s, v9.4s, v3.s[1] \n"
"fmla v22.4s, v9.4s, v3.s[2] \n"
"fmla v23.4s, v9.4s, v3.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v16.4s, v10.4s, v4.s[0] \n"
"fmla v17.4s, v10.4s, v4.s[1] \n"
"fmla v18.4s, v10.4s, v4.s[2] \n"
"fmla v19.4s, v10.4s, v4.s[3] \n"
"fmla v20.4s, v10.4s, v5.s[0] \n"
"fmla v21.4s, v10.4s, v5.s[1] \n"
"fmla v22.4s, v10.4s, v5.s[2] \n"
"fmla v23.4s, v10.4s, v5.s[3] \n"
"fmla v16.4s, v11.4s, v6.s[0] \n"
"fmla v17.4s, v11.4s, v6.s[1] \n"
"fmla v18.4s, v11.4s, v6.s[2] \n"
"fmla v19.4s, v11.4s, v6.s[3] \n"
"fmla v20.4s, v11.4s, v7.s[0] \n"
"fmla v21.4s, v11.4s, v7.s[1] \n"
"fmla v22.4s, v11.4s, v7.s[2] \n"
"fmla v23.4s, v11.4s, v7.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"fmla v16.4s, v8.4s, v0.s[0] \n"
"fmla v17.4s, v8.4s, v0.s[1] \n"
"fmla v18.4s, v8.4s, v0.s[2] \n"
"fmla v19.4s, v8.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v8.4s, v1.s[0] \n"
"fmla v21.4s, v8.4s, v1.s[1] \n"
"fmla v22.4s, v8.4s, v1.s[2] \n"
"fmla v23.4s, v8.4s, v1.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v16.4s}, [%0], #16 \n"
"st1 {v17.4s}, [%1], #16 \n"
"st1 {v18.4s}, [%2], #16 \n"
"st1 {v19.4s}, [%3], #16 \n"
"st1 {v20.4s}, [%4], #16 \n"
"st1 {v21.4s}, [%5], #16 \n"
"st1 {v22.4s}, [%6], #16 \n"
"st1 {v23.4s}, [%7], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"
);
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
const float* kptr = kernel.channel(p/8);
asm volatile(
"ld1 {v24.4s, v25.4s}, [%20] \n"
// inch loop
"lsr w4, %w21, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"0: \n"
"prfm pldl1keep, [%8, #128] \n"
"ld1 {v8.4s}, [%8], #16 \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%9], #64 \n"
"fmla v16.4s, v0.4s, v8.s[0] \n"
"fmla v17.4s, v1.4s, v8.s[0] \n"
"fmla v18.4s, v2.4s, v8.s[1] \n"
"fmla v19.4s, v3.4s, v8.s[1] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%9], #64 \n"
"subs w4, w4, #1 \n"
"fmla v20.4s, v4.4s, v8.s[2] \n"
"fmla v21.4s, v5.4s, v8.s[2] \n"
"fmla v22.4s, v6.4s, v8.s[3] \n"
"fmla v23.4s, v7.4s, v8.s[3] \n"
"bne 0b \n"
"fadd v16.4s, v16.4s, v18.4s \n"
"fadd v17.4s, v17.4s, v19.4s \n"
"fadd v20.4s, v20.4s, v22.4s \n"
"fadd v21.4s, v21.4s, v23.4s \n"
"fadd v16.4s, v16.4s, v20.4s \n"
"fadd v17.4s, v17.4s, v21.4s \n"
"fadd v24.4s, v24.4s, v16.4s \n"
"fadd v25.4s, v25.4s, v17.4s \n"
"1: \n"
// remain loop
"and w4, %w21, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%8, #32] \n"
"ld1r {v8.4s}, [%8], #4 \n"
"prfm pldl1keep, [%9, #256] \n"
"ld1 {v0.4s, v1.4s}, [%9], #32 \n"
"subs w4, w4, #1 \n"
"fmla v24.4s, v8.4s, v0.4s \n"
"fmla v25.4s, v8.4s, v1.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v24.s}[0],[%0], #4 \n"
"st1 {v24.s}[1],[%1], #4 \n"
"st1 {v24.s}[2],[%2], #4 \n"
"st1 {v24.s}[3],[%3], #4 \n"
"st1 {v25.s}[0],[%4], #4 \n"
"st1 {v25.s}[1],[%5], #4 \n"
"st1 {v25.s}[2],[%6], #4 \n"
"st1 {v25.s}[3],[%7], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(outptr4), // %4
"=r"(outptr5), // %5
"=r"(outptr6), // %6
"=r"(outptr7), // %7
"=r"(tmpptr), // %8
"=r"(kptr) // %9
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(outptr4),
"5"(outptr5),
"6"(outptr6),
"7"(outptr7),
"8"(tmpptr),
"9"(kptr),
"r"(biasptr), // %20
"r"(inch) // %21
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25"
);
}
}
#endif // __ARM_NEON && __aarch64__
nn_outch = (outch - remain_outch_start) >> 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp=0; pp<nn_outch; pp++)
{
int p = remain_outch_start + pp * 4;
float* outptr0 = top_blob.channel(p);
float* outptr1 = top_blob.channel(p+1);
float* outptr2 = top_blob.channel(p+2);
float* outptr3 = top_blob.channel(p+3);
const float zeros[4] = {0.f, 0.f, 0.f, 0.f};
const float* biasptr = bias ? bias + p : zeros;
int i = 0;
for (; i+7<size; i+=8)
{
const float* tmpptr = tmp.channel(i/8);
#if __ARM_NEON && __aarch64__
const float* kptr = kernel.channel(p/8 + (p%8)/4);
#else
const float* kptr = kernel.channel(p/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[0] \n"
"dup v10.4s, v0.s[1] \n"
"dup v11.4s, v0.s[1] \n"
"dup v12.4s, v0.s[2] \n"
"dup v13.4s, v0.s[2] \n"
"dup v14.4s, v0.s[3] \n"
"dup v15.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%4], #64 \n"
"fmla v8.4s, v6.4s, v1.s[0] \n"
"fmla v10.4s, v6.4s, v1.s[1] \n"
"fmla v12.4s, v6.4s, v1.s[2] \n"
"fmla v14.4s, v6.4s, v1.s[3] \n"
"fmla v9.4s, v7.4s, v1.s[0] \n"
"fmla v11.4s, v7.4s, v1.s[1] \n"
"fmla v13.4s, v7.4s, v1.s[2] \n"
"fmla v15.4s, v7.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v16.4s, v2.s[0] \n"
"fmla v10.4s, v16.4s, v2.s[1] \n"
"fmla v12.4s, v16.4s, v2.s[2] \n"
"fmla v14.4s, v16.4s, v2.s[3] \n"
"fmla v9.4s, v17.4s, v2.s[0] \n"
"fmla v11.4s, v17.4s, v2.s[1] \n"
"fmla v13.4s, v17.4s, v2.s[2] \n"
"fmla v15.4s, v17.4s, v2.s[3] \n"
"fmla v8.4s, v18.4s, v3.s[0] \n"
"fmla v10.4s, v18.4s, v3.s[1] \n"
"fmla v12.4s, v18.4s, v3.s[2] \n"
"fmla v14.4s, v18.4s, v3.s[3] \n"
"fmla v9.4s, v19.4s, v3.s[0] \n"
"fmla v11.4s, v19.4s, v3.s[1] \n"
"fmla v13.4s, v19.4s, v3.s[2] \n"
"fmla v15.4s, v19.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #256] \n"
"ld1 {v4.4s, v5.4s}, [%4], #32 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v10.4s, v4.4s, v0.s[1] \n"
"fmla v12.4s, v4.4s, v0.s[2] \n"
"fmla v14.4s, v4.4s, v0.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"fmla v11.4s, v5.4s, v0.s[1] \n"
"fmla v13.4s, v5.4s, v0.s[2] \n"
"fmla v15.4s, v5.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
"st1 {v10.4s, v11.4s}, [%1], #32 \n"
"st1 {v12.4s, v13.4s}, [%2], #32 \n"
"st1 {v14.4s, v15.4s}, [%3], #32 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[0] \n"
"vdup.f32 q10, d0[1] \n"
"vdup.f32 q11, d0[1] \n"
"vdup.f32 q12, d1[0] \n"
"vdup.f32 q13, d1[0] \n"
"vdup.f32 q14, d1[1] \n"
"vdup.f32 q15, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"vmla.f32 q8, q6, d2[0] \n"
"vmla.f32 q10, q6, d2[1] \n"
"vmla.f32 q12, q6, d3[0] \n"
"vmla.f32 q14, q6, d3[1] \n"
"vmla.f32 q9, q7, d2[0] \n"
"vmla.f32 q11, q7, d2[1] \n"
"vmla.f32 q13, q7, d3[0] \n"
"vmla.f32 q15, q7, d3[1] \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"vmla.f32 q8, q4, d4[0] \n"
"vmla.f32 q10, q4, d4[1] \n"
"vmla.f32 q12, q4, d5[0] \n"
"vmla.f32 q14, q4, d5[1] \n"
"vmla.f32 q9, q5, d4[0] \n"
"vmla.f32 q11, q5, d4[1] \n"
"vmla.f32 q13, q5, d5[0] \n"
"vmla.f32 q15, q5, d5[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d6[0] \n"
"vmla.f32 q10, q6, d6[1] \n"
"vmla.f32 q12, q6, d7[0] \n"
"vmla.f32 q14, q6, d7[1] \n"
"vmla.f32 q9, q7, d6[0] \n"
"vmla.f32 q11, q7, d6[1] \n"
"vmla.f32 q13, q7, d7[0] \n"
"vmla.f32 q15, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #256] \n"
"vld1.f32 {d8-d11}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q10, q4, d0[1] \n"
"vmla.f32 q12, q4, d1[0] \n"
"vmla.f32 q14, q4, d1[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q9, q5, d0[0] \n"
"vmla.f32 q11, q5, d0[1] \n"
"vmla.f32 q13, q5, d1[0] \n"
"vmla.f32 q15, q5, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
"vst1.f32 {d20-d23}, [%1 :128]! \n"
"vst1.f32 {d24-d27}, [%2 :128]! \n"
"vst1.f32 {d28-d31}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum0_0 = biasptr[0];
float sum0_1 = biasptr[0];
float sum0_2 = biasptr[0];
float sum0_3 = biasptr[0];
float sum0_4 = biasptr[0];
float sum0_5 = biasptr[0];
float sum0_6 = biasptr[0];
float sum0_7 = biasptr[0];
float sum1_0 = biasptr[1];
float sum1_1 = biasptr[1];
float sum1_2 = biasptr[1];
float sum1_3 = biasptr[1];
float sum1_4 = biasptr[1];
float sum1_5 = biasptr[1];
float sum1_6 = biasptr[1];
float sum1_7 = biasptr[1];
float sum2_0 = biasptr[2];
float sum2_1 = biasptr[2];
float sum2_2 = biasptr[2];
float sum2_3 = biasptr[2];
float sum2_4 = biasptr[2];
float sum2_5 = biasptr[2];
float sum2_6 = biasptr[2];
float sum2_7 = biasptr[2];
float sum3_0 = biasptr[3];
float sum3_1 = biasptr[3];
float sum3_2 = biasptr[3];
float sum3_3 = biasptr[3];
float sum3_4 = biasptr[3];
float sum3_5 = biasptr[3];
float sum3_6 = biasptr[3];
float sum3_7 = biasptr[3];
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum0_4 += tmpptr[4] * kptr[0];
sum0_5 += tmpptr[5] * kptr[0];
sum0_6 += tmpptr[6] * kptr[0];
sum0_7 += tmpptr[7] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum1_4 += tmpptr[4] * kptr[1];
sum1_5 += tmpptr[5] * kptr[1];
sum1_6 += tmpptr[6] * kptr[1];
sum1_7 += tmpptr[7] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum2_4 += tmpptr[4] * kptr[2];
sum2_5 += tmpptr[5] * kptr[2];
sum2_6 += tmpptr[6] * kptr[2];
sum2_7 += tmpptr[7] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
sum3_4 += tmpptr[4] * kptr[3];
sum3_5 += tmpptr[5] * kptr[3];
sum3_6 += tmpptr[6] * kptr[3];
sum3_7 += tmpptr[7] * kptr[3];
tmpptr += 8;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr0[4] = sum0_4;
outptr0[5] = sum0_5;
outptr0[6] = sum0_6;
outptr0[7] = sum0_7;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr1[4] = sum1_4;
outptr1[5] = sum1_5;
outptr1[6] = sum1_6;
outptr1[7] = sum1_7;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr2[4] = sum2_4;
outptr2[5] = sum2_5;
outptr2[6] = sum2_6;
outptr2[7] = sum2_7;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr3[4] = sum3_4;
outptr3[5] = sum3_5;
outptr3[6] = sum3_6;
outptr3[7] = sum3_7;
outptr0 += 8;
outptr1 += 8;
outptr2 += 8;
outptr3 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const float* tmpptr = tmp.channel(i/8 + (i%8)/4);
#if __ARM_NEON && __aarch64__
const float* kptr = kernel.channel(p/8 + (p%8)/4);
#else
const float* kptr = kernel.channel(p/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v0.4s}, [%12] \n"
"dup v8.4s, v0.s[0] \n"
"dup v9.4s, v0.s[1] \n"
"dup v10.4s, v0.s[2] \n"
"dup v11.4s, v0.s[3] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%4, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"fmla v8.4s, v5.4s, v1.s[0] \n"
"fmla v9.4s, v5.4s, v1.s[1] \n"
"fmla v10.4s, v5.4s, v1.s[2] \n"
"fmla v11.4s, v5.4s, v1.s[3] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v6.4s, v2.s[0] \n"
"fmla v9.4s, v6.4s, v2.s[1] \n"
"fmla v10.4s, v6.4s, v2.s[2] \n"
"fmla v11.4s, v6.4s, v2.s[3] \n"
"fmla v8.4s, v7.4s, v3.s[0] \n"
"fmla v9.4s, v7.4s, v3.s[1] \n"
"fmla v10.4s, v7.4s, v3.s[2] \n"
"fmla v11.4s, v7.4s, v3.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v4.4s, v0.s[1] \n"
"fmla v10.4s, v4.4s, v0.s[2] \n"
"fmla v11.4s, v4.4s, v0.s[3] \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
"st1 {v9.4s}, [%1], #16 \n"
"st1 {v10.4s}, [%2], #16 \n"
"st1 {v11.4s}, [%3], #16 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d0-d1}, [%12] \n"
"vdup.f32 q8, d0[0] \n"
"vdup.f32 q9, d0[1] \n"
"vdup.f32 q10, d1[0] \n"
"vdup.f32 q11, d1[1] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%4, #512] \n"
"vldm %4!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%4 :128]! \n"
// "vld1.f32 {d12-d15}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"vmla.f32 q8, q5, d2[0] \n"
"vmla.f32 q9, q5, d2[1] \n"
"vmla.f32 q10, q5, d3[0] \n"
"vmla.f32 q11, q5, d3[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q6, d4[0] \n"
"vmla.f32 q9, q6, d4[1] \n"
"vmla.f32 q10, q6, d5[0] \n"
"vmla.f32 q11, q6, d5[1] \n"
"vmla.f32 q8, q7, d6[0] \n"
"vmla.f32 q9, q7, d6[1] \n"
"vmla.f32 q10, q7, d7[0] \n"
"vmla.f32 q11, q7, d7[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q4, d0[1] \n"
"vmla.f32 q10, q4, d1[0] \n"
"vmla.f32 q11, q4, d1[1] \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
"vst1.f32 {d18-d19}, [%1 :128]! \n"
"vst1.f32 {d20-d21}, [%2 :128]! \n"
"vst1.f32 {d22-d23}, [%3 :128]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"
);
#endif // __aarch64__
#else
float sum0_0 = biasptr[0];
float sum0_1 = biasptr[0];
float sum0_2 = biasptr[0];
float sum0_3 = biasptr[0];
float sum1_0 = biasptr[1];
float sum1_1 = biasptr[1];
float sum1_2 = biasptr[1];
float sum1_3 = biasptr[1];
float sum2_0 = biasptr[2];
float sum2_1 = biasptr[2];
float sum2_2 = biasptr[2];
float sum2_3 = biasptr[2];
float sum3_0 = biasptr[3];
float sum3_1 = biasptr[3];
float sum3_2 = biasptr[3];
float sum3_3 = biasptr[3];
for (int q=0; q<inch; q++)
{
sum0_0 += tmpptr[0] * kptr[0];
sum0_1 += tmpptr[1] * kptr[0];
sum0_2 += tmpptr[2] * kptr[0];
sum0_3 += tmpptr[3] * kptr[0];
sum1_0 += tmpptr[0] * kptr[1];
sum1_1 += tmpptr[1] * kptr[1];
sum1_2 += tmpptr[2] * kptr[1];
sum1_3 += tmpptr[3] * kptr[1];
sum2_0 += tmpptr[0] * kptr[2];
sum2_1 += tmpptr[1] * kptr[2];
sum2_2 += tmpptr[2] * kptr[2];
sum2_3 += tmpptr[3] * kptr[2];
sum3_0 += tmpptr[0] * kptr[3];
sum3_1 += tmpptr[1] * kptr[3];
sum3_2 += tmpptr[2] * kptr[3];
sum3_3 += tmpptr[3] * kptr[3];
tmpptr += 4;
kptr += 4;
}
outptr0[0] = sum0_0;
outptr0[1] = sum0_1;
outptr0[2] = sum0_2;
outptr0[3] = sum0_3;
outptr1[0] = sum1_0;
outptr1[1] = sum1_1;
outptr1[2] = sum1_2;
outptr1[3] = sum1_3;
outptr2[0] = sum2_0;
outptr2[1] = sum2_1;
outptr2[2] = sum2_2;
outptr2[3] = sum2_3;
outptr3[0] = sum3_0;
outptr3[1] = sum3_1;
outptr3[2] = sum3_2;
outptr3[3] = sum3_3;
outptr0 += 4;
outptr1 += 4;
outptr2 += 4;
outptr3 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
#if __ARM_NEON && __aarch64__
const float* kptr = kernel.channel(p/8 + (p%8)/4);
#else
const float* kptr = kernel.channel(p/4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"ld1 {v12.4s}, [%12] \n"
// inch loop
"lsr w4, %w13, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"eor v8.16b, v8.16b, v8.16b \n"
"eor v9.16b, v9.16b, v9.16b \n"
"eor v10.16b, v10.16b, v10.16b \n"
"eor v11.16b, v11.16b, v11.16b \n"
"0: \n"
"prfm pldl1keep, [%4, #128] \n"
"ld1 {v4.4s}, [%4], #16 \n"
"prfm pldl1keep, [%5, #512] \n"
"ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%5], #64 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v0.4s, v4.s[0] \n"
"fmla v9.4s, v1.4s, v4.s[1] \n"
"fmla v10.4s, v2.4s, v4.s[2] \n"
"fmla v11.4s, v3.4s, v4.s[3] \n"
"bne 0b \n"
"fadd v8.4s, v8.4s, v9.4s \n"
"fadd v10.4s, v10.4s, v11.4s \n"
"fadd v8.4s, v8.4s, v10.4s \n"
"fadd v12.4s, v12.4s, v8.4s \n"
"1: \n"
// remain loop
"and w4, %w13, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%4, #32] \n"
"ld1r {v4.4s}, [%4], #4 \n"
"prfm pldl1keep, [%5, #128] \n"
"ld1 {v0.4s}, [%5], #16 \n"
"subs w4, w4, #1 \n"
"fmla v12.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v12.s}[0], [%0], #4 \n"
"st1 {v12.s}[1], [%1], #4 \n"
"st1 {v12.s}[2], [%2], #4 \n"
"st1 {v12.s}[3], [%3], #4 \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "x4", "v0", "v1", "v2", "v3", "v4", "v8", "v9", "v10", "v11", "v12"
);
#else // __aarch64__
asm volatile(
"vld1.f32 {d24-d25}, [%12] \n"
// inch loop
"lsr r4, %13, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"veor q8, q8, q8 \n"
"veor q9, q9, q9 \n"
"veor q10, q10, q10 \n"
"veor q11, q11, q11 \n"
"0: \n"
"pld [%4, #128] \n"
"vld1.f32 {d8-d9}, [%4 :128]! \n"
"pld [%5, #512] \n"
"vldm %5!, {d0-d7} \n"
// "vld1.f32 {d0-d3}, [%5 :128]! \n"
// "vld1.f32 {d4-d7}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q0, d8[0] \n"
"vmla.f32 q9, q1, d8[1] \n"
"vmla.f32 q10, q2, d9[0] \n"
"vmla.f32 q11, q3, d9[1] \n"
"bne 0b \n"
"vadd.f32 q8, q8, q9 \n"
"vadd.f32 q10, q10, q11 \n"
"vadd.f32 q8, q8, q10 \n"
"vadd.f32 q12, q12, q8 \n"
"1: \n"
// remain loop
"and r4, %13, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%4, #32] \n"
"vld1.f32 {d8[],d9[]}, [%4]! \n"
"pld [%5, #128] \n"
"vld1.f32 {d0-d1}, [%5 :128]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q12, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d24[0]}, [%0]! \n"
"vst1.f32 {d24[1]}, [%1]! \n"
"vst1.f32 {d25[0]}, [%2]! \n"
"vst1.f32 {d25[1]}, [%3]! \n"
: "=r"(outptr0), // %0
"=r"(outptr1), // %1
"=r"(outptr2), // %2
"=r"(outptr3), // %3
"=r"(tmpptr), // %4
"=r"(kptr) // %5
: "0"(outptr0),
"1"(outptr1),
"2"(outptr2),
"3"(outptr3),
"4"(tmpptr),
"5"(kptr),
"r"(biasptr), // %12
"r"(inch) // %13
: "cc", "memory", "r4", "q0", "q1", "q2", "q3", "q4", "q8", "q9", "q10", "q11", "q12"
);
#endif // __aarch64__
#else
float sum0 = biasptr[0];
float sum1 = biasptr[1];
float sum2 = biasptr[2];
float sum3 = biasptr[3];
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[0] * kptr[1];
sum2 += tmpptr[0] * kptr[2];
sum3 += tmpptr[0] * kptr[3];
tmpptr++;
kptr += 4;
}
outptr0[0] = sum0;
outptr1[0] = sum1;
outptr2[0] = sum2;
outptr3[0] = sum3;
outptr0++;
outptr1++;
outptr2++;
outptr3++;
#endif // __ARM_NEON
}
}
remain_outch_start += nn_outch << 2;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=remain_outch_start; p<outch; p++)
{
Mat out0 = top_blob.channel(p);
const float bias0 = bias ? bias[p] : 0.f;
float* outptr0 = out0;
int i = 0;
for (; i+7<size; i+=8)
{
const float* tmpptr = tmp.channel(i/8);
#if __ARM_NEON && __aarch64__
const float* kptr = kernel.channel(p/8 + (p%8)/4 + p%4);
#else
const float* kptr = kernel.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"dup v8.4s, %w6 \n"
"dup v9.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v9.4s, v5.4s, v0.s[0] \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n"
"fmla v8.4s, v6.4s, v0.s[1] \n"
"fmla v9.4s, v7.4s, v0.s[1] \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v12.4s, v0.s[2] \n"
"fmla v9.4s, v13.4s, v0.s[2] \n"
"fmla v8.4s, v14.4s, v0.s[3] \n"
"fmla v9.4s, v15.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #256] \n"
"ld1 {v4.4s, v5.4s}, [%1], #32 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"fmla v9.4s, v5.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s, v9.4s}, [%0], #32 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8", "v9", "v12", "v13", "v14", "v15"
);
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %6 \n"
"vdup.f32 q9, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2 :128]! \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q9, q5, d0[0] \n"
"pld [%1, #512] \n"
"vldm %1!, {d24-d31} \n"
// "vld1.f32 {d24-d27}, [%1 :128]! \n"
// "vld1.f32 {d28-d31}, [%1 :128]! \n"
"vmla.f32 q8, q6, d0[1] \n"
"vmla.f32 q9, q7, d0[1] \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q12, d1[0] \n"
"vmla.f32 q9, q13, d1[0] \n"
"vmla.f32 q8, q14, d1[1] \n"
"vmla.f32 q9, q15, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #256] \n"
"vld1.f32 {d8-d11}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"vmla.f32 q9, q5, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d19}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8", "q9", "q12", "q13", "q14", "q15"
);
#endif // __aarch64__
#else
float sum0 = bias0;
float sum1 = bias0;
float sum2 = bias0;
float sum3 = bias0;
float sum4 = bias0;
float sum5 = bias0;
float sum6 = bias0;
float sum7 = bias0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
sum4 += tmpptr[4] * kptr[0];
sum5 += tmpptr[5] * kptr[0];
sum6 += tmpptr[6] * kptr[0];
sum7 += tmpptr[7] * kptr[0];
tmpptr += 8;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0[4] = sum4;
outptr0[5] = sum5;
outptr0[6] = sum6;
outptr0[7] = sum7;
outptr0 += 8;
#endif // __ARM_NEON
}
for (; i+3<size; i+=4)
{
const float* tmpptr = tmp.channel(i/8 + (i%8)/4);
#if __ARM_NEON && __aarch64__
const float* kptr = kernel.channel(p/8 + (p%8)/4 + p%4);
#else
const float* kptr = kernel.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__
#if __ARM_NEON
#if __aarch64__
asm volatile(
"dup v8.4s, %w6 \n"
// inch loop
"lsr w4, %w7, #2 \n"// w4 = nn = inch >> 2
"cmp w4, #0 \n"
"beq 1f \n"
"0: \n"
"prfm pldl1keep, [%1, #512] \n"
"ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n"
"prfm pldl1keep, [%2, #128] \n"
"ld1 {v0.4s}, [%2], #16 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.s[0] \n"
"fmla v8.4s, v5.4s, v0.s[1] \n"
"fmla v8.4s, v6.4s, v0.s[2] \n"
"fmla v8.4s, v7.4s, v0.s[3] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and w4, %w7, #3 \n"// w4 = remain = inch & 3;
"cmp w4, #0 \n"
"beq 3f \n"
"2: \n"
"prfm pldl1keep, [%1, #128] \n"
"ld1 {v4.4s}, [%1], #16 \n"
"prfm pldl1keep, [%2, #32] \n"
"ld1r {v0.4s}, [%2], #4 \n"
"subs w4, w4, #1 \n"
"fmla v8.4s, v4.4s, v0.4s \n"
"bne 2b \n"
"3: \n"
"st1 {v8.4s}, [%0], #16 \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "x4", "v0", "v4", "v5", "v6", "v7", "v8"
);
#else // __aarch64__
asm volatile(
"vdup.f32 q8, %6 \n"
// inch loop
"lsr r4, %7, #2 \n"// r4 = nn = inch >> 2
"cmp r4, #0 \n"
"beq 1f \n"
"0: \n"
"pld [%1, #512] \n"
"vldm %1!, {d8-d15} \n"
// "vld1.f32 {d8-d11}, [%1 :128]! \n"
// "vld1.f32 {d12-d15}, [%1 :128]! \n"
"pld [%2, #128] \n"
"vld1.f32 {d0-d1}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, d0[0] \n"
"vmla.f32 q8, q5, d0[1] \n"
"vmla.f32 q8, q6, d1[0] \n"
"vmla.f32 q8, q7, d1[1] \n"
"bne 0b \n"
"1: \n"
// remain loop
"and r4, %7, #3 \n"// r4 = remain = inch & 3;
"cmp r4, #0 \n"
"beq 3f \n"
"2: \n"
"pld [%1, #128] \n"
"vld1.f32 {d8-d9}, [%1 :128]! \n"
"pld [%2, #32] \n"
"vld1.f32 {d0[],d1[]}, [%2]! \n"
"subs r4, r4, #1 \n"
"vmla.f32 q8, q4, q0 \n"
"bne 2b \n"
"3: \n"
"vst1.f32 {d16-d17}, [%0 :128]! \n"
: "=r"(outptr0), // %0
"=r"(tmpptr), // %1
"=r"(kptr) // %2
: "0"(outptr0),
"1"(tmpptr),
"2"(kptr),
"r"(bias0), // %6
"r"(inch) // %7
: "cc", "memory", "r4", "q0", "q4", "q5", "q6", "q7", "q8"
);
#endif // __aarch64__
#else
float sum0 = bias0;
float sum1 = bias0;
float sum2 = bias0;
float sum3 = bias0;
for (int q=0; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
sum1 += tmpptr[1] * kptr[0];
sum2 += tmpptr[2] * kptr[0];
sum3 += tmpptr[3] * kptr[0];
tmpptr += 4;
kptr++;
}
outptr0[0] = sum0;
outptr0[1] = sum1;
outptr0[2] = sum2;
outptr0[3] = sum3;
outptr0 += 4;
#endif // __ARM_NEON
}
for (; i<size; i++)
{
const float* tmpptr = tmp.channel(i/8 + (i%8)/4 + i%4);
#if __ARM_NEON && __aarch64__
const float* kptr = kernel.channel(p/8 + (p%8)/4 + p%4);
#else
const float* kptr = kernel.channel(p/4 + p%4);
#endif // __ARM_NEON && __aarch64__
int q = 0;
#if __ARM_NEON
float32x4_t _sum0 = vdupq_n_f32(0.f);
for (; q+3<inch; q+=4)
{
float32x4_t _p0 = vld1q_f32(tmpptr);
tmpptr += 4;
float32x4_t _k0 = vld1q_f32(kptr);
kptr += 4;
#if __aarch64__
_sum0 = vfmaq_f32(_sum0, _p0, _k0);
#else
_sum0 = vmlaq_f32(_sum0, _p0, _k0);
#endif
}
#if __aarch64__
float sum0 = bias0 + vaddvq_f32(_sum0);
#else
float32x2_t _ss = vadd_f32(vget_low_f32(_sum0), vget_high_f32(_sum0));
float sum0 = bias0 + vget_lane_f32(vpadd_f32(_ss, _ss), 0);
#endif
#else
float sum0 = bias0;
#endif // __ARM_NEON
for (; q<inch; q++)
{
sum0 += tmpptr[0] * kptr[0];
tmpptr++;
kptr++;
}
outptr0[0] = sum0;
outptr0++;
}
}
// // NOTE sgemm
// for (; p<outch; p++)
// {
// Mat out0 = top_blob.channel(p);
//
// const float bias0 = bias ? bias[p] : 0.f;
//
// float* outptr0 = out0;
//
// for (int i=0; i<size; i++)
// {
// float sum = bias0;
//
// const float* kptr = _kernel.channel(p/8 + p%8);
//
// for (int q=0; q<inch; q++)
// {
// const float* img0 = bottom_blob.channel(q);
//
// sum += img0[i] * kptr[0];
// kptr ++;
// }
//
// outptr0[i] = sum;
// }
// }
}
}
|
calc_dnn.c | /*
* Copyright (c) 1991-2013 Kawahara Lab., Kyoto University
* Copyright (c) 2000-2005 Shikano Lab., Nara Institute of Science and Technology
* Copyright (c) 2005-2013 Julius project team, Nagoya Institute of Technology
* All rights reserved
*/
/* define this to test disabling expsum computation at softmax */
#undef NO_SUM_COMPUTATION
#ifdef _WIN32
#include <intrin.h>
#else
#if defined(__arm__) || TARGET_OS_IPHONE || defined(__aarch64__) || defined(__EMSCRIPTEN__)
#else
#include <cpuid.h>
#endif
#endif /* _WIN32 */
#include <sent/stddefs.h>
#include <sent/htk_hmm.h>
#include <sent/htk_param.h>
#include <sent/hmm.h>
#include <sent/hmm_calc.h>
#if defined(HAS_SIMD_FMA) || defined(HAS_SIMD_AVX) || defined(HAS_SIMD_SSE) || defined(HAS_SIMD_NEON) || defined(HAS_SIMD_NEONV2)
#define SIMD_ENABLED
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#endif
static int use_simd = USE_SIMD_NONE;
/************************************************************************/
/* determine which SIMD code to run */
#ifdef SIMD_ENABLED
/* Detect at runtime which SIMD instruction set the CPU supports and
 * store the selection in the file-scope `use_simd`.
 * On ARM/iOS the choice is purely compile-time (NEON/NEONv2 macro).
 * On x86 the CPUID feature bits are queried (via __cpuid on Windows,
 * __get_cpuid elsewhere) and the best built-in variant is chosen in
 * priority order FMA > AVX > SSE. */
static void cpu_id_check()
{
  int cpuinfo[4];                 /* CPUID output registers (x86/Windows path only) */
  boolean sse = FALSE, avx = FALSE, fma = FALSE;

  use_simd = USE_SIMD_NONE;       /* default: plain C fallback */

#if defined(__arm__) || TARGET_OS_IPHONE
  /* on ARM NEON: selection fixed at build time, no runtime probe */
#if defined(HAS_SIMD_NEONV2)
  use_simd = USE_SIMD_NEONV2;
#elif defined(HAS_SIMD_NEON)
  use_simd = USE_SIMD_NEON;
#else
  use_simd = USE_SIMD_NONE;
#endif
#else  /* ~__arm__ */
#ifdef _WIN32
  /* CPUID leaf 1: EDX bit 25 = SSE, ECX bit 28 = AVX, ECX bit 12 = FMA */
  __cpuid(cpuinfo, 0x00000001);
  if(cpuinfo[3] & (1 << 25)) {
    sse = TRUE;
  }
  if(cpuinfo[2] & (1 << 28)) {
    avx = TRUE;
  }
  if(cpuinfo[2] & (1 << 12)) {
    fma = TRUE;
  }
#else  /* ~_WIN32 */
  unsigned int eax, ebx, ecx, edx;
  /* returns 0 when CPUID leaf 1 is unsupported; keep USE_SIMD_NONE then */
  if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) == 0)
    return;
  if (edx & bit_SSE) {
    sse = TRUE;
  }
  if (ecx & bit_AVX) {
    avx = TRUE;
  }
  if (ecx & bit_FMA) {
    fma = TRUE;
  }
#endif /* _WIN32 */
  /* pick the fastest variant that is both built in and CPU-supported */
#ifdef HAS_SIMD_FMA
  if (fma == TRUE) {
    use_simd = USE_SIMD_FMA;
    return;
  }
#endif
#ifdef HAS_SIMD_AVX
  if (avx == TRUE) {
    use_simd = USE_SIMD_AVX;
    return;
  }
#endif
#ifdef HAS_SIMD_SSE
  if (sse == TRUE) {
    use_simd = USE_SIMD_SSE;
    return;
  }
#endif
#endif /* ~__arm__ */
}
/* Allocate `size` bytes with the alignment required by the active SIMD
 * mode: 32 bytes for AVX/FMA (256-bit vectors), 16 bytes for
 * SSE/NEON/NEONv2 (128-bit vectors), and an ordinary mymalloc()
 * allocation when no SIMD mode is selected.  Pair with
 * myfree_simd_aligned(). */
static void *mymalloc_simd_aligned(size_t size)
{
  if (use_simd == USE_SIMD_FMA || use_simd == USE_SIMD_AVX) {
    /* 256-bit vector loads want 32-byte alignment */
    return mymalloc_aligned(size, 32);
  }
  if (use_simd == USE_SIMD_SSE ||
      use_simd == USE_SIMD_NEON ||
      use_simd == USE_SIMD_NEONV2) {
    /* 128-bit vector loads want 16-byte alignment */
    return mymalloc_aligned(size, 16);
  }
  /* no SIMD: plain allocation */
  return mymalloc(size);
}
/* Release memory obtained from mymalloc_simd_aligned(), using the
 * matching deallocator for the active SIMD mode.  NULL is a no-op. */
static void myfree_simd_aligned(void *ptr)
{
  if (ptr == NULL) return;

  switch(use_simd) {
  case USE_SIMD_FMA:
  case USE_SIMD_AVX:
  case USE_SIMD_SSE:
  case USE_SIMD_NEON:
  case USE_SIMD_NEONV2:
    /* allocated via mymalloc_aligned() */
    myfree_aligned(ptr);
    break;
  default:
    /* allocated via mymalloc() */
    free(ptr);
    break;
  }
}
#endif /* SIMD_ENABLED */
/* Write a space-separated list of the SIMD variants compiled into this
 * binary (e.g. " SSE AVX FMA") into `buf`.  The caller must supply a
 * buffer large enough for all enabled names; the result is an empty
 * string when no variant is built in. */
void get_builtin_simd_string(char *buf)
{
  char *p = buf;

  *p = '\0';
#ifdef HAS_SIMD_NEON
  p = strcat(p, " NEON");
#endif
#ifdef HAS_SIMD_NEONV2
  p = strcat(p, " NEONv2");
#endif
#ifdef HAS_SIMD_SSE
  p = strcat(p, " SSE");
#endif
#ifdef HAS_SIMD_AVX
  p = strcat(p, " AVX");
#endif
#ifdef HAS_SIMD_FMA
  p = strcat(p, " FMA");
#endif
}
/* Probe the CPU (when SIMD support is compiled in) and return the SIMD
 * variant that will actually be used (a USE_SIMD_* constant).
 * Also refreshes the file-scope `use_simd` as a side effect. */
int check_avail_simd()
{
#ifdef SIMD_ENABLED
  cpu_id_check();
#endif
  return use_simd;
}
/* Log which SIMD variants were compiled into this binary, and which one
 * was selected at runtime by cpu_id_check().
 * Fix: the selection messages said "clac_dnn"; corrected to "calc_dnn". */
static void output_use_simd()
{
#ifdef SIMD_ENABLED
#ifdef HAS_SIMD_NEON
  jlog("Stat: calc_dnn: ARM NEON instructions built-in\n");
#endif
#ifdef HAS_SIMD_NEONV2
  jlog("Stat: calc_dnn: ARM NEONv2 instructions built-in\n");
#endif
#ifdef HAS_SIMD_FMA
  jlog("Stat: calc_dnn: FMA instructions built-in\n");
#endif
#ifdef HAS_SIMD_AVX
  jlog("Stat: calc_dnn: AVX instructions built-in\n");
#endif
#ifdef HAS_SIMD_SSE
  jlog("Stat: calc_dnn: SSE instructions built-in\n");
#endif
#else  /* ~SIMD_ENABLED */
  jlog("Warning: NO built-in SIMD support, DNN computation may be too slow!\n");
  return;
#endif /* ~SIMD_ENABLED */
#ifdef SIMD_ENABLED
  /* report the variant chosen at runtime */
  if (use_simd == USE_SIMD_SSE) {
    jlog("Stat: calc_dnn: use SSE SIMD instruction (128bit)\n");
  } else if (use_simd == USE_SIMD_AVX) {
    jlog("Stat: calc_dnn: use AVX SIMD instruction (256bit)\n");
  } else if (use_simd == USE_SIMD_FMA) {
    jlog("Stat: calc_dnn: use FMA SIMD instruction (256bit)\n");
  } else if (use_simd == USE_SIMD_NEON) {
    jlog("Stat: use ARM NEON instruction\n");
  } else if (use_simd == USE_SIMD_NEONV2) {
    jlog("Stat: use ARM NEONv2 instruction\n");
  } else {
    jlog("Warning: calc_dnn: no SIMD support, DNN computation may be too slow!\n");
  }
#endif /* SIMD_ENABLED */
}
/************************************************************************/
/* .npy file load */
/* Load a 2-D little-endian float32 matrix from a NumPy .npy file
 * (format version 1.x only) into a pre-allocated buffer.
 *
 * The header's 'descr' must be '<f4' and its shape must be either
 * (x, y) or (y, x); the x*y values are then read in file order, so the
 * caller is responsible for the row-/column-major interpretation.
 * NOTE: the header's 'fortran_order' flag is read as part of the
 * header but not acted upon (matching the original behavior, which
 * parsed it into a variable it never used).
 *
 * Fix: the magic-number and "NUMPY"-string error paths returned
 * without closing the file, leaking the handle.
 *
 * @param array    [out] destination buffer holding at least x*y floats
 * @param filename [in]  .npy file to read
 * @param x        [in]  expected first dimension
 * @param y        [in]  expected second dimension
 * @return TRUE on success, FALSE on any error (file closed either way)
 */
static boolean load_npy(float *array, char *filename, int x, int y)
{
  FILE *fp;
  unsigned char code;
  char magic[6];
  unsigned char major_version;
  unsigned char minor_version;
  unsigned short header_len;
  char *header;
  size_t len;

  if ((fp = fopen_readfile(filename)) == NULL) {
    jlog("Error: load_npy: unable to open: %s\n", filename);
    return FALSE;
  }
  /* first byte of a .npy file is always 0x93 */
  if ((len = myfread(&code, 1, 1, fp)) < 1) {
    jlog("Error: load_npy: failed to read header: %s\n", filename);
    fclose_readfile(fp);
    return FALSE;
  }
  if (code != 0x93) {
    jlog("Error: load_npy: wrong magic number, not an npy file: %s\n", filename);
    fclose_readfile(fp);        /* fix: was leaked here */
    return FALSE;
  }
  /* followed by the ASCII string "NUMPY" */
  if ((len = myfread(magic, 1, 5, fp)) < 5) {
    jlog("Error: load_npy: failed to read header: %s\n", filename);
    fclose_readfile(fp);
    return FALSE;
  }
  magic[5] = '\0';
  if (strmatch(magic, "NUMPY") == FALSE) {
    jlog("Error: load_npy: wrong magic header, not an npy file: %s\n", filename);
    fclose_readfile(fp);        /* fix: was leaked here */
    return FALSE;
  }
  if ((len = myfread(&major_version, 1, 1, fp)) < 1) {
    jlog("Error: load_npy: failed to read header: %s\n", filename);
    fclose_readfile(fp);
    return FALSE;
  }
  /* we only assume Version 1.x format */
  /* not check subversion x */
  if (major_version != 1) {
    jlog("Error: load_npy: can read only Version 1.0 but this file is Version %d\n", major_version);
    fclose_readfile(fp);
    return FALSE;
  }
  if ((len = myfread(&minor_version, 1, 1, fp)) < 1) {
    jlog("Error: load_npy: failed to read header: %s\n", filename);
    fclose_readfile(fp);
    return FALSE;
  }
  /* currently not support all conversion */
  /* accept only little endian 4byte float */
  /* produce error if the file has other format */
  if ((len = myfread(&header_len, 2, 1, fp)) < 1) {
    jlog("Error: load_npy: failed to read header length: %s\n", filename);
    fclose_readfile(fp);
    return FALSE;
  }
#ifdef WORDS_BIGENDIAN
  /* header length field is stored little-endian */
  swap_bytes(&header_len, 2, 1);
#endif
  header = (char *)mymalloc(header_len + 1);
  if ((len = myfread(header, 1, header_len, fp)) < header_len) {
    jlog("Error: load_npy: failed to read header (%d bytes): %s\n", header_len, filename);
    free(header);
    fclose_readfile(fp);
    return FALSE;
  }
  header[header_len] = '\0';
  if (strstr(header, "'descr': '<f4'") == NULL) {
    jlog("Error: load_npy: not a little-endian float array: %s\n", filename);
    free(header);
    fclose_readfile(fp);
    return FALSE;
  }
  /* accept the shape in either orientation */
  {
    char buf[100];
    sprintf(buf, "'shape': (%d, %d)", x, y);
    if (strstr(header, buf) == NULL) {
      sprintf(buf, "'shape': (%d, %d)", y, x);
      if (strstr(header, buf) == NULL) {
        jlog("Error: load_npy: not a (%d, %d) array? %s\n", x, y, filename);
        free(header);
        fclose_readfile(fp);
        return FALSE;
      }
    }
  }
  free(header);
  /* just read them in the order stored in the file */
  if ((len = myfread(array, 4, x * y, fp)) < (size_t)(x * y)) {
    jlog("Error: load_npy: failed to read %d bytes: %s\n", x * y, filename);
    fclose_readfile(fp);
    return FALSE;
  }
  fclose_readfile(fp);
  return TRUE;
}
/************************************************************************/
/* standard logistic function value table: takes range x in [-8,8] */
/* table size: LOGISTIC_TABLE_FACTOR * 16 * 4 (bytes) */
#define LOGISTIC_TABLE_FACTOR 20000
#define LOGISTIC_TABLE_MAX (16 * LOGISTIC_TABLE_FACTOR)
#define LOGISTIC_MIN 0.000334
#define LOGISTIC_MAX 0.999666
static float logistic_table[LOGISTIC_TABLE_MAX+1]; /* logistic value table */
/* build logistic function value table */
/* Pre-compute the logistic (sigmoid) lookup table used by
 * logistic_func(): entry i holds 1/(1+exp(-x)) for
 * x = i / LOGISTIC_TABLE_FACTOR - 8.0, covering x in [-8, 8]. */
static void logistic_table_build()
{
  int i;

  for (i = 0; i <= LOGISTIC_TABLE_MAX; i++) {
    double x = (double)i / (double)LOGISTIC_TABLE_FACTOR - 8.0;
    logistic_table[i] = (float)(1.0 / (1.0 + exp(-x)));
  }
}
/* return logistic function value, consulting table */
/* Logistic (sigmoid) of x via table lookup; values outside [-8, 8] are
 * clamped to the table's end values LOGISTIC_MIN / LOGISTIC_MAX.
 * logistic_table_build() must have been called beforehand. */
static float logistic_func(float x)
{
  if (x >= 8.0f) return LOGISTIC_MAX;
  if (x <= -8.0f) return LOGISTIC_MIN;
  /* map [-8,8] onto table indices, rounding to nearest entry */
  int idx = (int)((x + 8.0f) * LOGISTIC_TABLE_FACTOR + 0.5);
  return logistic_table[idx];
}
/* initialize dnn layer */
/* Reset a DNNLayer to the empty state: no weights/bias, zero
 * dimensions, and (when built with OpenMP/CUDA) no per-thread chunk
 * tables or device-side buffers.  Does NOT free anything; see
 * dnn_layer_clear() for that. */
static void dnn_layer_init(DNNLayer *l)
{
  l->w = NULL;   /* weight matrix (out x in) */
  l->b = NULL;   /* bias vector (out) */
  l->in = 0;
  l->out = 0;
#ifdef _OPENMP
  l->begin = NULL;   /* per-thread output-range start indices */
  l->end = NULL;     /* per-thread output-range end indices */
#endif /* _OPENMP */
#ifdef __NVCC__
  l->dw = NULL;  /* device-side copies of w and b */
  l->db = NULL;
#endif /* __NVCC__ */
}
/* load dnn layer parameter from files */
/* Load one DNN layer from .npy files: an (in x out) weight matrix from
 * `wfile` and an `out`-element bias vector from `bfile`, allocated with
 * the SIMD alignment currently selected.  Under OpenMP, also splits the
 * output range into `thread_num` chunks (padded to multiples of 4 for
 * SIMD) recorded in l->begin[]/l->end[].
 *
 * @return TRUE on success, FALSE on error (alignment mismatch or file
 *         load failure; note w/b are left allocated in that case and
 *         are released later by dnn_layer_clear()). */
static boolean dnn_layer_load(DNNLayer *l, int in, int out, char *wfile, char *bfile, int thread_num)
{
  l->in = in;
  l->out = out;
#ifdef SIMD_ENABLED
  /* AVX processes 8 floats and SSE 4 floats at a time; the SIMD kernels
     require the input length to be a multiple of the vector width */
  if (use_simd == USE_SIMD_AVX && l->in % 8 != 0) {
    jlog("Error: dnn_layer_load: input vector length is not 8-element aligned (%d)\n", l->in);
    return FALSE;
  }
  if (use_simd == USE_SIMD_SSE && l->in % 4 != 0) {
    jlog("Error: dnn_layer_load: input vector length is not 4-element aligned (%d)\n", l->in);
    return FALSE;
  }
  l->w = (float *)mymalloc_simd_aligned(sizeof(float) * l->out * l->in);
  l->b = (float *)mymalloc_simd_aligned(sizeof(float) * l->out);
#else
  l->w = (float *)mymalloc(sizeof(float) * l->out * l->in);
  l->b = (float *)mymalloc(sizeof(float) * l->out);
#endif /* SIMD_ENABLED */
  if (! load_npy(l->w, wfile, l->in, l->out)) return FALSE;
  jlog("Stat: dnn_layer_load: loaded %s\n", wfile);
  if (! load_npy(l->b, bfile, l->out, 1)) return FALSE;
  jlog("Stat: dnn_layer_load: loaded %s\n", bfile);
#ifdef _OPENMP
  /* divide the output rows into thread chunks */
  if (l->begin == NULL) {
    l->begin = (int *)mymalloc(sizeof(int) * thread_num);
  }
  if (l->end == NULL) {
    l->end = (int *)mymalloc(sizeof(int) * thread_num);
  }
  int num = l->out / thread_num;
  /* padding base chunk size to factor of 4 for better SIMD processing */
  num = ((num + 3) / 4) * 4;
  int i;
  for (i = 0; i < thread_num; i++) {
    l->begin[i] = num * i;
    l->end[i] = num * i + num;
    /* clamp the last chunk(s) to the actual output size.
       NOTE(review): with many threads and a small layer, begin[i] can
       itself exceed l->out, giving begin > end for trailing threads;
       the compute loops then do no work, which appears intentional —
       confirm the subfunc implementations tolerate a non-positive
       (end - begin) count. */
    if (l->end[i] > l->out) l->end[i] = l->out;
  }
#endif /* _OPENMP */
  return TRUE;
}
/* clear dnn layer */
/* Free all buffers owned by a DNNLayer (weights, bias, OpenMP chunk
 * tables, CUDA device buffers) and reset it to the empty state via
 * dnn_layer_init().  Safe to call on an already-initialized (empty)
 * layer. */
static void dnn_layer_clear(DNNLayer *l)
{
#ifdef SIMD_ENABLED
  /* w/b were allocated with mymalloc_simd_aligned(); use matching free */
  if (l->w != NULL) myfree_simd_aligned(l->w);
  if (l->b != NULL) myfree_simd_aligned(l->b);
#else
  if (l->w != NULL) free(l->w);
  if (l->b != NULL) free(l->b);
#endif /* SIMD_ENABLED */
#ifdef _OPENMP
  if (l->begin != NULL) free(l->begin);
  if (l->end != NULL) free(l->end);
#endif /* _OPENMP */
#ifdef __NVCC__
  cuda_layer_free(l);
#endif /* __NVCC__ */
  dnn_layer_init(l);
}
/*********************************************************************/
/* Allocate and zero-initialize a fresh DNNData container.
 * Release it with dnn_free(). */
DNNData *dnn_new()
{
  DNNData *d = (DNNData *)mymalloc(sizeof(DNNData));
  memset(d, 0, sizeof(DNNData));
  return d;
}
/* Release everything owned by a DNNData (layers, prior table, work
 * buffers, SIMD scratch areas, CUDA state) and zero the struct so it
 * can be reused by dnn_setup().
 *
 * Fixes:
 *  - `accum` is allocated with mymalloc_simd_aligned() but was freed
 *    with myfree_aligned(); when use_simd == USE_SIMD_NONE the buffer
 *    comes from plain mymalloc(), so the mismatched free could crash.
 *    Now freed with the matching myfree_simd_aligned().
 *  - Guard the work-buffer loop against dnn->work == NULL: dnn_setup()
 *    sets hnum before allocating `work`, so a failure in between left a
 *    state where this loop would dereference NULL. */
void dnn_clear(DNNData *dnn)
{
  int i;

#ifdef __NVCC__
  cuda_dnn_clear(dnn);
#endif /* __NVCC__ */
  if (dnn->h) {
    for (i = 0; i < dnn->hnum; i++) {
      dnn_layer_clear(&(dnn->h[i]));
    }
    free(dnn->h);
  }
  dnn_layer_clear(&(dnn->o));
  if (dnn->state_prior) free(dnn->state_prior);
  if (dnn->work) {                       /* fix: may be NULL with hnum > 0 */
    for (i = 0; i < dnn->hnum; i++) {
      if (dnn->work[i]) {
#ifdef SIMD_ENABLED
        myfree_simd_aligned(dnn->work[i]);
#else
        free(dnn->work[i]);
#endif
      }
    }
    free(dnn->work);
  }
#ifdef SIMD_ENABLED
  if (dnn->invec) myfree_simd_aligned(dnn->invec);
  if (dnn->accum) myfree_simd_aligned(dnn->accum);  /* fix: was myfree_aligned */
#endif
  memset(dnn, 0, sizeof(DNNData));
}
/* Release all contents of `dnn` and then the container itself.
 * Counterpart of dnn_new(). */
void dnn_free(DNNData *dnn)
{
  dnn_clear(dnn);
  free(dnn);
}
/************************************************************************/
/* Plain-C fallback for the per-layer affine transform:
 * dst[i] = sum_j(w[i*in + j] * src[j]) + b[i] for i in [0, out).
 * `w` is the (out x in) weight matrix stored row-major; `fstore` is an
 * unused scratch parameter kept only to match the SIMD subfunc
 * signature.  The bias is added after the dot product, preserving the
 * original floating-point summation order. */
static void
sub1(float *dst, float *src, float *w, float *b, int out, int in, float *fstore)
{
  int row, col;

  for (row = 0; row < out; row++) {
    const float *wrow = w + row * in;   /* row `row` of the weight matrix */
    float acc = 0.0f;
    for (col = 0; col < in; col++) {
      acc += wrow[col] * src[col];
    }
    dst[row] = acc + b[row];
  }
}
/************************************************************************/
/* initialize dnn */
/**
 * Initialize a DNNData for computation: detect SIMD support, configure
 * CUDA (when built with nvcc), load all hidden/output layer weights and
 * biases from .npy files, load the state prior table, allocate work
 * buffers, and select the inner-product kernel (dnn->subfunc).
 *
 * @param dnn          container created by dnn_new(); cleared first
 * @param veclen       feature vector length per frame
 * @param contextlen   number of spliced context frames
 * @param inputnodes   input layer size; must equal veclen * contextlen
 * @param outputnodes  output layer size (number of HMM states)
 * @param hiddennodes  size of every hidden layer
 * @param hiddenlayernum number of hidden layers
 * @param wfile/bfile  per-hidden-layer weight/bias .npy file names
 * @param output_wfile/output_bfile  output layer parameter files
 * @param priorfile    text file of "stateid priorvalue" lines
 * @param prior_factor multiplier applied to each prior value
 * @param state_prior_log10nize  take log10 of each scaled prior
 * @param batchsize    stored in dnn->batch_size (used elsewhere)
 * @param num_threads  requested OpenMP thread count (clamped to max)
 * @param cuda_mode    NULL / "disable" / "global[N]" / "shared"
 * @return TRUE on success, FALSE on any error.
 *
 * NOTE(review): on error paths after partial allocation this returns
 * FALSE without rolling back; a later dnn_clear()/dnn_free() is
 * expected to release the partial state — confirm callers do that.
 */
boolean dnn_setup(DNNData *dnn, int veclen, int contextlen, int inputnodes, int outputnodes, int hiddennodes, int hiddenlayernum, char **wfile, char **bfile, char *output_wfile, char *output_bfile, char *priorfile, float prior_factor, boolean state_prior_log10nize, int batchsize, int num_threads, char *cuda_mode)
{
  int i;

  /* check if CPU has SIMD instruction support */
#ifdef SIMD_ENABLED
  cpu_id_check();
#endif

  if (dnn == NULL) return FALSE;

  /* clear old data if exist */
  dnn_clear(dnn);

  /* build logistic table */
  logistic_table_build();

  /* set values */
  dnn->batch_size = batchsize;
  dnn->veclen = veclen;
  dnn->contextlen = contextlen;
  dnn->inputnodenum = inputnodes;
  dnn->hiddennodenum = hiddennodes;
  dnn->outputnodenum = outputnodes;
  dnn->prior_factor = prior_factor;
  dnn->num_threads = num_threads;

#ifdef __NVCC__
  /* parse the CUDA mode string: "disable", "global[blocksize]" or
     "shared" (shared-memory kernel with fixed 16x8 blocks) */
  dnn->blocksize1 = 0;
  dnn->blocksize2 = 0;
  if (cuda_mode == NULL) {
    dnn->use_cuda = TRUE;
    dnn->use_cuda_shared = FALSE;
  } else if (strmatch(cuda_mode, "disable")) {
    dnn->use_cuda = FALSE;
    dnn->use_cuda_shared = FALSE;
  } else if (strnmatch(cuda_mode, "global", 6)) {
    dnn->use_cuda = TRUE;
    dnn->use_cuda_shared = FALSE;
    if (strlen(cuda_mode) > 6) {
      /* optional comma-separated block size after "global" */
      char *buf = strdup(cuda_mode + 6);
      char *p, *save;
      int n = 0;
      for (p = mystrtok_safe(buf, ",", &save); p; p = mystrtok_safe(NULL, ",", &save)) {
        switch(n) {
        case 0:
          dnn->blocksize1 = atoi(p);
          break;
        default:
          /* NOTE(review): `buf` is not freed on this error path */
          jlog("Error: dnn_init: too many CUDA mode parameter: %s\n", cuda_mode);
          return FALSE;
        }
        n++;
      }
      free(buf);
    }
  } else if (strnmatch(cuda_mode, "shared", 6)) {
    dnn->use_cuda = TRUE;
    dnn->use_cuda_shared = TRUE;
    if (strlen(cuda_mode) > 6) {
#if 1
      /* block sizes for the shared kernel are fixed; reject overrides */
      jlog("Error: dnn_init: CUDA shared mode block parameters are fixed to 16x8, remove the parameters: %s\n", cuda_mode);
      return FALSE;
#else
      char *buf = strdup(cuda_mode + 6);
      char *p, *save;
      int n = 0;
      for (p = mystrtok_safe(buf, ",", &save); p; p = mystrtok_safe(NULL, ",", &save)) {
        switch(n) {
        case 0:
          dnn->blocksize1 = atoi(p);
          break;
        case 1:
          dnn->blocksize2 = atoi(p);
          break;
        default:
          jlog("Error: dnn_init: too many CUDA mode parameter: %s\n", cuda_mode);
          return FALSE;
        }
        n++;
      }
      free(buf);
#endif
    }
  }
#else
  /* no CUDA built in: only NULL or "disable" is acceptable */
  if (cuda_mode != NULL && strmatch(cuda_mode, "disable") == FALSE) {
    jlog("Error: dnn_init: CUDA mode specified as \"%s\" but no CUDA support is built-in\n", cuda_mode);
    return FALSE;
  }
#endif /* __NVCC__ */

#ifdef _OPENMP
  /* set number of threads, clamping to what the runtime offers */
  int max_num_threads = omp_get_max_threads();
  if (dnn->num_threads > max_num_threads) {
    jlog("Warning: dnn_init: %d threads requested but available max is %d\n", dnn->num_threads, max_num_threads);
    dnn->num_threads = max_num_threads;
  }
  jlog("Stat: dnn_init: use %d threads for DNN computation (max %d cores)\n", dnn->num_threads, max_num_threads);
#endif /* _OPENMP */

#ifdef __NVCC__
  /* copy logistic_table to GPU */
  if (dnn->use_cuda) {
    cuda_copy_logistic_table(logistic_table, LOGISTIC_TABLE_MAX + 1);
    jlog("Stat: dnn_init: logistic table copied to GPU\n");
  }
#endif /* __NVCC__ */

  /* check for input length consistency */
  {
    int inputlen = veclen * contextlen;
    if (inputnodes != inputlen) {
      jlog("Error: dnn_init: veclen(%d) * contextlen(%d) != inputnodes(%d)\n", veclen, contextlen, inputnodes);
      return FALSE;
    }
    jlog("Stat: dnn_init: input: vec %d * context %d = %d dim\n", veclen, contextlen, inputlen);
    jlog("Stat: dnn_init: input layer: %d dim\n", inputnodes);
    jlog("Stat: dnn_init: %d hidden layer(s): %d dim\n", hiddenlayernum, hiddennodes);
    jlog("Stat: dnn_init: output layer: %d dim\n", outputnodes);
  }

  /* initialize layers */
  dnn->hnum = hiddenlayernum;
  dnn->h = (DNNLayer *)mymalloc(sizeof(DNNLayer) * dnn->hnum);
  for (i = 0; i < dnn->hnum; i++) {
    dnn_layer_init(&(dnn->h[i]));
  }
  dnn_layer_init(&(dnn->o));

  /* load layer parameters: first hidden layer maps input->hidden,
     the rest map hidden->hidden, output layer maps hidden->output */
  if (dnn_layer_load(&(dnn->h[0]), inputnodes, hiddennodes, wfile[0], bfile[0], dnn->num_threads) == FALSE) return FALSE;
  for (i = 1; i < dnn->hnum; i++) {
    if (dnn_layer_load(&(dnn->h[i]), hiddennodes, hiddennodes, wfile[i], bfile[i], dnn->num_threads) == FALSE) return FALSE;
  }
  if (dnn_layer_load(&(dnn->o), hiddennodes, outputnodes, output_wfile, output_bfile, dnn->num_threads) == FALSE) return FALSE;

#ifdef __NVCC__
  /* load DNN layer definitions to GPU */
  if (dnn->use_cuda) {
    for (i = 0; i < dnn->hnum; i++) {
      cuda_layer_load(&(dnn->h[i]));
      jlog("Stat: dnn_init: layer #%d loaded to GPU\n", i);
    }
    cuda_layer_load(&(dnn->o));
    jlog("Stat: dnn_init: output layer loaded to GPU\n");
  }
#endif /* __NVCC__ */

  /* load state prior: text lines of "id value", scaled by prior_factor
     and optionally log10-nized; unspecified states stay 0 */
  {
    FILE *fp;
    int id;
    float val;
    dnn->state_prior_num = outputnodes;
    dnn->state_prior = (float *)mymalloc(sizeof(float) * dnn->state_prior_num);
    for (i = 0; i < dnn->state_prior_num; i++) {
      dnn->state_prior[i] = 0.0f;
    }
    if ((fp = fopen(priorfile, "r")) == NULL) {
      jlog("Error: cannot open %s\n", priorfile);
      return FALSE;
    }
    /* NOTE(review): fscanf(...) != EOF can loop forever on a malformed
       line (matching 0 items repeatedly); consider checking != 2.
       Also, fclose_readfile() is used below on a handle opened with
       plain fopen() — confirm these are interchangeable. */
    while (fscanf(fp, "%d %e", &id, &val) != EOF){
      if (id < 0 || id >= dnn->state_prior_num) {
        jlog("Error: wrong state id in prior file (%d)\n", id);
        fclose_readfile(fp);
        return FALSE;
      }
      dnn->state_prior[id] = val * prior_factor;
      if (state_prior_log10nize) {
        // log10-nize prior
        dnn->state_prior[id] = log10(dnn->state_prior[id]);
      }
    }
    fclose(fp);
    jlog("Stat: dnn_init: state prior loaded: %s\n", priorfile);
  }

  /* allocate work area: one hidden-activation buffer per hidden layer */
  dnn->work = (float **)mymalloc(sizeof(float *) * dnn->hnum);
  for (i = 0; i < dnn->hnum; i++) {
#ifdef SIMD_ENABLED
    dnn->work[i] = (float *)mymalloc_simd_aligned(sizeof(float) * dnn->hiddennodenum);
#else
    dnn->work[i] = (float *)mymalloc(sizeof(float) * dnn->hiddennodenum);
#endif
  }
#ifdef SIMD_ENABLED
  /* aligned copy of the input vector, plus a 32-byte SIMD accumulator
     scratch per thread */
  dnn->invec = (float *)mymalloc_simd_aligned(sizeof(float) * inputnodes);
#ifdef _OPENMP
  dnn->accum = (float *)mymalloc_simd_aligned(32 * dnn->num_threads);
#else
  dnn->accum = (float *)mymalloc_simd_aligned(32);
#endif /* _OPENMP */
#endif

#ifdef __NVCC__
  if (dnn->use_cuda) cuda_dnn_setup(dnn);
  if (dnn->use_cuda) {
    if (dnn->use_cuda_shared) {
      jlog("Stat: dnn_init: CUDA mode: shared, block size = %d x %d\n", dnn->blocksize1, dnn->blocksize2);
    } else {
      jlog("Stat: dnn_init: CUDA mode: global, block size = %d\n", dnn->blocksize1);
    }
  } else {
    jlog("Stat: dnn_init: disabled CUDA support for DNN computation\n");
  }
#else
  jlog("Stat: dnn_init: no CUDA support is built in, CUDA will not be used\n");
#endif /* __NVCC__ */

  /* choose the inner-product kernel matching the detected SIMD mode */
#ifdef SIMD_ENABLED
  switch(use_simd) {
  case USE_SIMD_FMA:
    dnn->subfunc = calc_dnn_fma;
    break;
  case USE_SIMD_AVX:
    dnn->subfunc = calc_dnn_avx;
    break;
  case USE_SIMD_SSE:
    dnn->subfunc = calc_dnn_sse;
    break;
  case USE_SIMD_NEON:
    dnn->subfunc = calc_dnn_neon;
    break;
  case USE_SIMD_NEONV2:
    dnn->subfunc = calc_dnn_neonv2;
    break;
  default:
    dnn->subfunc = sub1;
    break;
  }
#else
  dnn->subfunc = sub1;
#endif /* SIMD_ENABLED */

  /* output CPU related info */
  output_use_simd();

  return TRUE;
}
/**
 * Compute DNN state output probabilities for the current frame.
 *
 * Reads the input feature vector from
 * wrk->OP_param->parvec[wrk->OP_time][], feeds it forward through all
 * hidden layers (logistic activation via the precomputed table), then
 * through the output layer, applies softmax in the log domain and
 * subtracts the state prior, storing the result in wrk->last_cache[].
 * When built with CUDA and enabled, delegates entirely to the GPU path.
 */
void dnn_calc_outprob(HMMWork *wrk)
{
  float *src;
  DNNData *dnn = wrk->OP_dnn;
#ifndef _OPENMP
  int n = 0;          /* NOTE(review): unused in this path */
  int hidx, i;
  float *dst;
  DNNLayer *h;
#endif

#ifdef __NVCC__
  if (dnn->use_cuda) {
    cuda_calc_outprob(wrk);
    return;
  }
#endif

  /* frame = wrk->OP_time */
  /* param = wrk->OP_param */
  /* input vector = wrk->OP_param[wrk->OP_time][] */
  /* store state outprob to wrk->last_cache[] */

  /* feed forward through hidden layers by standard logistic function */
#ifdef SIMD_ENABLED
  /* copy input into the SIMD-aligned buffer so vector loads are valid */
  memcpy(dnn->invec, &(wrk->OP_param->parvec[wrk->OP_time][0]), sizeof(float) * dnn->inputnodenum);
  src = dnn->invec;
#else
  src = &(wrk->OP_param->parvec[wrk->OP_time][0]);
#endif /* SIMD_ENABLED */

#ifdef _OPENMP
  /* each thread computes its pre-assigned output-row chunk
     (h->begin[id]..h->end[id]) of every layer; the barrier makes all
     of a layer's activations ready before the next layer consumes them */
#pragma omp parallel num_threads(dnn->num_threads)
  {
    int hidx, i;
    float *lsrc, *dst;
    DNNLayer *h;
    int id = omp_get_thread_num();
    int j;
    lsrc = src;
    for (hidx = 0; hidx < dnn->hnum; hidx++) {
      dst = dnn->work[hidx];
      h = &(dnn->h[hidx]);
      /* affine transform for this thread's rows; accum is a per-thread
         32-byte scratch area (id * 8 floats offset) */
      (*dnn->subfunc)(dst + h->begin[id] , lsrc, h->w + h->begin[id] * h->in, h->b + h->begin[id], h->end[id] - h->begin[id], h->in, dnn->accum + id * 8);
      /* inline logistic activation via table lookup (same as
         logistic_func(), open-coded here) */
      for (j = h->begin[id] ; j < h->end[id]; j++) {
        if (dst[j] <= -8.0f)
          dst[j] = LOGISTIC_MIN;
        else if (dst[j] >= 8.0f)
          dst[j] = LOGISTIC_MAX;
        else
          dst[j] = logistic_table[(int)((dst[j] + 8.0f) * LOGISTIC_TABLE_FACTOR + 0.5)];
      }
#pragma omp barrier
      lsrc = dst;
    }
    /* compute output layer (no activation; softmax applied below) */
    (*dnn->subfunc)(wrk->last_cache + dnn->o.begin[id] , lsrc, dnn->o.w + dnn->o.begin[id] * dnn->o.in, dnn->o.b + dnn->o.begin[id], dnn->o.end[id] - dnn->o.begin[id], dnn->o.in, dnn->accum + id * 8);
  }
#else  /* ~_OPENMP */
  for (hidx = 0; hidx < dnn->hnum; hidx++) {
    dst = dnn->work[hidx];
    h = &(dnn->h[hidx]);
    (*dnn->subfunc)(dst, src, h->w, h->b, h->out, h->in, dnn->accum);
    for (i = 0; i < h->out; i++) {
      dst[i] = logistic_func(dst[i]);
    }
    src = dst;
  }
  /* compute output layer */
  (*dnn->subfunc)(wrk->last_cache, src, dnn->o.w, dnn->o.b, dnn->o.out, dnn->o.in, dnn->accum);
#endif /* _OPENMP */

  /* do softmax in log10 domain:
     INV_LOG_TEN * (x - addlogarray(x)) - log10(state_prior) */
#ifdef NO_SUM_COMPUTATION
  /* not compute sum (test mode; see #undef at top of file).
     NOTE(review): this branch uses `i`, which is only declared in the
     non-OpenMP path above — it would not compile with _OPENMP set. */
  for (i = 0; i < wrk->statenum; i++) {
    wrk->last_cache[i] = INV_LOG_TEN * wrk->last_cache[i] - dnn->state_prior[i];
  }
#else
  /* compute sum */
  {
    int i;
    float logprob = addlog_array(wrk->last_cache, wrk->statenum);
    for (i = 0; i < wrk->statenum; i++) {
      wrk->last_cache[i] = INV_LOG_TEN * (wrk->last_cache[i] - logprob) - dnn->state_prior[i];
    }
  }
#endif /* NO_SUM_COMPUTATION */
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
/* Compute *result = *x - *y for `struct timeval` values, normalizing so
 * that result->tv_usec is non-negative.  `y` is modified during the
 * carry/borrow normalization (as in the classic glibc manual example).
 * Returns 1 when the difference is negative, 0 otherwise. */
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
  /* Borrow whole seconds into y's microseconds so x->tv_usec >= y->tv_usec. */
  if (x->tv_usec < y->tv_usec)
  {
    int carry = (y->tv_usec - x->tv_usec) / 1000000 + 1;
    y->tv_usec -= 1000000 * carry;
    y->tv_sec += carry;
  }
  /* Push excess microseconds (over one second) into y's seconds. */
  if (x->tv_usec - y->tv_usec > 1000000)
  {
    int carry = (x->tv_usec - y->tv_usec) / 1000000;
    y->tv_usec += 1000000 * carry;
    y->tv_sec -= carry;
  }
  /* After normalization the microsecond difference is non-negative. */
  result->tv_sec = x->tv_sec - y->tv_sec;
  result->tv_usec = x->tv_usec - y->tv_usec;
  /* Negative overall difference iff x's (adjusted) seconds are smaller. */
  return x->tv_sec < y->tv_sec;
}
/* Benchmark driver: runs an order-1 3D 7-point stencil with variable
 * coefficients over a (Nx x Ny x Nz) grid (plus one halo layer per
 * side) for Nt timesteps, TESTS times, reporting per-run and minimum
 * wall time.
 *
 * Fixes:
 *  - All four command-line sizes are now required; previously Nx..Nt
 *    were read uninitialized (undefined behavior) when missing.
 *  - Grids are initialized over the full index range and for BOTH time
 *    planes; previously index 0 and all of A[1] were uninitialized but
 *    read by the stencil (halo reads).  Note this changes the rand()
 *    consumption order, so checksums differ from the original.
 *  - `min(...)` is not defined in this file; use the MIN macro.
 *  - Top-level A/coef/tile_size allocations are now freed.
 */
int main(int argc, char *argv[])
{
  int t, i, j, k, m, test;
  int Nx, Ny, Nz, Nt;

  if (argc < 5) {
    fprintf(stderr, "Usage: %s <Nx> <Ny> <Nz> <Nt>\n", argv[0]);
    return EXIT_FAILURE;
  }
  Nx = atoi(argv[1])+2;   /* +2: one halo cell on each side */
  Ny = atoi(argv[2])+2;
  Nz = atoi(argv[3])+2;
  Nt = atoi(argv[4]);

  /* allocate the two time planes of the grid */
  double ****A = (double ****) malloc(sizeof(double***)*2);
  for(m=0; m<2;m++){
    A[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      A[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }
  /* seven per-point coefficient arrays, one per stencil leg */
  double ****coef = (double ****) malloc(sizeof(double***)*7);
  for(m=0; m<7;m++){
    coef[m] = (double ***) malloc(sizeof(double**)*Nz);
    for(i=0; i<Nz; i++){
      coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
      for(j=0;j<Ny;j++){
        coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
      }
    }
  }

  /* tile size information, including extra element to decide the list length */
  int *tile_size = (int*) malloc(sizeof(int));
  tile_size[0] = -1;
  /* The list is modified here before source-to-source transformations */
  tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
  tile_size[0] = 8;
  tile_size[1] = 8;
  tile_size[2] = 24;
  tile_size[3] = 32;
  tile_size[4] = -1;

  /* for timekeeping */
  int ts_return = -1;
  struct timeval start, end, result;
  double tdiff = 0.0, min_tdiff=1.e100;
  const int BASE = 1024;

  /* initialize every cell (including halos) of both time planes and
     all coefficient arrays with reproducible pseudo-random values */
  srand(42);
  for (m = 0; m < 2; m++) {
    for (i = 0; i < Nz; i++) {
      for (j = 0; j < Ny; j++) {
        for (k = 0; k < Nx; k++) {
          A[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }
  for (m=0; m<7; m++) {
    for (i=0; i<Nz; i++) {
      for (j=0; j<Ny; j++) {
        for (k=0; k<Nx; k++) {
          coef[m][i][j][k] = 1.0 * (rand() % BASE);
        }
      }
    }
  }

#ifdef LIKWID_PERFMON
  LIKWID_MARKER_INIT;
#pragma omp parallel
  {
    LIKWID_MARKER_THREADINIT;
#pragma omp barrier
    LIKWID_MARKER_START("calc");
  }
#endif

  int num_threads = 1;
#if defined(_OPENMP)
  /* NOTE(review): omp.h is not included here; omp_get_max_threads()
     relies on an implicit declaration under _OPENMP builds */
  num_threads = omp_get_max_threads();
#endif
  (void) num_threads;   /* reported by PRINT_RESULTS in some variants */

  for(test=0; test<TESTS; test++){
    gettimeofday(&start, 0);
    /* serial execution - 7 multiplications and 6 additions per point */
#pragma scop
    for (t = 0; t < Nt-1; t++) {
      for (i = 1; i < Nz-1; i++) {
        for (j = 1; j < Ny-1; j++) {
          for (k = 1; k < Nx-1; k++) {
            A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
                                  coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
                                  coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
                                  coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
                                  coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
                                  coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
                                  coef[6][i][j][k] * A[t%2][i ][j ][k+1];
          }
        }
      }
    }
#pragma endscop
    gettimeofday(&end, 0);
    ts_return = timeval_subtract(&result, &end, &start);
    tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
    min_tdiff = MIN(min_tdiff, tdiff);   /* fix: `min` is not defined */
    printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
  }
  (void) ts_return;

  PRINT_RESULTS(1, "variable no-symmetry")

#ifdef LIKWID_PERFMON
#pragma omp parallel
  {
    LIKWID_MARKER_STOP("calc");
  }
  LIKWID_MARKER_CLOSE;
#endif

  /* Free allocated arrays (including the top-level pointers, which
     were previously leaked) */
  for(i=0; i<Nz; i++){
    for(j=0;j<Ny;j++){
      free(A[0][i][j]);
      free(A[1][i][j]);
    }
    free(A[0][i]);
    free(A[1][i]);
  }
  free(A[0]);
  free(A[1]);
  free(A);
  for(m=0; m<7;m++){
    for(i=0; i<Nz; i++){
      for(j=0;j<Ny;j++){
        free(coef[m][i][j]);
      }
      free(coef[m][i]);
    }
    free(coef[m]);
  }
  free(coef);
  free(tile_size);
  return 0;
}
|
extract_csk.c | /*CSK raw data extractor.
* Original Author: Walter Szeliga
* Optimized version: Piyush Agram
* Changes for optimization:
* - No more hyperslab selection.
* - Direct write of dataset to memory mapped raw file.
* - OpenMP loop on the memory mapped file to adjust the values. */
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include <sys/types.h>
#include <sys/fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <string.h>
#include <omp.h>
#include "hdf5.h"
double getExtrema(double *lut_data);
double* extractLUT(hid_t file);
int extractImage(hid_t file,char *outFile,double *lut_data);
int
extract_csk(char *filename, char *outFile)
{
  /* Open an HDF5 CSK product, extract its analog-reconstruction LUT, and
   * write the remapped raw samples to outFile.
   *
   * filename: path to the CSK HDF5 product (opened read-only)
   * outFile:  path of the raw output file to create/overwrite
   * Returns 0 on success, EXIT_FAILURE on any error.
   *
   * Fix: the return value of extractImage() was previously discarded, so an
   * image-extraction failure still reported success; it is now propagated.
   * A NULL LUT is also rejected before use. */
  double *lut_data;
  hid_t file;
  herr_t status;
  int ret;
  /* Open the file and get the dataset */
  file = H5Fopen(filename,H5F_ACC_RDONLY, H5P_DEFAULT);
  if (file < 0)
  {
    fprintf(stderr,"Unable to open file: %s\n",filename);
    return EXIT_FAILURE;
  }
  lut_data = extractLUT(file);
  if (lut_data == NULL)
  {
    fprintf(stderr,"Unable to extract LUT from file: %s\n",filename);
    H5Fclose(file);
    return EXIT_FAILURE;
  }
  ret = extractImage(file,outFile,lut_data);
  status = H5Fclose(file);
  free(lut_data);
  /* Report failure if either the image extraction or the file close failed */
  return (ret != 0 || status < 0) ? EXIT_FAILURE : 0;
}
double *
extractLUT(hid_t file)
{
  /* Read the "Analog Signal Reconstruction Levels" attribute into a freshly
   * malloc'd double array (caller owns and frees it). NaN entries are
   * replaced with 0.0 so downstream arithmetic stays finite.
   *
   * Returns the array, or NULL on allocation failure.
   *
   * Fixes: malloc result is now checked; the loop index is hsize_t to match
   * dims[0] (was a signed/unsigned comparison); unused locals removed; a
   * failed H5Aread now yields a zero-filled LUT instead of reading garbage. */
  hsize_t i;
  double *double_lut;
  hid_t lut_attr,lut_space;
  hsize_t dims[1];
  herr_t status;
  lut_attr = H5Aopen_name(file,"Analog Signal Reconstruction Levels");
  lut_space = H5Aget_space(lut_attr);
  (void) H5Sget_simple_extent_dims(lut_space,dims,NULL);
  double_lut = (double *)malloc(dims[0]*sizeof(double));
  if (double_lut == NULL)
  {
    H5Aclose(lut_attr);
    H5Sclose(lut_space);
    return NULL;
  }
  status = H5Aread(lut_attr, H5T_NATIVE_DOUBLE, double_lut);
  if (status < 0)
  {
    /* read failed: give callers a defined (all-zero) LUT rather than
       uninitialized memory */
    memset(double_lut, 0, dims[0]*sizeof(double));
  }
  for(i=0;i<dims[0];i++)
  {
    if(isnan(double_lut[i]))
    {
      double_lut[i] = 0.0;
    }
  }
  H5Aclose(lut_attr);
  H5Sclose(lut_space);
  return double_lut;
}
double
getExtrema(double *double_lut)
{
  /* Return the maximum of the first 256 LUT entries (the LUT indexes 8-bit
   * samples, hence the fixed length). Also prints the maximum.
   *
   * Fix: the running maximum was seeded with DBL_MIN, which is the smallest
   * POSITIVE normalized double, not the most negative value — an all-negative
   * LUT would have returned ~2.2e-308 instead of its true maximum. Seed with
   * -DBL_MAX instead. The unused minimum computation was removed (it was
   * never returned or printed). */
  int i;
  double max;
  max = -DBL_MAX;
  for(i=0;i<256;i++)
  {
    if (double_lut[i] > max)
    {
      max = double_lut[i];
    }
  }
  printf("Max: %lf\n",max);
  return max;
}
int
extractImage(hid_t file, char* outFile, double *lut_data)
{
  /* Extract dataset /S01/B001 into a memory-mapped raw file, then remap each
   * byte through the LUT and rescale to unsigned 8-bit centered on 127.
   *
   * file:     open HDF5 file handle
   * outFile:  path of the raw output file (created/truncated, mode 0600)
   * lut_data: reconstruction LUT (at least 256 entries, see getExtrema)
   * Returns 0 on success, 1 on failure.
   *
   * Fixes: open() and mmap() results are now checked; munmap() uses the same
   * length as ftruncate()/mmap() (the old 2*dims[0]*dims[1] only matched when
   * dims[2] == 2); the leaked H5T handles are closed; the mmap cast matches
   * the declared unsigned char*; unused locals removed. */
  unsigned char *IQ,*data;
  int i,k;
  hid_t type,native_type;
  hid_t dataset,dataspace, cparms;
  hsize_t dims[3],chunk[3];
  hsize_t count_out;
  herr_t status;
  int out;
  long index;
  size_t maplen;
  double max = getExtrema(lut_data);
  /* magnitude bound for a complex sample built from two LUT values */
  max = hypot(max,max);
#if H5Dopen_vers == 2
  dataset = H5Dopen2(file,"/S01/B001",H5P_DEFAULT);
#else
  dataset = H5Dopen(file,"/S01/B001");
#endif
  type = H5Dget_type(dataset);
  native_type = H5Tget_native_type(type,H5T_DIR_ASCEND);
  dataspace = H5Dget_space(dataset);
  status = H5Sget_simple_extent_dims(dataspace, dims, NULL);
  printf("Dimensions %lu x %lu x %lu\n",(unsigned long)dims[0],(unsigned long)dims[1],(unsigned long)dims[2]);
  /* Memory map output file */
  maplen = (size_t)(dims[0]*dims[1]*dims[2])*sizeof(unsigned char);
  out = open(outFile, O_RDWR | O_CREAT, (mode_t)0600);
  if(out == -1 || ftruncate(out,(off_t)maplen) == -1)
  {
    fprintf(stderr,"Unable to create file %s\n",outFile);
    if(out != -1) close(out);
    return 1;
  }
  data = (unsigned char *)mmap(0,maplen, PROT_READ | PROT_WRITE, MAP_SHARED, out, 0);
  if(data == MAP_FAILED)
  {
    fprintf(stderr,"Unable to map file %s\n",outFile);
    close(out);
    return 1;
  }
  /* Check if the dataset is chunked */
  cparms = H5Dget_create_plist(dataset);
  if (H5D_CHUNKED == H5Pget_layout(cparms))
  {
    status = H5Pget_chunk(cparms,3,chunk);
    printf("The dataset is chunked. \n");
    printf("Chunk size: %lu x %lu x %lu \n", (unsigned long) chunk[0], (unsigned long) chunk[1], (unsigned long) chunk[2]);
  }
  IQ = (unsigned char*)malloc(2*dims[1]*sizeof(unsigned char));
  //Lets do the whole thing in one go
  //Super fast but we need type conversion
  status = H5Dread(dataset, native_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
  count_out = dims[1]*2;
  for(k=0; k<dims[0]; k++)
  {
    /* stage one line into a private buffer for the OpenMP remap loop */
    index = 2*dims[1]*k;
    memcpy(IQ,data+index, count_out);
#pragma omp parallel for\
    shared(IQ, lut_data, max, count_out) \
    private(i)
    for(i=0; i<count_out; i++)
      IQ[i] = (unsigned char) (127.0*lut_data[IQ[i]]/max + 127.0);
    memcpy(data+index, IQ, count_out);
  }
  munmap(data,maplen);
  close(out);
  free(IQ);
  /*Cleanup*/
  status = H5Pclose(cparms);
  status = H5Sclose(dataspace);
  status = H5Tclose(native_type);
  status = H5Tclose(type);
  status = H5Dclose(dataset);
  return 0;
}
|
GB_binop__div_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__div_int64
// A.*B function (eWiseMult): GB_AemultB__div_int64
// A*D function (colscale): GB_AxD__div_int64
// D*A function (rowscale): GB_DxB__div_int64
// C+=B function (dense accum): GB_Cdense_accumB__div_int64
// C+=b function (dense accum): GB_Cdense_accumb__div_int64
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__div_int64
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__div_int64
// C=scalar+B GB_bind1st__div_int64
// C=scalar+B' GB_bind1st_tran__div_int64
// C=A+scalar GB_bind2nd__div_int64
// C=A'+scalar GB_bind2nd_tran__div_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_IDIV_SIGNED (aij, bij, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_IDIV_SIGNED (x, y, 64) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_DIV || GxB_NO_INT64 || GxB_NO_DIV_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the DIV_INT64 operator is
// supplied to the included template through the GB_BINOP macro defined above.
// Auto-generated file: edit the Generator/ source, not this function.
void GB_Cdense_ewise3_accum__div_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// no GB_DISABLE guard here: per the comment above, this kernel is only
// generated for ops (MIN..RDIV) that support the dense-accum form
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where all three matrices are dense, no accumulator.
// Returns GrB_NO_VALUE when this op/type pairing is compiled out via
// GB_DISABLE (caller falls back to the generic kernel), else GrB_SUCCESS.
GrB_Info GB_Cdense_ewise3_noaccum__div_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate the sparse matrix B into the dense matrix C, applying
// cij = cij / bij (GB_BINOP) at each entry of B. The kfirst/klast/pstart
// slice arrays describe the portion of B handled by each of the ntasks tasks.
GrB_Info GB_Cdense_accumB__div_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a single scalar b into every entry of the dense matrix C.
GrB_Info GB_Cdense_accumb__div_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// unreachable: the block above always returns; harmless artifact of the
// code generator
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: column-scale A by the diagonal matrix D, with cij = aij / djj
// (per the colscale template). Result values are written into C->x.
GrB_Info GB_AxD__div_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: row-scale B by the diagonal matrix D, with cij = dii / bij
// (per the rowscale template). Result values are written into C->x.
GrB_Info GB_DxB__div_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, using DIV_INT64 as the "+" operator
// (GraphBLAS eWiseAdd semantics; the pattern handling lives in the template).
// C_to_M/A/B map C's vectors to those of M, A, and B; TaskList carries the
// ntasks parallel tasks computed by the add phase-1 planner.
GrB_Info GB_AaddB__div_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, using DIV_INT64 as the ".*" operator
// (GraphBLAS eWiseMult semantics; pattern handling lives in the template).
GrB_Info GB_AemultB__div_int64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x / Bx [p] for all p: apply DIV_INT64 with the scalar bound as
// the first operand. Cx and Bx may alias; the loop only reads Bx [p] before
// writing Cx [p], so aliasing is safe.
GrB_Info GB_bind1st__div_int64
(
    GB_void *Cx_output, // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // view the untyped buffers as int64 arrays and fetch the bound scalar
    int64_t *Cz = (int64_t *) Cx_output ;
    const int64_t *Bz = (const int64_t *) Bx_input ;
    const int64_t scalar = (*((const int64_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // z = x / bij, with GraphBLAS signed-division semantics
        int64_t bij = Bz [k] ;
        Cz [k] = GB_IDIV_SIGNED (scalar, bij, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] / y for all p: apply DIV_INT64 with the scalar bound as
// the second operand. Cx and Ax may alias; each slot is read before written.
GrB_Info GB_bind2nd__div_int64
(
    GB_void *Cx_output, // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // view the untyped buffers as int64 arrays and fetch the bound scalar
    int64_t *Cz = (int64_t *) Cx_output ;
    const int64_t *Az = (const int64_t *) Ax_input ;
    const int64_t scalar = (*((const int64_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // z = aij / y, with GraphBLAS signed-division semantics
        int64_t aij = Az [k] ;
        Cz [k] = GB_IDIV_SIGNED (aij, scalar, 64) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (x, aij, 64) ; \
}
// C = op (x, A'): transpose A and apply z = x / aij at each entry, via the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB_bind1st_tran__div_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for any code that follows this function
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_SIGNED (aij, y, 64) ; \
}
// C = op (A', y): transpose A and apply z = aij / y at each entry, via the
// GB_CAST_OP macro defined just above this function.
GrB_Info GB_bind2nd_tran__div_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the bound scalar y (2nd operand) is captured for the template's GB_CAST_OP
int64_t y = (*((const int64_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
schedule-test.c | /**
* Generate predictable page access patterns in order to sanity check the page
* analyis trace & thread placement framework. There are several access
* patterms implemented, but each pattern has 4 threads sharing an individual
* page and hence those 4 threads should be placed together by the partitioning
* algorithm.
*
* Author: Rob Lyerly <rlyerly@vt.edu>
* Date: 1/24/2018
*/
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <omp.h>
#include <sys/sysinfo.h>
#define XSTR( x ) STR(x)
#define STR( x ) #x
/*
* Knobs controlling length of OpenMP sections & memory used. The number of
* iterations can be set at command line.
*/
#define ITERS 2048
#define PAGES 1024
/* Size definitions */
#define PAGESZ 4096
#define INTS_PER_PAGE (PAGESZ / sizeof(int))
#define ARRSIZE (PAGES * INTS_PER_PAGE)
#define CHUNKSZ (INTS_PER_PAGE / 4)
#define NS( ts ) ((ts.tv_sec * 1000000000LU) + ts.tv_nsec)
typedef int page_array_t [ARRSIZE] __attribute__((aligned(4096)));
static page_array_t thearray;
#define helptext \
"Generate a predictable page access pattern to sanity check the thread\
placement framework.\n\n\
Usage: thread-schedule [ OPTIONS ]\n\
Options:\n\
-h : print help & exit\n\
-i num : number of iterations to run each access pattern (default: " XSTR(ITERS) ")\n\
-t num : number of threads to use\n"
/* Parse the command line: -h prints help and exits, -i sets the iteration
 * count, -t sets the thread count. Unknown flags produce a warning and are
 * otherwise ignored; *threads and *iters keep their caller-supplied defaults
 * unless overridden. */
void parse_args(int argc, char** argv, size_t *threads, size_t *iters) {
  int opt;
  while((opt = getopt(argc, argv, "hi:t:")) != -1) {
    if(opt == 'h') {
      printf(helptext);
      exit(0);
    } else if(opt == 'i') {
      *iters = atoi(optarg);
    } else if(opt == 't') {
      *threads = atoi(optarg);
    } else {
      printf("WARNING: Ignoring unknown argument '%c'\n", opt);
    }
  }
}
/* Fill the global page array with pseudo-random values in [0, 1023].
 * NOTE(review): rand() is called concurrently from the OpenMP workers and is
 * not required to be thread-safe, so the produced values are best-effort and
 * non-reproducible across runs despite srand-style seeding — acceptable for
 * generating access patterns, but confirm if reproducibility ever matters. */
void randomize(page_array_t array) {
size_t i;
#pragma omp parallel for
for(i = 0; i < ARRSIZE; i++) array[i] = rand() % 1024;
}
/*
* Pattern 1: groups of 4 consecutive threads should be mapped to the same
* node.
*/
/*
 * Pattern-1 worker: increment the whole array `iters` times with a static
 * CHUNKSZ (quarter-page) distribution, so four consecutive threads touch the
 * same page each pass. Prints the elapsed wall time. The exact scheduling is
 * the point of this benchmark, so the loop structure must not be reshuffled.
 */
void add1(page_array_t array, const size_t iters) {
size_t iter, i;
struct timespec start, end;
printf("Region 1: consecutive threads access the same page...");
fflush(stdout);
clock_gettime(CLOCK_MONOTONIC, &start);
#pragma omp parallel private(iter)
{
// TODO make this region 1
// every iteration re-runs the same static distribution, giving the tracer
// a stable per-thread page-access pattern
for(iter = 0; iter < iters; iter++) {
#pragma omp for schedule(static, CHUNKSZ)
for(i = 0; i < ARRSIZE; i++) array[i] += 1;
}
} /* end parallel */
clock_gettime(CLOCK_MONOTONIC, &end);
printf("%lu ms\n", (NS(end) - NS(start)) / 1000000);
}
/*
* Pattern 2: threads with the same parity should be mapped to the same node
* (e.g., evens with evens, odds with odds).
*/
/*
 * Pattern-2 worker: like add1, but each thread shifts its accesses by a
 * per-thread offset so that threads of equal parity (evens together, odds
 * together) land on the same pages. Prints the elapsed wall time.
 */
void add2(page_array_t array, const size_t iters) {
size_t iter, i;
struct timespec start, end;
printf("Region 2: threads with the same parity access the same page...");
fflush(stdout);
clock_gettime(CLOCK_MONOTONIC, &start);
#pragma omp parallel private(iter)
{
long offset;
// offsets repeat with period 8 threads: odd threads shift forward, even
// threads shift backward, interleaving the two parities onto shared pages
int thread = omp_get_thread_num() % 8;
if(thread % 2) offset = (4 + (thread / 2) - thread) * CHUNKSZ;
else offset = -((thread / 2) * CHUNKSZ);
// NOTE(review): array[i + offset] can index outside [0, ARRSIZE) for some
// thread/chunk combinations — presumably tolerated by this synthetic
// benchmark, but confirm against the tracer's expectations.
// TODO make this region 2
for(iter = 0; iter < iters; iter++) {
#pragma omp for schedule(static, CHUNKSZ)
for(i = 0; i < ARRSIZE; i++) array[i + offset] += 2;
}
} /* end parallel */
clock_gettime(CLOCK_MONOTONIC, &end);
printf("%lu ms\n", (NS(end) - NS(start)) / 1000000);
}
/* Entry point: parse options, size the OpenMP team, randomize the page
 * array, then run both access patterns and report total wall time. */
int main(int argc, char** argv) {
  /* defaults: one thread per configured processor, ITERS iterations */
  size_t nthreads = get_nprocs_conf();
  size_t niters = ITERS;
  struct timespec t_begin, t_end;

  parse_args(argc, argv, &nthreads, &niters);
  omp_set_num_threads(nthreads);

  /* fill the array with random data before timing the patterns */
  randomize(thearray);

  printf("--------------------\nTHREAD SCHEDULE TEST\n--------------------\n");
  printf("Running %lu iterations with %lu threads...\n", niters, nthreads);

  clock_gettime(CLOCK_MONOTONIC, &t_begin);
  add1(thearray, niters);
  add2(thearray, niters);
  clock_gettime(CLOCK_MONOTONIC, &t_end);
  printf("Total execution time: %lu ms\n", (NS(t_end) - NS(t_begin)) / 1000000);
}
|
GB_unop__minv_int8_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__minv_int8_int8)
// op(A') function: GB (_unop_tran__minv_int8_int8)
// C type: int8_t
// A type: int8_t
// cast: int8_t cij = aij
// unaryop: cij = GB_IMINV_SIGNED (aij, 8)
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IMINV_SIGNED (x, 8) ;
// casting
#define GB_CAST(z, aij) \
int8_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int8_t z = aij ; \
Cx [pC] = GB_IMINV_SIGNED (z, 8) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINV || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = minv (Ax [p]) for all p: apply the signed 8-bit multiplicative
// inverse to every entry (or every present entry, in the bitmap case).
GrB_Info GB (_unop_apply__minv_int8_int8)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const int8_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b, so
        // only the entries flagged present need to be computed
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (Ab [k])
            {
                int8_t t = Ax [k] ;
                Cx [k] = GB_IMINV_SIGNED (t, 8) ;
            }
        }
    }
    else
    {
        // full/sparse case: every slot holds a value
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            int8_t t = Ax [k] ;
            Cx [k] = GB_IMINV_SIGNED (t, 8) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = minv (A'): transpose A and apply the signed 8-bit multiplicative
// inverse via the GB_CAST_OP macro defined above; the transpose machinery
// lives in the included template.
GrB_Info GB (_unop_tran__minv_int8_int8)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int32_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int32_fc64)
// op(A') function: GB (_unop_tran__identity_int32_fc64)
// C type: int32_t
// A type: GxB_FC64_t
// cast: int32_t cij = GB_cast_to_int32_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = GB_cast_to_int32_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = GB_cast_to_int32_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// Cx [p] = (int32_t) creal (Ax [p]) for all p: identity op with a typecast
// from double complex to int32 (real part only), entrywise.
GrB_Info GB (_unop_apply__identity_int32_fc64)
(
    int32_t *Cx,                // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t k ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab != NULL)
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            if (!Ab [k]) continue ;
            GxB_FC64_t a = Ax [k] ;
            Cx [k] = GB_cast_to_int32_t (creal (a)) ;
        }
    }
    else
    {
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        // no cast needed: a flat parallel memcpy suffices
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (k = 0 ; k < anz ; k++)
        {
            GxB_FC64_t a = Ax [k] ;
            Cx [k] = GB_cast_to_int32_t (creal (a)) ;
        }
        #endif
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// C = identity (A'): transpose A while casting double-complex entries to
// int32 (real part) via the GB_CAST_OP macro defined above; the transpose
// machinery lives in the included template.
GrB_Info GB (_unop_tran__identity_int32_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
quadrature.c | #include "quadrature.h"
double trapezoid (double (*fp)(double), double a, double b, int N)
{
    /*
    Composite Trapezoid Rule estimate of the integral of fp over [a, b],
    sampling N equally spaced points (N - 1 subintervals).
    Input:
      fp: the function to integrate
      a:  left endpoint
      b:  right endpoint
      N:  number of sample points (>= 2)
    Returns:
      numerical estimate of the integral
    */
    const double step = (b - a) / (N - 1);
    double acc = 0.0;
    #pragma omp parallel for reduction(+ : acc)
    for (int j = 0; j < N; j++)
        acc += fp(j * step + a);
    /* endpoints carry half weight in the trapezoid rule */
    acc -= 0.5 * (fp(a) + fp(b));
    return step * acc;
}
double simpson (double (*fp)(double), double a, double b, int N)
{
    /*
    Composite Simpson's Rule estimate of the integral of fp over [a, b],
    using N equally spaced nodes plus the midpoint of each subinterval.
    Input:
      fp: the function to integrate
      a:  left endpoint
      b:  right endpoint
      N:  number of nodes (>= 2)
    Returns:
      numerical estimate of the integral
    */
    const double step = (b - a) / (N - 1);
    double acc = 0.0;
    #pragma omp parallel for reduction(+ : acc)
    for (int j = 0; j < N; j++) {
        const double node = j * step + a;
        /* each node contributes weight 2, each midpoint weight 4 */
        acc += 2.0 * fp(node) + 4.0 * fp(node + step / 2.0);
    }
    /* the loop over-counts: drop the duplicated endpoint terms and the
       spurious midpoint beyond b */
    acc -= fp(a) + fp(b) + 4.0 * fp(b + step / 2.0);
    return (step / 6.0) * acc;
}
/*
 * Print a convergence table for `method` (trapezoid or simpson): for each
 * entry of nvals, the estimate, its absolute error against int_true, and
 * the ratio of the previous row's error to this row's.
 * NOTE(review): the column header always reads "trapezoid" even when the
 * simpson method is passed — confirm whether callers rely on that text
 * before changing it.
 */
void error_table (double (*fp)(double), double a, double b,
int nrows, int nvals[], double int_true,
double (*method)(double (*)(double), double, double, int))
{
double ratio, last = 1.0;   /* first-row ratio is meaningless (seeded with 1.0) */
printf ("%8s %22s %13s %13s\n", "n", "trapezoid", "error", "ratio");
for (int m=0;m<nrows;m++) {
double val = method ( fp, a, b, nvals[m] );
double err = fabs(val - int_true);
ratio = last/err;
last = err;
printf("%8d %22.14e %13.3e %13.3e\n", nvals[m], val, err, ratio);
}
return;
}
|
decorate.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% DDDD EEEEE CCCC OOO RRRR AAA TTTTT EEEEE %
% D D E C O O R R A A T E %
% D D EEE C O O RRRR AAAAA T EEE %
% D D E C O O R R A A T E %
% DDDD EEEEE CCCC OOO R R A A T EEEEE %
% %
% %
% MagickCore Image Decoration Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/memory_.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/transform.h"
/*
Define declarations.
*/
#define AccentuateModulate ScaleCharToQuantum(80)
#define HighlightModulate ScaleCharToQuantum(125)
#define ShadowModulate ScaleCharToQuantum(135)
#define DepthModulate ScaleCharToQuantum(185)
#define TroughModulate ScaleCharToQuantum(110)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% B o r d e r I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% BorderImage() surrounds the image with a border of the color defined by
% the bordercolor member of the image structure. The width and height
% of the border are defined by the corresponding members of the border_info
% structure.
%
% The format of the BorderImage method is:
%
% Image *BorderImage(const Image *image,const RectangleInfo *border_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o border_info: define the width and height of the border.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *BorderImage(const Image *image,
  const RectangleInfo *border_info,const CompositeOperator compose,
  ExceptionInfo *exception)
{
  FrameInfo
    frame_info;

  Image
    *canvas,
    *framed;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(border_info != (RectangleInfo *) NULL);
  /*
    A border is a frame with no bevels: the framed canvas is wider and taller
    than the source by twice the border on each axis, with the image centered.
  */
  frame_info.x=(ssize_t) border_info->width;
  frame_info.y=(ssize_t) border_info->height;
  frame_info.width=image->columns+(border_info->width << 1);
  frame_info.height=image->rows+(border_info->height << 1);
  frame_info.inner_bevel=0;
  frame_info.outer_bevel=0;
  canvas=CloneImage(image,0,0,MagickTrue,exception);
  if (canvas == (Image *) NULL)
    return((Image *) NULL);
  /*
    FrameImage() paints with matte_color, so substitute the border color on
    the clone, then restore the original matte color on the result.
  */
  canvas->matte_color=image->border_color;
  framed=FrameImage(canvas,&frame_info,compose,exception);
  canvas=DestroyImage(canvas);
  if (framed != (Image *) NULL)
    framed->matte_color=image->matte_color;
  return(framed);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F r a m e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FrameImage() adds a simulated three-dimensional border around the image.
% The color of the border is defined by the matte_color member of image.
% Members width and height of frame_info specify the border width of the
% vertical and horizontal sides of the frame. Members inner and outer
% indicate the width of the inner and outer shadows of the frame.
%
% The format of the FrameImage method is:
%
% Image *FrameImage(const Image *image,const FrameInfo *frame_info,
% const CompositeOperator compose,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o frame_info: Define the width and height of the frame and its bevels.
%
% o compose: the composite operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FrameImage(const Image *image,const FrameInfo *frame_info,
  const CompositeOperator compose,ExceptionInfo *exception)
{
#define FrameImageTag "Frame/Image"

  CacheView
    *image_view,
    *frame_view;

  Image
    *frame_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  /*
    The five bevel colors, all derived from the image's matte color:
    accentuate/highlight shade the lit (top-left) edges, shadow/trough the
    dark (bottom-right) edges, and matte fills the flat frame body.
  */
  PixelInfo
    accentuate,
    highlight,
    matte,
    shadow,
    trough;

  register ssize_t
    x;

  size_t
    bevel_width,
    height,
    width;

  ssize_t
    y;

  /*
    Check frame geometry.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(frame_info != (FrameInfo *) NULL);
  /*
    Negative bevel widths are invalid; note this reuses the
    "FrameIsLessThanImageSize" message for both geometry errors.
  */
  if ((frame_info->outer_bevel < 0) || (frame_info->inner_bevel < 0))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /* combined width of the inner and outer bevels on one side */
  bevel_width=(size_t) (frame_info->outer_bevel+frame_info->inner_bevel);
  /* x,y: interior extent left for the original image; must fit it */
  x=(ssize_t) frame_info->width-frame_info->x-bevel_width;
  y=(ssize_t) frame_info->height-frame_info->y-bevel_width;
  if ((x < (ssize_t) image->columns) || (y < (ssize_t) image->rows))
    ThrowImageException(OptionError,"FrameIsLessThanImageSize");
  /*
    Initialize framed image attributes.
  */
  frame_image=CloneImage(image,frame_info->width,frame_info->height,MagickTrue,
    exception);
  if (frame_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(frame_image,DirectClass,exception) == MagickFalse)
    {
      frame_image=DestroyImage(frame_image);
      return((Image *) NULL);
    }
  /* a non-gray border on a grayscale canvas forces a promotion to sRGB */
  if ((IsPixelInfoGray(&frame_image->border_color) == MagickFalse) &&
      (IsGrayColorspace(frame_image->colorspace) != MagickFalse))
    (void) SetImageColorspace(frame_image,sRGBColorspace,exception);
  /* if the matte color carries alpha but the image has none, enable alpha */
  if ((frame_image->matte_color.alpha_trait != UndefinedPixelTrait) &&
      (frame_image->alpha_trait == UndefinedPixelTrait))
    (void) SetImageAlpha(frame_image,OpaqueAlpha,exception);
  frame_image->page=image->page;
  /* grow the virtual canvas by the same amount the real canvas grew */
  if ((image->page.width != 0) && (image->page.height != 0))
    {
      frame_image->page.width+=frame_image->columns-image->columns;
      frame_image->page.height+=frame_image->rows-image->rows;
    }
  /*
    Initialize 3D effects color.
  */
  matte=image->matte_color;
  accentuate=matte;
  /* blend each channel of matte toward QuantumRange by *Modulate factor */
  accentuate.red=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.red+(QuantumRange*AccentuateModulate)));
  accentuate.green=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.green+(QuantumRange*AccentuateModulate)));
  accentuate.blue=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.blue+(QuantumRange*AccentuateModulate)));
  accentuate.black=(double) (QuantumScale*((QuantumRange-
    AccentuateModulate)*matte.black+(QuantumRange*AccentuateModulate)));
  accentuate.alpha=matte.alpha;
  highlight=matte;
  highlight.red=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.red+(QuantumRange*HighlightModulate)));
  highlight.green=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.green+(QuantumRange*HighlightModulate)));
  highlight.blue=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.blue+(QuantumRange*HighlightModulate)));
  highlight.black=(double) (QuantumScale*((QuantumRange-
    HighlightModulate)*matte.black+(QuantumRange*HighlightModulate)));
  highlight.alpha=matte.alpha;
  /* shadow/trough darken matte by a simple multiplicative factor */
  shadow=matte;
  shadow.red=QuantumScale*matte.red*ShadowModulate;
  shadow.green=QuantumScale*matte.green*ShadowModulate;
  shadow.blue=QuantumScale*matte.blue*ShadowModulate;
  shadow.black=QuantumScale*matte.black*ShadowModulate;
  shadow.alpha=matte.alpha;
  trough=matte;
  trough.red=QuantumScale*matte.red*TroughModulate;
  trough.green=QuantumScale*matte.green*TroughModulate;
  trough.blue=QuantumScale*matte.blue*TroughModulate;
  trough.black=QuantumScale*matte.black*TroughModulate;
  trough.alpha=matte.alpha;
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  frame_view=AcquireAuthenticCacheView(frame_image,exception);
  /* total height of the top band: outer bevel + flat frame + inner bevel */
  height=(size_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (height != 0)
    {
      register ssize_t
        x;

      register Quantum
        *magick_restrict q;

      /*
        Draw top of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,0,frame_image->columns,
        height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw top of ornamental border.
          */
          for (y=0; y < (ssize_t) frame_info->outer_bevel; y++)
          {
            /* lit diagonal corner, accentuated top edge, shadowed right */
            for (x=0; x < (ssize_t) (frame_image->columns-y); x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* flat frame body rows between the outer and inner bevels */
          for (y=0; y < (ssize_t) (frame_info->y-bevel_width); y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* inner bevel rows just above the image interior */
          for (y=0; y < (ssize_t) frame_info->inner_bevel; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            width=image->columns+((size_t) frame_info->inner_bevel << 1)-
              y;
            for (x=0; x < (ssize_t) width; x++)
            {
              if (x < y)
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  /*
    Draw sides of ornamental border.  Each scanline beside the image interior
    is independent, so the rows are processed in parallel.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,frame_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    size_t
      width;

    /*
      Initialize scanline with matte color.
    */
    if (status == MagickFalse)
      continue;
    q=QueueCacheViewAuthenticPixels(frame_view,0,frame_info->y+y,
      frame_image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* left edge: outer highlight, flat frame, inner shadow */
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    /*
      Set frame interior pixels.  The source image itself is composited over
      this area at the end of the function.
    */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelViaPixelInfo(frame_image,&frame_image->border_color,q);
      q+=GetPixelChannels(frame_image);
    }
    /* right edge: inner highlight, flat frame, outer shadow */
    for (x=0; x < (ssize_t) frame_info->inner_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&highlight,q);
      q+=GetPixelChannels(frame_image);
    }
    width=frame_info->width-frame_info->x-image->columns-bevel_width;
    for (x=0; x < (ssize_t) width; x++)
    {
      SetPixelViaPixelInfo(frame_image,&matte,q);
      q+=GetPixelChannels(frame_image);
    }
    for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
    {
      SetPixelViaPixelInfo(frame_image,&shadow,q);
      q+=GetPixelChannels(frame_image);
    }
    if (SyncCacheViewAuthenticPixels(frame_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FrameImage)
#endif
        proceed=SetImageProgress(image,FrameImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /* total height of the bottom band, mirroring the top band */
  height=(size_t) (frame_info->inner_bevel+frame_info->height-
    frame_info->y-image->rows-bevel_width+frame_info->outer_bevel);
  if (height != 0)
    {
      register ssize_t
        x;

      register Quantum
        *magick_restrict q;

      /*
        Draw bottom of ornamental border.
      */
      q=QueueCacheViewAuthenticPixels(frame_view,0,(ssize_t) (frame_image->rows-
        height),frame_image->columns,height,exception);
      if (q != (Quantum *) NULL)
        {
          /*
            Draw bottom of ornamental border.
          */
          for (y=frame_info->inner_bevel-1; y >= 0; y--)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) (frame_info->x-bevel_width); x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) (image->columns+2*frame_info->inner_bevel); x++)
            {
              if (x >= (ssize_t) (image->columns+2*frame_info->inner_bevel-y))
                SetPixelViaPixelInfo(frame_image,&highlight,q);
              else
                SetPixelViaPixelInfo(frame_image,&accentuate,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_info->width-frame_info->x-image->columns-bevel_width;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* flat frame body rows below the inner bevel */
          height=frame_info->height-frame_info->y-image->rows-bevel_width;
          for (y=0; y < (ssize_t) height; y++)
          {
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            width=frame_image->columns-2*frame_info->outer_bevel;
            for (x=0; x < (ssize_t) width; x++)
            {
              SetPixelViaPixelInfo(frame_image,&matte,q);
              q+=GetPixelChannels(frame_image);
            }
            for (x=0; x < (ssize_t) frame_info->outer_bevel; x++)
            {
              SetPixelViaPixelInfo(frame_image,&shadow,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          /* outermost bottom bevel rows, drawn bottom-up */
          for (y=frame_info->outer_bevel-1; y >= 0; y--)
          {
            for (x=0; x < y; x++)
            {
              SetPixelViaPixelInfo(frame_image,&highlight,q);
              q+=GetPixelChannels(frame_image);
            }
            for ( ; x < (ssize_t) frame_image->columns; x++)
            {
              if (x >= (ssize_t) (frame_image->columns-y))
                SetPixelViaPixelInfo(frame_image,&shadow,q);
              else
                SetPixelViaPixelInfo(frame_image,&trough,q);
              q+=GetPixelChannels(frame_image);
            }
          }
          (void) SyncCacheViewAuthenticPixels(frame_view,exception);
        }
    }
  frame_view=DestroyCacheView(frame_view);
  image_view=DestroyCacheView(image_view);
  /* composite the original image into the frame interior at (x,y) */
  x=(ssize_t) (frame_info->outer_bevel+(frame_info->x-bevel_width)+
    frame_info->inner_bevel);
  y=(ssize_t) (frame_info->outer_bevel+(frame_info->y-bevel_width)+
    frame_info->inner_bevel);
  if (status != MagickFalse)
    status=CompositeImage(frame_image,image,compose,MagickTrue,x,y,
      exception);
  if (status == MagickFalse)
    frame_image=DestroyImage(frame_image);
  return(frame_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R a i s e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RaiseImage() creates a simulated three-dimensional button-like effect
% by lightening and darkening the edges of the image. Members width and
% height of raise_info define the width of the vertical and horizontal
% edge of the effect.
%
% The format of the RaiseImage method is:
%
% MagickBooleanType RaiseImage(const Image *image,
% const RectangleInfo *raise_info,const MagickBooleanType raise,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o raise_info: Define the width and height of the raise area.
%
% o raise: A value other than zero creates a 3-D raise effect,
% otherwise it has a lowered effect.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType RaiseImage(Image *image,
  const RectangleInfo *raise_info,const MagickBooleanType raise,
  ExceptionInfo *exception)
{
/* blend factors: each edge pixel is mixed with foreground/background */
#define AccentuateFactor ScaleCharToQuantum(135)
#define HighlightFactor ScaleCharToQuantum(190)
#define ShadowFactor ScaleCharToQuantum(190)
#define RaiseImageTag "Raise/Image"
#define TroughFactor ScaleCharToQuantum(135)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    foreground,
    background;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(raise_info != (RectangleInfo *) NULL);
  /* the bevel on both sides must fit inside the image */
  if ((image->columns <= (raise_info->width << 1)) ||
      (image->rows <= (raise_info->height << 1)))
    ThrowBinaryException(OptionError,"ImageSizeMustExceedBevelWidth",
      image->filename);
  /* raise lightens top/left and darkens bottom/right; lower inverts that */
  foreground=QuantumRange;
  background=(Quantum) 0;
  if (raise == MagickFalse)
    {
      foreground=(Quantum) 0;
      background=QuantumRange;
    }
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /*
    Raise image.  The image is processed in three horizontal bands: the top
    bevel rows, the middle rows (left/right bevels only), and the bottom
    bevel rows.  Rows are independent, so each band runs in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,raise_info->height,1)
#endif
  for (y=0; y < (ssize_t) raise_info->height; y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* left diagonal of the top-left corner: highlight */
    for (x=0; x < y; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* top edge between the corner diagonals: accentuate */
    for ( ; x < (ssize_t) (image->columns-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*AccentuateFactor+
          (double) foreground*(QuantumRange-AccentuateFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* right diagonal of the top-right corner: shadow */
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /* middle band: only the left (highlight) and right (shadow) bevels */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-2*raise_info->height,1)
#endif
  for (y=(ssize_t) raise_info->height; y < (ssize_t) (image->rows-raise_info->height); y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) raise_info->width; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    /* interior pixels are left untouched; just advance the pointer */
    for ( ; x < (ssize_t) (image->columns-raise_info->width); x++)
      q+=GetPixelChannels(image);
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /* bottom band: highlight corner, trough bottom edge, shadow corner */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows-raise_info->height,1)
#endif
  for (y=(ssize_t) (image->rows-raise_info->height); y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      i,
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    /* image->rows-y is the distance from the bottom edge */
    for (x=0; x < (ssize_t) (image->rows-y); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*HighlightFactor+(double)
          foreground*(QuantumRange-HighlightFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) (image->columns-(image->rows-y)); x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*TroughFactor+
          (double) background*(QuantumRange-TroughFactor)));
      }
      q+=GetPixelChannels(image);
    }
    for ( ; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampToQuantum(QuantumScale*((double) q[i]*ShadowFactor+(double)
          background*(QuantumRange-ShadowFactor)));
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RaiseImage)
#endif
        proceed=SetImageProgress(image,RaiseImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
|
GB_binop__iseq_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB)
// A.*B function (eWiseMult): GB (_AemultB_02__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_03__iseq_int8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__iseq_int8)
// A*D function (colscale): GB (_AxD__iseq_int8)
// D*A function (rowscale): GB (_DxB__iseq_int8)
// C+=B function (dense accum): GB (_Cdense_accumB__iseq_int8)
// C+=b function (dense accum): GB (_Cdense_accumb__iseq_int8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__iseq_int8)
// C=scalar+B GB (_bind1st__iseq_int8)
// C=scalar+B' GB (_bind1st_tran__iseq_int8)
// C=A+scalar GB (_bind2nd__iseq_int8)
// C=A'+scalar GB (_bind2nd_tran__iseq_int8)
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT8 || GxB_NO_ISEQ_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense; no mask, no accumulator.
// The eWiseAdd here applies GB_BINOP: Cx [p] = (Ax [p] == Bx [p]).
GrB_Info GB (_Cdense_ewise3_noaccum__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C,
// using the slicing of B computed by the caller (B_ek_slicing).
GrB_Info GB (_Cdense_accumB__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b (passed as untyped GB_void) into dense C.
GrB_Info GB (_Cdense_accumb__iseq_int8)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type int8_t
        int8_t bwork = (*((int8_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the return above always fires; harmless
    // artifact of the code generator.
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale each column j of A by the diagonal entry D(j,j).
GrB_Info GB (_AxD__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx aliases C->x; the template fills it via GB_BINOP
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale each row i of B by the diagonal entry D(i,i).
GrB_Info GB (_DxB__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx aliases C->x; the template fills it via GB_BINOP
    int8_t *restrict Cx = (int8_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B, over the union of the patterns of A and B.
// The C_to_M/C_to_A/C_to_B maps and TaskList partition the work across tasks.
GrB_Info GB (_AaddB__iseq_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // workspaces for slicing M, A, and B; freed by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult (method 01): C = A.*B or C<M> = A.*B, over the intersection of
// the patterns of A and B.
GrB_Info GB (_AemultB_01__iseq_int8)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hypersparse and B is
// bitmap/full.  flipxy requests z=f(y,x) instead of z=f(x,y); for ISEQ the
// operator is commutative so GB_BINOP_FLIP is 0 and the flip is a no-op.
GrB_Info GB (_AemultB_02__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 03): C<M> = A.*B where M is sparse/hypersparse and both
// A and B are bitmap/full; iterates over the entries of M.
GrB_Info GB (_AemultB_03__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is held as bitmap.
GrB_Info GB (_AemultB_bitmap__iseq_int8)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the ISEQ operator with the scalar bound to the
// first argument: Cx [p] = (x == Bx [p]) for every entry present in B.
GrB_Info GB (_bind1st__iseq_int8)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped input/output arrays
    int8_t *out = (int8_t *) Cx_output ;
    const int8_t *in = (const int8_t *) Bx_input ;
    const int8_t scalar = (*((const int8_t *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries absent from the bitmap of B
        if (GBB (Bb, k))
        {
            // out(k) = (scalar == B(k))
            out [k] = (scalar == in [k]) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the ISEQ operator with the scalar bound to the
// second argument: Cx [p] = (Ax [p] == y) for every entry present in A.
GrB_Info GB (_bind2nd__iseq_int8)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // typed views of the untyped input/output arrays
    int8_t *out = (int8_t *) Cx_output ;
    const int8_t *in = (const int8_t *) Ax_input ;
    const int8_t scalar = (*((const int8_t *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        // skip entries absent from the bitmap of A
        if (GBB (Ab, k))
        {
            // out(k) = (A(k) == scalar)
            out [k] = (in [k] == scalar) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with the scalar bound
// to the first argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind1st_tran__iseq_int8)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t x = (*((const int8_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any code that follows this function
    #undef  GB_ATYPE
    #define GB_ATYPE \
    int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
// C = op (A', y): transpose A and apply the operator with the scalar bound
// to the second argument, via the GB_CAST_OP macro defined just above.
GrB_Info GB (_bind2nd_tran__iseq_int8)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int8_t y = (*((const int8_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}
#endif
|
magsac.h | #pragma once
#include <limits>
#include <chrono>
#include <memory>
#include "model.h"
#include "model_score.h"
#include "sampler.h"
#include "uniform_sampler.h"
#include <math.h>
#include "gamma_values.cpp"
#ifdef _WIN32
#include <ppl.h>
#endif
#include <gflags/gflags.h>
#include <glog/logging.h>
// The MAGSAC / MAGSAC++ robust estimator. DatumType is the point type and
// ModelEstimator provides minimal/non-minimal model fitting and residuals.
template <class DatumType, class ModelEstimator>
class MAGSAC
{
public:
	enum Version {
		// The original version of MAGSAC. It works well, however, can be quite slow in many cases.
		MAGSAC_ORIGINAL,
		// The recently proposed MAGSAC++ algorithm which keeps the accuracy of the original MAGSAC but is often orders of magnitude faster.
		MAGSAC_PLUS_PLUS };

	// Constructor. The initializer list is written in member declaration
	// order (number_of_irwls_iters first, then the protected members): C++
	// initializes members in declaration order regardless of the list's
	// written order, so the previous out-of-order list was misleading and
	// triggered -Wreorder.
	MAGSAC(const Version magsac_version_ = Version::MAGSAC_PLUS_PLUS) :
		number_of_irwls_iters(1),
		magsac_version(magsac_version_),
		iteration_limit(std::numeric_limits<size_t>::max()),
		mininum_iteration_number(50),
		maximum_threshold(10.0),
		core_number(1),
		time_limit(std::numeric_limits<double>::max()),
		desired_fps(-1),
		apply_post_processing(true),
		point_number(0),
		last_iteration_number(0),
		log_confidence(0),
		partition_number(5),
		interrupting_threshold(1.0)
	{
	}

	~MAGSAC() {}

	// A function to run MAGSAC. Returns true if a model with positive score
	// was found.
	bool run(
		const cv::Mat &points_, // The input data points
		const double confidence_, // The required confidence in the results
		ModelEstimator& estimator_, // The model estimator
		gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_, // The sampler used
		gcransac::Model &obtained_model_, // The estimated model parameters
		int &iteration_number_, // The number of iterations done
		ModelScore &model_score_); // The score of the estimated model

	// A function to set the maximum inlier-outlier threshold
	void setMaximumThreshold(const double maximum_threshold_)
	{
		maximum_threshold = maximum_threshold_;
	}

	// A function to set the inlier-outlier threshold used for speeding up the procedure
	// and for determining the required number of iterations.
	void setReferenceThreshold(const double threshold_)
	{
		interrupting_threshold = threshold_;
	}

	// Returns the reference (interrupting) threshold.
	double getReferenceThreshold()
	{
		return interrupting_threshold;
	}

	// Setting the flag determining if post-processing is needed
	void applyPostProcessing(bool value_)
	{
		apply_post_processing = value_;
	}

	// A function to set the maximum number of iterations
	void setIterationLimit(size_t iteration_limit_)
	{
		iteration_limit = iteration_limit_;
	}

	// A function to set the minimum number of iterations
	void setMinimumIterationNumber(size_t mininum_iteration_number_)
	{
		mininum_iteration_number = mininum_iteration_number_;
	}

	// A function to set the number of cores used in the original MAGSAC algorithm.
	// In MAGSAC++, it is not used. Note that when multiple MAGSACs run in parallel,
	// it is beneficial to keep the core number one for each independent MAGSAC.
	// Otherwise, the threads will act weirdly.
	void setCoreNumber(size_t core_number_)
	{
		if (magsac_version == MAGSAC_PLUS_PLUS)
			LOG(ERROR) << "Setting the core number for MAGSAC++ is deprecated.";
		core_number = core_number_;
	}

	// Setting the number of partitions used in the original MAGSAC algorithm
	// to speed up the procedure. In MAGSAC++, this parameter is not used.
	void setPartitionNumber(size_t partition_number_)
	{
		if (magsac_version == MAGSAC_PLUS_PLUS)
			LOG(ERROR) << "Setting the partition number for MAGSAC++ is deprecated.";
		partition_number = partition_number_;
	}

	// A function to set a desired minimum frames-per-second (FPS) value.
	void setFPS(int fps_)
	{
		desired_fps = fps_; // The required FPS.
		// The time limit which the FPS implies
		time_limit = fps_ <= 0 ?
			std::numeric_limits<double>::max() :
			1.0 / fps_;
	}

	// The post-processing algorithm applying sigma-consensus to the input model once.
	bool postProcessing(
		const cv::Mat &points, // All data points
		const gcransac::Model &so_far_the_best_model, // The input model to be improved
		gcransac::Model &output_model, // The improved model parameters
		ModelScore &output_score, // The score of the improved model
		const ModelEstimator &estimator); // The model estimator

	// The function determining the quality/score of a model using the original MAGSAC
	// criterion. Note that this function is significantly slower than the quality
	// function of MAGSAC++.
	void getModelQuality(
		const cv::Mat& points_, // All data points
		const gcransac::Model& model_, // The input model
		const ModelEstimator& estimator_, // The model estimator
		double& marginalized_iteration_number_, // The required number of iterations marginalized over the noise scale
		double& score_); // The score/quality of the model

	// The function determining the quality/score of a
	// model using the MAGSAC++ criterion.
	void getModelQualityPlusPlus(
		const cv::Mat &points_, // All data points
		const gcransac::Model &model_, // The model parameter
		const ModelEstimator &estimator_, // The model estimator class
		double &score_, // The score to be calculated
		const double &previous_best_score_); // The score of the previous so-far-the-best model

	// Number of iteratively re-weighted least-squares iterations in MAGSAC++
	size_t number_of_irwls_iters;

protected:
	Version magsac_version; // The version of MAGSAC used
	size_t iteration_limit; // Maximum number of iterations allowed
	size_t mininum_iteration_number; // Minimum number of iteration before terminating
	double maximum_threshold; // The maximum sigma value
	size_t core_number; // Number of core used in sigma-consensus
	double time_limit; // A time limit after the algorithm is interrupted
	int desired_fps; // The desired FPS (TODO: not tested with MAGSAC)
	bool apply_post_processing; // Decides if the post-processing step should be applied
	int point_number; // The current point number
	int last_iteration_number; // The iteration number implied by the last run of sigma-consensus
	double log_confidence; // The logarithm of the required confidence
	size_t partition_number; // Number of partitions used to speed up sigma-consensus
	double interrupting_threshold; // A threshold to speed up MAGSAC by interrupting the sigma-consensus procedure whenever there is no chance of being better than the previous so-far-the-best model

	// Sigma-consensus of the original MAGSAC: refines 'model_' and scores it.
	bool sigmaConsensus(
		const cv::Mat& points_,
		const gcransac::Model& model_,
		gcransac::Model& refined_model_,
		ModelScore& score_,
		const ModelEstimator& estimator_,
		const ModelScore& best_score_);

	// Sigma-consensus of MAGSAC++: refines 'model_' and scores it.
	bool sigmaConsensusPlusPlus(
		const cv::Mat &points_,
		const gcransac::Model& model_,
		gcransac::Model& refined_model_,
		ModelScore &score_,
		const ModelEstimator &estimator_,
		const ModelScore &best_score_);
};
// Runs the main MAGSAC / MAGSAC++ loop: repeatedly draws minimal samples,
// estimates candidate models, refines each candidate by sigma-consensus
// (original or ++ variant depending on 'magsac_version'), and keeps the
// best-scored model. Returns true if a model with positive score was found;
// the model, its score and the iteration count are written to the outputs.
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::run(
	const cv::Mat& points_, // The input data points
	const double confidence_, // The required confidence in the results
	ModelEstimator& estimator_, // The model estimator
	gcransac::sampler::Sampler<cv::Mat, size_t> &sampler_, // The sampler used
	gcransac::Model& obtained_model_, // The estimated model parameters
	int& iteration_number_, // The number of iterations done
	ModelScore &model_score_) // The score of the estimated model
{
	// Initialize variables
	std::chrono::time_point<std::chrono::system_clock> start, end; // Variables for time measuring: start and end times
	std::chrono::duration<double> elapsed_seconds; // Variables for time measuring: elapsed time
	log_confidence = log(1.0 - confidence_); // The logarithm of 1 - confidence
	point_number = points_.rows; // Number of points
	constexpr size_t sample_size = estimator_.sampleSize(); // The sample size required for the estimation

	// Terminate early when there are not enough points for estimating a model.
	// This is checked before any allocation; the explicit cast avoids a
	// signed/unsigned comparison (cv::Mat::rows is a non-negative int here).
	if (static_cast<size_t>(points_.rows) < sample_size)
	{
		LOG(WARNING) << "There are not enough points for applying robust estimation. Minimum is "
			<< static_cast<int>(sample_size)
			<< "; while "
			<< static_cast<int>(points_.rows)
			<< " are given.";
		return false;
	}

	size_t max_iteration = iteration_limit; // The maximum number of iterations initialized to the iteration limit
	int iteration = 0; // Current number of iterations
	gcransac::Model so_far_the_best_model; // Current best model
	ModelScore so_far_the_best_score; // The score of the current best model
	std::unique_ptr<size_t[]> minimal_sample(new size_t[sample_size]); // The sample used for the estimation

	// The pool of point indices from which the sampler draws minimal samples
	std::vector<size_t> pool(points_.rows);
	for (int point_idx = 0; point_idx < point_number; ++point_idx)
		pool[point_idx] = point_idx;

	// Set the start time variable if there is some time limit set
	if (desired_fps > -1)
		start = std::chrono::system_clock::now();

	constexpr size_t max_unsuccessful_model_generations = 50;

	// Main MAGSAC iteration. 'iteration' is never negative, so the casts to
	// size_t are safe and silence the signed/unsigned comparison.
	while (mininum_iteration_number > static_cast<size_t>(iteration) ||
		static_cast<size_t>(iteration) < max_iteration)
	{
		// Increase the current iteration number
		++iteration;

		// Sample a minimal subset
		std::vector<gcransac::Model> models; // The set of estimated models
		size_t unsuccessful_model_generations = 0; // The number of unsuccessful model generations

		// Try to select a minimal sample and estimate the implied model parameters
		while (++unsuccessful_model_generations < max_unsuccessful_model_generations)
		{
			// Get a minimal sample randomly
			if (!sampler_.sample(pool, // The index pool from which the minimal sample can be selected
				minimal_sample.get(), // The minimal sample
				sample_size)) // The size of a minimal sample
				continue;

			// Check if the selected sample is valid before estimating the model
			// parameters which usually takes more time.
			if (!estimator_.isValidSample(points_, // All points
				minimal_sample.get())) // The current sample
				continue;

			// Estimate the model from the minimal sample
			if (estimator_.estimateModel(points_, // All data points
				minimal_sample.get(), // The selected minimal sample
				&models)) // The estimated models
				break;
		}

		// Account for the failed sampling attempts in the iteration counter.
		// If no usable model was generated at all, 'models' is empty and the
		// loop below is simply skipped.
		iteration += unsuccessful_model_generations - 1;

		// Select the so-far-the-best from the estimated models
		for (const auto &model : models)
		{
			ModelScore score; // The score of the current model
			gcransac::Model refined_model; // The refined model parameters

			// Apply sigma-consensus to refine the model parameters by marginalizing over the noise level sigma
			bool success;
			if (magsac_version == Version::MAGSAC_ORIGINAL)
				success = sigmaConsensus(points_,
					model,
					refined_model,
					score,
					estimator_,
					so_far_the_best_score);
			else
				success = sigmaConsensusPlusPlus(points_,
					model,
					refined_model,
					score,
					estimator_,
					so_far_the_best_score);

			// Continue if the model was rejected
			if (!success || score.score == -1)
				continue;

			// Save the iteration number when the current model is found
			score.iteration = iteration;

			// Update the best model parameters if needed
			if (so_far_the_best_score < score)
			{
				so_far_the_best_model = refined_model; // Update the best model parameters
				so_far_the_best_score = score; // Update the best model's score
				max_iteration = MIN(max_iteration, last_iteration_number); // Update the max iteration number, but do not allow to increase
			}
		}

		// Update the time parameters if a time limit is set
		if (desired_fps > -1)
		{
			end = std::chrono::system_clock::now();
			elapsed_seconds = end - start;

			// Interrupt if the time limit is exceeded
			if (elapsed_seconds.count() > time_limit)
				break;
		}
	}

	// Apply sigma-consensus as a post processing step if needed and the estimated model is valid
	if (apply_post_processing)
	{
		// TODO
	}

	obtained_model_ = so_far_the_best_model;
	iteration_number_ = iteration;
	model_score_ = so_far_the_best_score;

	return so_far_the_best_score.score > 0;
}
// Post-processing step that would apply sigma-consensus once to polish the
// input model. Currently unimplemented: it only emits a warning and reports
// failure to the caller.
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::postProcessing(
	const cv::Mat &points, // All data points (unused until implemented)
	const gcransac::Model &so_far_the_best_model, // The model to be improved (unused)
	gcransac::Model &output_model, // Would receive the improved model (unused)
	ModelScore &output_score, // Would receive the improved model's score (unused)
	const ModelEstimator &estimator) // The model estimator (unused)
{
	LOG(WARNING) << "Sigma-consensus++ is not implemented yet as post-processing.";
	return false;
}
// The original MAGSAC sigma-consensus: refines 'model_' by marginalizing over
// the noise scale sigma. Models are fit by weighted least-squares over several
// sigma partitions; each fit contributes per-point Gaussian probabilities that
// are accumulated into weights for a final weighted fit. Returns true and
// fills refined_model_ / score_ on success, false if the model is rejected.
// Fix vs. the previous revision: if no residual is below the maximum
// threshold, 'all_residuals' is empty and calling back() on it was undefined
// behavior; such a model is now rejected explicitly.
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensus(
	const cv::Mat &points_, // All data points
	const gcransac::Model& model_, // The initial model to be refined
	gcransac::Model& refined_model_, // Output: the refined model
	ModelScore &score_, // Output: the score of the refined model
	const ModelEstimator &estimator_, // The model estimator
	const ModelScore &best_score_) // The score of the previous so-far-the-best model
{
	// Set up the parameters
	constexpr double L = 1.05;
	constexpr double k = ModelEstimator::getSigmaQuantile();
	constexpr double threshold_to_sigma_multiplier = 1.0 / k;
	constexpr size_t sample_size = estimator_.sampleSize();
	static auto comparator = [](std::pair<double, int> left, std::pair<double, int> right) { return left.first < right.first; };
	const int point_number = points_.rows;
	double current_maximum_sigma = this->maximum_threshold;

	// Calculating the residuals
	std::vector< std::pair<double, size_t> > all_residuals;
	all_residuals.reserve(point_number);

	// If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better
	if (best_score_.inlier_number > 0)
	{
		// Number of inliers which should be exceeded
		int points_remaining = best_score_.inlier_number;

		// Collect the points which are closer than the threshold which the maximum sigma implies
		for (int point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				all_residuals.emplace_back(std::make_pair(residual, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					--points_remaining;
			}

			// Interrupt if there is no chance of being better
			// TODO: replace this part by SPRT test
			if (point_number - point_idx < points_remaining)
				return false;
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = best_score_.inlier_number - points_remaining;
	}
	else
	{
		// The number of really close points
		size_t points_close = 0;

		// Collect the points which are closer than the threshold which the maximum sigma implies.
		// The loop index is int (as in the branch above) to avoid a
		// signed/unsigned comparison with 'point_number'.
		for (int point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				all_residuals.emplace_back(std::make_pair(residual, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					++points_close;
			}
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = points_close;
	}

	std::vector<gcransac::Model> sigma_models;
	std::vector<size_t> sigma_inliers;
	std::vector<double> final_weights;

	// The number of possible inliers
	const size_t possible_inlier_number = all_residuals.size();

	// If no point is closer than the maximum threshold, the model has no
	// support at all; accessing all_residuals.back() below would be undefined
	// behavior, so reject the model here.
	if (all_residuals.empty())
		return false;

	// Sort the residuals in ascending order
	std::sort(all_residuals.begin(), all_residuals.end(), comparator);

	// The maximum threshold is set to be slightly bigger than the distance of the
	// farthest possible inlier.
	current_maximum_sigma =
		all_residuals.back().first + std::numeric_limits<double>::epsilon();

	const double sigma_step = current_maximum_sigma / partition_number;

	last_iteration_number = 10000;

	score_.score = 0;

	// The weights calculated by each parallel process
	std::vector<std::vector<double>> point_weights_par(partition_number, std::vector<double>(possible_inlier_number, 0));

	// If OpenMP is used, calculate things in parallel
#ifdef USE_OPENMP
#pragma omp parallel for num_threads(core_number)
	for (int partition_idx = 0; partition_idx < static_cast<int>(partition_number); ++partition_idx)
	{
		// The maximum sigma value in the current partition
		const double max_sigma = (partition_idx + 1) * sigma_step;

		// Find the last element which has smaller distance than 'max_threshold'
		// Since the vector is ordered binary search can be used to find that particular element.
		const auto &last_element = std::upper_bound(all_residuals.begin(), all_residuals.end(), std::make_pair(max_sigma, 0), comparator);
		const size_t sigma_inlier_number = last_element - all_residuals.begin();

		// Put the indices into a vector
		std::vector<size_t> sigma_inliers;
		sigma_inliers.reserve(sigma_inlier_number);

		// Store the points which are closer than the current sigma limit
		for (size_t relative_point_idx = 0; relative_point_idx < sigma_inlier_number; ++relative_point_idx)
			sigma_inliers.emplace_back(all_residuals[relative_point_idx].second);

		// Check if there are enough inliers to fit a model
		if (sigma_inliers.size() > sample_size)
		{
			// Estimating the model which the current set of inliers imply
			std::vector<gcransac::Model> sigma_models;
			estimator_.estimateModelNonminimal(points_,
				&(sigma_inliers)[0],
				sigma_inlier_number,
				&sigma_models);

			// If the estimation was successful calculate the implied probabilities
			if (sigma_models.size() == 1)
			{
				const double max_sigma_squared_2 = 2 * max_sigma * max_sigma;
				double residual_i_2, // The residual of the i-th point
					probability_i; // The probability of the i-th point

				// Iterate through all points to estimate the related probabilities
				for (size_t relative_point_idx = 0; relative_point_idx < sigma_inliers.size(); ++relative_point_idx)
				{
					// TODO: Replace with Chi-square instead of normal distribution
					const size_t &point_idx = sigma_inliers[relative_point_idx];

					// Calculate the residual of the current point
					residual_i_2 = estimator_.squaredResidual(points_.row(point_idx),
						sigma_models[0]);

					// Calculate the probability of the i-th point assuming Gaussian distribution
					// TODO: replace by Chi-square distribution
					probability_i = exp(-residual_i_2 / max_sigma_squared_2);

					// Store the probability of the i-th point coming from the current partition
					point_weights_par[partition_idx][relative_point_idx] += probability_i;
				}
			}
		}
	}
#else
	LOG(ERROR) << "Not implemented yet.";
#endif

	// The weights used for the final weighted least-squares fitting
	// If point normalization is applied the indexing of the weights differs.
	// In that case
	// final_weights[i] is the weight of inlier[i]-th point
	// Otherwise,
	// final_weights[i] is the weight of i-th point
	if constexpr (ModelEstimator::doesNormalizationForNonMinimalFitting())
		final_weights.reserve(possible_inlier_number);
	else
		final_weights.resize(point_number, 0);

	// Collect all points which has higher probability of being inlier than zero
	sigma_inliers.reserve(possible_inlier_number);
	for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx)
	{
		// Calculate the weight of the current point
		double weight = 0.0;
		for (size_t partition_idx = 0; partition_idx < partition_number; ++partition_idx)
			weight += point_weights_par[partition_idx][point_idx];

		// If the weight is approx. zero, continue.
		if (weight < std::numeric_limits<double>::epsilon())
			continue;

		// Store the index and weight of the current point
		sigma_inliers.emplace_back(all_residuals[point_idx].second);
		if constexpr (ModelEstimator::doesNormalizationForNonMinimalFitting())
			final_weights.emplace_back(weight);
		else
			final_weights[point_idx] = weight;
	}

	// If there are fewer inliers than the size of the minimal sample interupt the procedure
	if (sigma_inliers.size() < sample_size)
		return false;

	// Estimate the model parameters using weighted least-squares fitting
	if (!estimator_.estimateModelNonminimal(
		points_, // All input points
		&(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier
		static_cast<int>(sigma_inliers.size()), // Number of possible inliers
		&sigma_models, // Estimated models
		&(final_weights)[0])) // Weights of points
		return false;

	bool is_model_updated = false;

	if (sigma_models.size() == 1 && // If only a single model is estimated
		estimator_.isValidModel(sigma_models.back(),
			points_,
			sigma_inliers,
			&(sigma_inliers)[0],
			interrupting_threshold,
			is_model_updated)) // and it is valid
	{
		// Return the refined model
		refined_model_ = sigma_models.back();

		// Calculate the score of the model and the implied iteration number
		double marginalized_iteration_number;
		getModelQuality(points_, // All the input points
			refined_model_, // The estimated model
			estimator_, // The estimator
			marginalized_iteration_number, // The marginalized inlier ratio
			score_.score); // The marginalized score

		if (marginalized_iteration_number < 0 || std::isnan(marginalized_iteration_number))
			last_iteration_number = std::numeric_limits<int>::max();
		else
			last_iteration_number = static_cast<int>(round(marginalized_iteration_number));
		return true;
	}
	return false;
}
// MAGSAC++ sigma-consensus: polishes 'model_' by iteratively re-weighted
// least-squares (IRWLS), where the per-point weights marginalize over the
// noise scale via a precomputed incomplete-gamma lookup table
// (stored_gamma_values). Returns true and fills refined_model_ / score_ on
// success, false if the model is rejected.
// Fix vs. the previous revision: the implied iteration number was computed as
// log_confidence / log(1 - inlier_ratio^m) without a guard; with zero inliers
// the denominator is log(1) == 0 and the division produced inf/NaN, which is
// undefined behavior when converted to int. The computation is now guarded,
// mirroring the NaN/negative guard in sigmaConsensus().
template <class DatumType, class ModelEstimator>
bool MAGSAC<DatumType, ModelEstimator>::sigmaConsensusPlusPlus(
	const cv::Mat &points_, // All data points
	const gcransac::Model& model_, // The initial model to be refined
	gcransac::Model& refined_model_, // Output: the refined model
	ModelScore &score_, // Output: the score of the refined model
	const ModelEstimator &estimator_, // The model estimator
	const ModelScore &best_score_) // The score of the previous so-far-the-best model
{
	// The degrees of freedom of the data from which the model is estimated.
	// E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4.
	constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom();

	// A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals
	constexpr double k = ModelEstimator::getSigmaQuantile();

	// A multiplier to convert residual values to sigmas
	constexpr double threshold_to_sigma_multiplier = 1.0 / k;

	// Calculating k^2 / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	constexpr double squared_k_per_2 = k * k / 2.0;

	// Calculating (DoF - 1) / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0;

	// TODO: check
	constexpr double C = ModelEstimator::getC();

	// The size of a minimal sample used for the estimation
	constexpr size_t sample_size = estimator_.sampleSize();

	// Calculating 2^(DoF - 1) which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double two_ad_dof = std::pow(2.0, dof_minus_one_per_two);

	// Calculating C * 2^(DoF - 1) which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double C_times_two_ad_dof = C * two_ad_dof;

	// Calculating the gamma value of (DoF - 1) / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double gamma_value = tgamma(dof_minus_one_per_two);

	// Calculating the upper incomplete gamma value of (DoF - 1) / 2 with k^2 / 2.
	constexpr double gamma_k = ModelEstimator::getUpperIncompleteGammaOfK();

	// Calculating the lower incomplete gamma value of (DoF - 1) / 2 which will be used for the estimation and,
	// due to being constant, it is better to calculate it a priori.
	static const double gamma_difference = gamma_value - gamma_k;

	// The number of points provided
	const int point_number = points_.rows;

	// The manually set maximum inlier-outlier threshold
	double current_maximum_sigma = this->maximum_threshold;

	// Calculating the pairs of (residual, point index).
	std::vector< std::pair<double, size_t> > residuals;

	// Occupy the maximum required memory to avoid doing it later.
	residuals.reserve(point_number);

	// If it is not the first run, consider the previous best and interrupt the validation when there is no chance of being better
	if (best_score_.inlier_number > 0)
	{
		// Number of points close to the previous so-far-the-best model.
		// This model should have more inliers.
		int points_remaining = best_score_.inlier_number;

		// Collect the points which are closer than the threshold which the maximum sigma implies
		for (int point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				residuals.emplace_back(std::make_pair(residual, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					--points_remaining;
			}

			// Interrupt if there is no chance of being better
			// TODO: replace this part by SPRT test
			if (point_number - point_idx < points_remaining)
				return false;
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = best_score_.inlier_number - points_remaining;
	}
	else
	{
		// The number of really close points
		size_t points_close = 0;

		// Collect the points which are closer than the threshold which the maximum sigma implies.
		// The loop index is int (as in the branch above) to avoid a
		// signed/unsigned comparison with 'point_number'.
		for (int point_idx = 0; point_idx < point_number; ++point_idx)
		{
			// Calculate the residual of the current point
			const double residual = estimator_.residual(points_.row(point_idx), model_);
			if (current_maximum_sigma > residual)
			{
				// Store the residual of the current point and its index
				residuals.emplace_back(std::make_pair(residual, point_idx));

				// Count points which are closer than a reference threshold to speed up the procedure
				if (residual < interrupting_threshold)
					++points_close;
			}
		}

		// Store the number of really close inliers just to speed up the procedure
		// by interrupting the next verifications.
		score_.inlier_number = points_close;
	}

	// Models fit by weighted least-squares fitting
	std::vector<gcransac::Model> sigma_models;

	// Points used in the weighted least-squares fitting
	std::vector<size_t> sigma_inliers;

	// Weights used in the the weighted least-squares fitting
	std::vector<double> sigma_weights;

	// Number of points considered in the fitting
	const size_t possible_inlier_number = residuals.size();

	// Occupy the memory to avoid doing it inside the calculation possibly multiple times
	sigma_inliers.reserve(possible_inlier_number);

	// Occupy the memory to avoid doing it inside the calculation possibly multiple times
	sigma_weights.reserve(possible_inlier_number);

	// Calculate 2 * \sigma_{max}^2 a priori
	const double squared_sigma_max_2 = current_maximum_sigma * current_maximum_sigma * 2.0;

	// Divide C * 2^(DoF - 1) by \sigma_{max} a priori
	const double one_over_sigma = C_times_two_ad_dof / current_maximum_sigma;

	// Calculate the weight of a point with 0 residual (i.e., fitting perfectly) a priori
	const double weight_zero = one_over_sigma * gamma_difference;

	// Initialize the polished model with the initial one
	gcransac::Model polished_model = model_;

	// A flag to determine if the initial model has been updated
	bool updated = false;

	// Do the iteratively re-weighted least squares fitting
	for (size_t iterations = 0; iterations < number_of_irwls_iters; ++iterations)
	{
		// If the current iteration is not the first, the set of possibly inliers
		// (i.e., points closer than the maximum threshold) have to be recalculated.
		if (iterations > 0)
		{
			// The number of points close to the model
			size_t points_close = 0;

			// Remove everything from the residual vector
			residuals.clear();

			// Collect the points which are closer than the maximum threshold
			for (int point_idx = 0; point_idx < point_number; ++point_idx)
			{
				// Calculate the residual of the current point
				const double residual = estimator_.residual(points_.row(point_idx), polished_model);
				if (current_maximum_sigma > residual)
				{
					// Store the residual of the current point and its index
					residuals.emplace_back(std::make_pair(residual, point_idx));

					// Count points which are closer than a reference threshold to speed up the procedure
					if (residual < interrupting_threshold)
						++points_close;
				}
			}

			// Store the number of really close inliers just to speed up the procedure
			// by interrupting the next verifications.
			score_.inlier_number = points_close;

			// Number of points closer than the threshold
			const size_t possible_inlier_number = residuals.size();

			// Clear the inliers and weights
			sigma_inliers.clear();
			sigma_weights.clear();

			// Occupy the memory for the inliers and weights
			sigma_inliers.reserve(possible_inlier_number);
			sigma_weights.reserve(possible_inlier_number);
		}

		if constexpr (!ModelEstimator::doesNormalizationForNonMinimalFitting())
			sigma_weights.resize(point_number, 0);

		// Calculate the weight of each point
		for (const auto &[residual, idx] : residuals)
		{
			// The weight
			double weight = 0.0;

			// If the residual is ~0, the point fits perfectly and it is handled differently
			if (residual < std::numeric_limits<double>::epsilon())
				weight = weight_zero;
			else
			{
				// Calculate the squared residual
				const double squared_residual = residual * residual;

				// Get the position of the gamma value in the lookup table
				size_t x = round(precision_of_stored_gammas * squared_residual / squared_sigma_max_2);

				// Put the index of the point into the vector of points used for the least squares fitting
				sigma_inliers.emplace_back(idx);

				// If the sought gamma value is not stored in the lookup, return the closest element
				if (stored_gamma_number < x)
					x = stored_gamma_number;

				// Calculate the weight of the point
				weight = one_over_sigma * (stored_gamma_values[x] - gamma_k);
			}

			// Store the weight of the point
			if constexpr (ModelEstimator::doesNormalizationForNonMinimalFitting())
				sigma_weights.emplace_back(weight);
			else
				sigma_weights[idx] = weight;
		}

		// If there are fewer than the minimum point close to the model,
		// terminate.
		if (sigma_inliers.size() < sample_size)
			return false;

		// Estimate the model parameters using weighted least-squares fitting
		if (!estimator_.estimateModelNonminimal(
			points_, // All input points
			&(sigma_inliers)[0], // Points which have higher than 0 probability of being inlier
			static_cast<int>(sigma_inliers.size()), // Number of possible inliers
			&sigma_models, // Estimated models
			&(sigma_weights)[0])) // Weights of points
		{
			// If the estimation failed and the iteration was never successfull,
			// terminate with failure.
			if (iterations == 0)
				return false;
			// Otherwise, if the iteration was successfull at least once,
			// simply break it.
			break;
		}

		// Update the model parameters
		polished_model = sigma_models[0];

		// Clear the vector of models and keep only the best
		sigma_models.clear();

		// The model has been updated
		updated = true;
	}

	bool is_model_updated = false;

	if (updated && // If the model has been updated
		estimator_.isValidModel(polished_model,
			points_,
			sigma_inliers,
			&(sigma_inliers[0]),
			interrupting_threshold,
			is_model_updated)) // and it is valid
	{
		// Return the refined model
		refined_model_ = polished_model;

		// Calculate the score of the model and the implied iteration number
		double marginalized_iteration_number;
		getModelQualityPlusPlus(points_, // All the input points
			refined_model_, // The estimated model
			estimator_, // The estimator
			score_.score, // The marginalized score
			best_score_.score); // The score of the previous so-far-the-best model

		// Update the implied iteration number. Guard the division: with zero
		// inliers, pow(...) is 0 and log(1.0) is 0, so the unguarded ratio
		// would be inf/NaN (undefined behavior when converted to int).
		const double log_failure_probability =
			log(1.0 - std::pow(static_cast<double>(score_.inlier_number) / point_number, sample_size));
		if (log_failure_probability < 0.0) // At least one inlier was found
			last_iteration_number = static_cast<int>(log_confidence / log_failure_probability);
		else // No inliers: require the maximum number of iterations
			last_iteration_number = std::numeric_limits<int>::max();
		return true;
	}
	return false;
}
// Computes the MAGSAC++ quality of a model: each point contributes a loss that
// is marginalized over the noise scale sigma (up to maximum_threshold), and the
// final score is the inverse of the accumulated loss. The loop exits early once
// the running loss already exceeds that of the so-far-the-best model.
template <class DatumType, class ModelEstimator>
void MAGSAC<DatumType, ModelEstimator>::getModelQualityPlusPlus(
    const cv::Mat &points_,             // All data points, one point per row
    const gcransac::Model &model_,      // The model parameter
    const ModelEstimator &estimator_,   // The model estimator class
    double &score_,                     // Output: the score (1 / total loss; higher is better)
    const double &previous_best_score_) // The score of the previous so-far-the-best model
{
    // The degrees of freedom of the data from which the model is estimated.
    // E.g., for models coming from point correspondences (x1,y1,x2,y2), it is 4.
    constexpr size_t degrees_of_freedom = ModelEstimator::getDegreesOfFreedom();
    // A 0.99 quantile of the Chi^2-distribution to convert sigma values to residuals
    constexpr double k = ModelEstimator::getSigmaQuantile();
    // A multiplier to convert residual values to sigmas
    constexpr double threshold_to_sigma_multiplier = 1.0 / k;
    // Calculating k^2 / 2 a priori since it is constant.
    // NOTE(review): not referenced anywhere below in this function.
    constexpr double squared_k_per_2 = k * k / 2.0;
    // Calculating (DoF - 1) / 2 a priori since it is constant.
    constexpr double dof_minus_one_per_two = (degrees_of_freedom - 1.0) / 2.0;
    // Calculating (DoF + 1) / 2 a priori since it is constant.
    constexpr double dof_plus_one_per_two = (degrees_of_freedom + 1.0) / 2.0;
    // TODO: check -- NOTE(review): not referenced anywhere below in this function.
    constexpr double C = 0.25;
    // Calculating 2^((DoF - 1) / 2) a priori since it is constant.
    // (The exponent is dof_minus_one_per_two, not DoF - 1.)
    static const double two_ad_dof_minus_one = std::pow(2.0, dof_minus_one_per_two);
    // Calculating 2^((DoF + 1) / 2) a priori since it is constant.
    static const double two_ad_dof_plus_one = std::pow(2.0, dof_plus_one_per_two);
    // The upper incomplete gamma value of k
    constexpr double gamma_value_of_k = ModelEstimator::getUpperIncompleteGammaOfK();
    // The lower incomplete gamma value of k
    constexpr double lower_gamma_value_of_k = ModelEstimator::getLowerIncompleteGammaOfK();
    // The number of points provided.
    // NOTE(review): int compared with a size_t loop index below; harmless since rows >= 0.
    const int point_number = points_.rows;
    // The previous best loss (score and loss are reciprocals of each other)
    const double previous_best_loss = 1.0 / previous_best_score_;
    // Convert the maximum threshold to a sigma value
    const double maximum_sigma = threshold_to_sigma_multiplier * maximum_threshold;
    // Calculate the squared maximum sigma
    const double maximum_sigma_2 = maximum_sigma * maximum_sigma;
    // Calculate \sigma_{max}^2 / 2
    const double maximum_sigma_2_per_2 = maximum_sigma_2 / 2.0;
    // Calculate 2 * \sigma_{max}^2
    const double maximum_sigma_2_times_2 = maximum_sigma_2 * 2.0;
    // Calculate the constant loss implied by any outlier
    const double outlier_loss = maximum_sigma * two_ad_dof_minus_one * lower_gamma_value_of_k;
    // Calculating 2^((DoF + 1) / 2) / \sigma_{max} a priori since it is constant.
    const double two_ad_dof_plus_one_per_maximum_sigma = two_ad_dof_plus_one / maximum_sigma;
    // The loss which a point implies
    double loss = 0.0,
    // The total loss regarding the current model
    total_loss = 0.0;
    // Iterate through all points to calculate the implied loss
    for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
    {
        // Calculate the residual of the current point
        const double residual =
            estimator_.residualForScoring(points_.row(point_idx), model_.descriptor);
        // If the residual is LARGER than the maximum threshold, consider the point
        // an outlier and add the constant outlier loss to the total loss.
        // (A previous comment here said "smaller", contradicting the condition.)
        if (maximum_threshold < residual)
            loss = outlier_loss;
        else // Otherwise, consider the point inlier, and calculate the implied loss
        {
            // Calculate the squared residual
            const double squared_residual = residual * residual;
            // Divide the squared residual by 2 * \sigma_{max}^2
            const double squared_residual_per_sigma = squared_residual / maximum_sigma_2_times_2;
            // Get the position of the gamma value in the lookup table
            size_t x = round(precision_of_stored_incomplete_gammas * squared_residual_per_sigma);
            // If the sought gamma value is not stored in the lookup, use the last element
            if (stored_incomplete_gamma_number < x)
                x = stored_incomplete_gamma_number;
            // Calculate the loss implied by the current point
            loss = maximum_sigma_2_per_2 * stored_lower_incomplete_gamma_values[x] +
                squared_residual / 4.0 * (stored_complete_gamma_values[x] -
                    gamma_value_of_k);
            loss = loss * two_ad_dof_plus_one_per_maximum_sigma;
        }
        // Update the total loss
        total_loss += loss;
        // Break the validation if there is no chance of being better than the previous
        // so-far-the-best model.
        if (previous_best_loss < total_loss)
            break;
    }
    // Calculate the score of the model from the total loss.
    // NOTE(review): a total loss of exactly zero would yield +inf here.
    score_ = 1.0 / total_loss;
}
// Computes the model quality by marginalizing over partitioned thresholds:
// the range [0, max inlier residual] is split into `partition_number` bins;
// each bin accumulates a soft (quadratic-falloff) inlier score and an inlier
// count. The score output is the sum of the bin scores, and the iteration
// number output is the RANSAC iteration count implied by each bin's inlier
// ratio, averaged over the bins.
template <class DatumType, class ModelEstimator>
void MAGSAC<DatumType, ModelEstimator>::getModelQuality(
    const cv::Mat &points_,                 // All data points
    const gcransac::Model &model_,          // The model parameter
    const ModelEstimator &estimator_,       // The model estimator class
    double &marginalized_iteration_number_, // Output: the marginalized iteration number
    double &score_)                         // Output: the score to be calculated
{
    // Set up the parameters.
    // NOTE(review): this constexpr initializer compiles only if sampleSize() is
    // usable in a constant expression (e.g. a static constexpr member) — the
    // object expression is then unevaluated; confirm in the estimator class.
    constexpr size_t sample_size = estimator_.sampleSize();
    const size_t point_number = points_.rows;
    // Getting the inliers: residual (and point index) of every point whose
    // residual is below the maximum threshold
    std::vector<std::pair<double, size_t>> all_residuals;
    all_residuals.reserve(point_number);
    double max_distance = 0;
    for (size_t point_idx = 0; point_idx < point_number; ++point_idx)
    {
        // Calculate the residual of the current point
        const double residual =
            estimator_.residualForScoring(points_.row(point_idx), model_.descriptor);
        // If the residual is smaller than the maximum threshold, add it to the set of possible inliers
        if (maximum_threshold > residual)
        {
            // Track the largest residual among the possible inliers (MAX is OpenCV's macro)
            max_distance = MAX(max_distance, residual);
            all_residuals.emplace_back(std::make_pair(residual, point_idx));
        }
    }
    // Set the maximum distance to be slightly bigger than that of the farthest possible inlier
    max_distance = max_distance +
        std::numeric_limits<double>::epsilon();
    // Number of possible inliers
    const size_t possible_inlier_number = all_residuals.size();
    // The extent of a partition
    const double threshold_step = max_distance / partition_number;
    // The maximum threshold considered in each partition
    std::vector<double> thresholds(partition_number);
    std::vector<double> thresholds_squared(partition_number);
    std::vector<double> thresholds_2_squared(partition_number);
    // Calculating the thresholds for each partition
    for (size_t i = 0; i < partition_number; ++i)
    {
        thresholds[i] = (i + 1) * threshold_step;
        thresholds_squared[i] = thresholds[i] * thresholds[i];
        // NOTE(review): thresholds_2_squared is computed but never read below.
        thresholds_2_squared[i] = 2 * thresholds_squared[i];
    }
    double residual_i, // Residual of the i-th point
        residual_i_squared, // Squared residual of the i-th point
        probability_i; // Probability of the i-th point given the model
    std::vector<double> inliers(partition_number, 0), // Inlier count for each partition
        probabilities(partition_number, 1); // Soft score for each partition (starts at 1)
    // For each possible inlier, credit every partition whose threshold exceeds
    // its residual with one inlier and a soft probability 1 - r^2 / t^2.
    for (size_t point_idx = 0; point_idx < possible_inlier_number; ++point_idx)
    {
        residual_i = all_residuals[point_idx].first;
        residual_i_squared = residual_i * residual_i;
        for (size_t i = 0; i < partition_number; ++i)
        {
            if (residual_i < thresholds[i])
            {
                probability_i = 1.0 - residual_i_squared / thresholds_squared[i];
                ++inliers[i];
                probabilities[i] += probability_i;
            }
        }
    }
    score_ = 0;
    marginalized_iteration_number_ = 0.0;
    // NOTE(review): a partition with zero inliers makes log(1 - 0) == 0 and the
    // division below produce inf; verify callers guarantee at least one inlier.
    for (auto i = 0; i < partition_number; ++i)
    {
        score_ += probabilities[i];
        marginalized_iteration_number_ += log_confidence / log(1.0 - std::pow(inliers[i] / point_number, sample_size));
    }
    marginalized_iteration_number_ = marginalized_iteration_number_ / partition_number;
}
|
target-14.c | #include <omp.h>
#include <stdlib.h>
/* Fills a device-allocated buffer from a target region, then re-reads it in a
   second target region and verifies the pattern, aborting on any mismatch. */
int
main ()
{
  void *buf;
  int mismatch;
  int dev = omp_get_default_device ();
  int host_dev = omp_get_initial_device ();

  /* Fall back to the initial (host) device if the default one is invalid.  */
  if (dev < 0 || dev >= omp_get_num_devices ())
    dev = host_dev;

  buf = omp_target_alloc (128 * sizeof (int), dev);
  if (buf == NULL)
    return 0;

  /* Write the pattern i + 7 through the raw device pointer.  */
  #pragma omp target is_device_ptr (buf) if (dev >= 0) device (dev >= 0 ? dev : 0)
  {
    int *q = (int *) buf;
    for (int i = 0; i < 128; i++)
      q[i] = i + 7;
  }

  /* Re-read the buffer on the device; only the flag is mapped back.  */
  #pragma omp target is_device_ptr (buf) if (dev >= 0) device (dev >= 0 ? dev : 0) map(from:mismatch)
  {
    mismatch = 0;
    for (int i = 0; i < 128; i++)
      if (((int *) buf)[i] != i + 7)
        mismatch = 1;
  }

  if (mismatch)
    abort ();
  omp_target_free (buf, dev);
  return 0;
}
|
inputVariables.c | /*
test input
handling of variable scopes:
private, firstprivate, lastprivate, threadprivate
By C. Liao
*/
#include <stdio.h>
#ifdef _OPENMP
#include "omp.h"
#endif
void foo (double y[]){}
int gi = 0;
static int m = 1;
double mm[117];
double mm2[117];
#pragma omp threadprivate(m,mm)
/* Test driver exercising OpenMP data-sharing clauses (see file header):
 * private, firstprivate, reduction, copyin of threadprivate data, plus
 * critical and atomic constructs. This is a compiler-test input; the unusual
 * accesses inside the parallel region (e.g. overwriting the reduction copy of
 * gi, reading shared j while another thread increments it) are presumably part
 * of what is being exercised rather than bugs to fix. */
int
main (void)
{
  int i;
  int k_3;
  int gj = 0;
  int j = 5;
  /* mm is threadprivate (see the #pragma threadprivate above); the master
     thread's copy is seeded here and replicated by copyin below. */
  mm[0] = 9.0;
  k_3 = 7 + mm[0];
#pragma omp parallel private (i) firstprivate(k_3) reduction(+:gi,gj) copyin(mm)
  {
    int k = 1;
#ifdef _OPENMP
    /* i is private; each thread stores its own id. */
    i = omp_get_thread_num ();
#endif
    printf ("Hello,world!\n");
    /* gi is a reduction variable, so this assigns the thread-local copy. */
#pragma omp critical
    gi = i + j + k + k_3;
    gi += i;
    /* j is shared; the increment is atomic but reads of j above are not. */
#pragma omp atomic
    j++;
    /* m and mm are threadprivate globals. */
    gj += m + mm[1];
    foo (mm - 1);
    foo (mm2 - 1);
  }
  printf ("gi is %d,gj is %d\n", gi, gj);
  return 0;
}
|
phase.c | /*
* Copyright (C) 2018 by Benedict Paten (benedictpaten@gmail.com)
*
* Released under the MIT license, see LICENSE.txt
*/
#include <getopt.h>
#include <stdio.h>
#include <ctype.h>
#include <memory.h>
#include <hashTableC.h>
#include <unistd.h>
#include <time.h>
#include "marginVersion.h"
#include "margin.h"
#include "htsIntegration.h"
#include "helenFeatures.h"
/*
* Main functions
*/
/*
 * Prints the usage/help text for `margin phase` to stderr.
 * Fix: the default log level reported for -a/--logLevel was "info", but
 * phase_main() initializes logLevelString to "critical".
 */
void phase_usage() {
    fprintf(stderr, "usage: margin phase <ALIGN_BAM> <REFERENCE_FASTA> <VARIANT_VCF> <PARAMS> [options]\n");
    fprintf(stderr, "Version: %s \n\n", MARGIN_POLISH_VERSION_H);
    fprintf(stderr, "Tags reads in ALIGN_BAM using variants in VARIANT_VCF.\n");
    fprintf(stderr, "\nRequired arguments:\n");
    fprintf(stderr, " ALIGN_BAM is the alignment of reads to the reference.\n");
    fprintf(stderr, " REFERENCE_FASTA is the reference sequence BAM file in fasta format.\n");
    fprintf(stderr, " VARIANT_VCF is the set of variants to use for phasing.\n");
    fprintf(stderr, " PARAMS is the file with margin parameters.\n");
    fprintf(stderr, "\nDefault options:\n");
    fprintf(stderr, " -h --help : Print this help screen\n");
    fprintf(stderr, " -a --logLevel : Set the log level [default = critical]\n");
# ifdef _OPENMP
    fprintf(stderr, " -t --threads : Set number of concurrent threads [default = 1]\n");
#endif
    fprintf(stderr, " -o --outputBase : Name to use for output files [default = 'output']\n");
    fprintf(stderr, " -r --region : If set, will only compute for given chromosomal region\n");
    fprintf(stderr, " Format: chr:start_pos-end_pos (chr3:2000-3000)\n");
    fprintf(stderr, " -p --depth : Will override the downsampling depth set in PARAMS\n");
    fprintf(stderr, " -k --tempFilesToDisk : Write temporary files to disk (for --diploid or supplementary output)\n");
    fprintf(stderr, "\nOutput options:\n");
    fprintf(stderr, " -M --skipHaplotypeBAM : Do not write out phased BAM\n");
    fprintf(stderr, " -V --skipPhasedVCF : Do not write out phased VCF\n");
    fprintf(stderr, "\n");
}
/*
 * Entry point for `margin phase`: tags reads in an alignment BAM using the
 * variants in a VCF, then optionally writes a haplotagged BAM and a phased VCF.
 *
 * argv layout: margin phase <ALIGN_BAM> <REFERENCE_FASTA> <VARIANT_VCF> <PARAMS> [options]
 * Returns 0 on success (or after printing usage); aborts via st_errAbort on error.
 *
 * Fixes relative to the previous revision:
 *  - the positional-argument guard was `argc < 4`, which let argv[4] (NULL for
 *    argc == 4) be passed to stString_copy; four positionals require argc >= 5;
 *  - the BAM-index (.bai) existence check was nested inside the unreadable-BAM
 *    branch after st_errAbort and could never execute;
 *  - two st_logInfo calls printed a time_t difference with %d (UB on platforms
 *    where time_t is not int) — now cast explicitly;
 *  - the early-return paths now free vcfFile as well, consistently.
 */
int phase_main(int argc, char *argv[]) {
    // Parameters / arguments
    char *logLevelString = stString_copy("critical");
    char *bamInFile = NULL;
    char *paramsFile = NULL;
    char *referenceFastaFile = NULL;
    char *outputBase = stString_copy("output");
    char *regionStr = NULL;
    char *vcfFile = NULL;
    int numThreads = 1;
    int64_t maxDepth = -1;
    bool inMemory = TRUE;
    bool shouldOutputHaplotaggedBam = TRUE;
    bool shouldOutputPhasedVcf = TRUE;

    // Four positional arguments plus the program name are required.
    if (argc < 5) {
        free(outputBase);
        free(logLevelString);
        phase_usage();
        return 0;
    }

    bamInFile = stString_copy(argv[1]);
    referenceFastaFile = stString_copy(argv[2]);
    vcfFile = stString_copy(argv[3]);
    paramsFile = stString_copy(argv[4]);

    // Parse the options
    while (1) {
        static struct option long_options[] = {
                { "help", no_argument, 0, 'h' },
                { "logLevel", required_argument, 0, 'a' },
# ifdef _OPENMP
                { "threads", required_argument, 0, 't'},
#endif
                { "outputBase", required_argument, 0, 'o'},
                { "region", required_argument, 0, 'r'},
                { "depth", required_argument, 0, 'p'},
                { "tempFilesToDisk", no_argument, 0, 'k'},
                { "skipHaplotypeBAM", no_argument, 0, 'M'},
                { "skipPhasedVCF", no_argument, 0, 'V'},
                { 0, 0, 0, 0 } };

        int option_index = 0;
        // NOTE(review): parsing starts at &argv[2] with argc-2, presumably so
        // getopt_long skips the program name and subcommand — confirm against
        // how main() dispatches subcommands.
        int key = getopt_long(argc-2, &argv[2], "ha:o:p:t:r:kMV", long_options, &option_index);

        if (key == -1) {
            break;
        }

        switch (key) {
        case 'a':
            free(logLevelString);
            logLevelString = stString_copy(optarg);
            break;
        case 'h':
            phase_usage();
            // free everything allocated so far (consistent with the default case)
            free(outputBase);
            free(logLevelString);
            free(bamInFile);
            free(referenceFastaFile);
            free(vcfFile);
            free(paramsFile);
            return 0;
        case 'o':
            free(outputBase);
            outputBase = getFileBase(optarg, "output");
            break;
        case 'r':
            regionStr = stString_copy(optarg);
            break;
        case 'p':
            maxDepth = atoi(optarg);
            if (maxDepth < 0) {
                st_errAbort("Invalid maxDepth: %s", optarg);
            }
            break;
        case 't':
            numThreads = atoi(optarg);
            if (numThreads <= 0) {
                st_errAbort("Invalid thread count: %d", numThreads);
            }
            break;
        case 'k':
            inMemory = FALSE;
            break;
        case 'M':
            shouldOutputHaplotaggedBam = FALSE;
            break;
        case 'V':
            shouldOutputPhasedVcf = FALSE;
            break;
        default:
            phase_usage();
            free(outputBase);
            free(logLevelString);
            free(bamInFile);
            free(referenceFastaFile);
            free(vcfFile);
            free(paramsFile);
            return 0;
        }
    }

    // sanity check (conflicting params)
    if (!shouldOutputHaplotaggedBam && !shouldOutputPhasedVcf) {
        st_errAbort("With --skipHaplotypeBAM and --skipPhasedVCF there will be no output.\n");
    }

    // sanity check (verify files exist)
    if (access(bamInFile, R_OK) != 0) {
        st_errAbort("Could not read from input bam file: %s\n", bamInFile);
    }
    // BUGFIX: this index check previously sat inside the unreadable-BAM branch
    // (after st_errAbort) and therefore could never run.
    char *idx = stString_print("%s.bai", bamInFile);
    if (access(idx, R_OK) != 0) {
        st_errAbort("BAM does not appear to be indexed: %s\n", bamInFile);
    }
    free(idx);
    if (access(referenceFastaFile, R_OK) != 0) {
        st_errAbort("Could not read from reference fastafile: %s\n", referenceFastaFile);
    }
    if (access(vcfFile, R_OK) != 0) {
        st_errAbort("Could not read from vcf file: %s\n", vcfFile);
    }
    if (access(paramsFile, R_OK) != 0) {
        st_errAbort("Could not read from params file: %s\n", paramsFile);
    }

    // Initialization from arguments
    time_t startTime = time(NULL);
    st_setLogLevelFromString(logLevelString);
    free(logLevelString);
    if (st_getLogLevel() >= info) {
        st_setCallocDebug(true);
    }
# ifdef _OPENMP
    if (numThreads <= 0) {
        numThreads = 1;
    }
    omp_set_num_threads(numThreads);
    st_logCritical("Running OpenMP with %d threads.\n", omp_get_max_threads());
# endif

    // Parse parameters
    st_logCritical("> Parsing model parameters from file: %s\n", paramsFile);
    Params *params = params_readParams(paramsFile);

    // update depth (if set)
    if (maxDepth >= 0) {
        st_logCritical("> Changing maxDepth parameter from %"PRId64" to %"PRId64"\n", params->polishParams->maxDepth,
                       maxDepth);
        params->polishParams->maxDepth = (uint64_t) maxDepth;
    }

    // Print a report of the parsed parameters
    if (st_getLogLevel() == debug) {
        params_printParameters(params, stderr);
    }

    // get vcf entries (if set)
    stHash *vcfEntries = NULL;
    if (vcfFile != NULL) {
        vcfEntries = parseVcf2(vcfFile, regionStr, params);
    }

    // get valid contigs (to help bam chunker construction)
    stList *vcfContigsTmp = stHash_getKeys(vcfEntries);
    stSet *vcfContigs = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
    for (int64_t i = 0; i < stList_length(vcfContigsTmp); i++) {
        stSet_insert(vcfContigs, stList_get(vcfContigsTmp, i));
    }

    // get chunker for bam. if regionStr is NULL, it will be ignored
    time_t chunkingStart = time(NULL);
    BamChunker *bamChunker = bamChunker_construct2(bamInFile, regionStr, vcfContigs, params->polishParams, TRUE);
    char *regionStrInformative = regionStr != NULL ? stString_copy(regionStr) : stString_join2(",", vcfContigsTmp);
    st_logCritical(
            "> Set up bam chunker in %"PRId64"s with chunk size %i and overlap %i (for region=%s), resulting in %i total chunks\n",
            time(NULL) - chunkingStart, (int) bamChunker->chunkSize, (int) bamChunker->chunkBoundary,
            regionStrInformative, bamChunker->chunkCount);
    if (bamChunker->chunkCount == 0) {
        st_errAbort("> Found no valid reads!\n");
    }
    free(regionStrInformative);
    stList_destruct(vcfContigsTmp);
    stSet_destruct(vcfContigs);

    // print chunk info
    char *outputChunksFile = stString_print("%s.chunks.csv", outputBase);
    FILE *chunksOut = safe_fopen(outputChunksFile, "w");
    for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
        BamChunk *c = stList_get(bamChunker->chunks, i);
        fprintf(chunksOut, "%s,%"PRId64",%"PRId64",%"PRId64",%"PRId64"\n", c->refSeqName, c->chunkOverlapStart,
                c->chunkOverlapEnd, c->chunkStart, c->chunkEnd);
    }
    fclose(chunksOut);
    free(outputChunksFile);

    // output chunker tracks intermediate output files
    OutputChunkers *outputChunkers = outputChunkers_construct(numThreads, params, NULL, NULL, NULL, NULL,
                                                              ".hap1", ".hap2", inMemory);

    // (may) need to shuffle chunks
    stList *chunkOrder = stList_construct3(0, (void (*)(void *)) stIntTuple_destruct);
    for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
        stList_append(chunkOrder, stIntTuple_construct1(i));
    }
    if (params->polishParams->shuffleChunks) {
        switch (params->polishParams->shuffleChunksMethod) {
        case SCM_SIZE_DESC:
            st_logCritical("> Ordering chunks by estimated depth\n");
            stList_sort2(chunkOrder, compareBamChunkDepthByIndexInList, bamChunker->chunks);
            stList_reverse(chunkOrder);
            break;
        case SCM_RANDOM:
            st_logCritical("> Randomly shuffling chunks\n");
            stList_shuffle(chunkOrder);
            break;
        }
    }

    // multiproccess the chunks, save to results
    st_logCritical("> Setup complete, beginning run\n");
    int64_t lastReportedPercentage = 0;
    time_t polishStartTime = time(NULL);

# ifdef _OPENMP
#pragma omp parallel for schedule(dynamic,1)
# endif
    for (int64_t i = 0; i < bamChunker->chunkCount; i++) {
        int64_t chunkIdx = stIntTuple_get(stList_get(chunkOrder, i), 0);
        // Time all chunks
        time_t chunkStartTime = time(NULL);

        // Get chunk
        BamChunk *bamChunk = bamChunker_getChunk(bamChunker, chunkIdx);

        // logging
        char *logIdentifier;
        bool logProgress = FALSE;
        int64_t currentPercentage = (int64_t) (100 * i / bamChunker->chunkCount);
# ifdef _OPENMP
        int64_t threadIdx = omp_get_thread_num();
        logIdentifier = stString_print(" T%02d_C%05"PRId64, threadIdx, chunkIdx);
        if (threadIdx == 0) {
            if (currentPercentage != lastReportedPercentage) {
                logProgress = TRUE;
                lastReportedPercentage = currentPercentage;
            }
        }
# else
        int64_t threadIdx = 0;
        logIdentifier = stString_copy("");
        if (currentPercentage != lastReportedPercentage) {
            logProgress = TRUE;
            lastReportedPercentage = currentPercentage;
        }
# endif

        // prints percentage complete and estimated time remaining
        if (logProgress) {
            // log progress
            int64_t timeTaken = (int64_t) (time(NULL) - polishStartTime);
            int64_t secondsRemaining = (int64_t) floor(1.0 * timeTaken / currentPercentage * (100 - currentPercentage));
            char *timeDescriptor = (secondsRemaining == 0 && currentPercentage <= 50 ?
                                    stString_print("unknown") : getTimeDescriptorFromSeconds(secondsRemaining));
            st_logCritical("> Polishing %2"PRId64"%% complete (%"PRId64"/%"PRId64"). Estimated time remaining: %s\n",
                           currentPercentage, i, bamChunker->chunkCount, timeDescriptor);
            free(timeDescriptor);
        }

        // Get reference string for chunk of alignment
        char *chunkReference = getSequenceFromReference(referenceFastaFile, bamChunk->refSeqName,
                                                        bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);
        st_logInfo(">%s Going to process a chunk for reference sequence: %s, starting at: %i and ending at: %i\n",
                   logIdentifier, bamChunk->refSeqName, (int) bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd);

        // get VCF string
        stList *chunkVcfEntries = getVcfEntriesForRegion(vcfEntries, NULL, bamChunk->refSeqName,
                                                         bamChunk->chunkOverlapStart, bamChunk->chunkOverlapEnd, params);
        updateVcfEntriesWithSubstringsAndPositions(chunkVcfEntries, chunkReference, strlen(chunkReference),
                                                   FALSE, params);

        // Convert bam lines into corresponding reads and alignments
        st_logInfo(" %s Parsing input reads from file: %s\n", logIdentifier, bamInFile);
        stList *reads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
        stList *filteredReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);
        extractReadSubstringsAtVariantPositions(bamChunk, chunkVcfEntries, reads, filteredReads, params->polishParams);

        // do downsampling if appropriate
        if (params->polishParams->maxDepth > 0) {
            // get downsampling structures
            stList *maintainedReads = stList_construct3(0, (void (*)(void *)) bamChunkRead_destruct);

            bool didDownsample = downsampleBamChunkReadWithVcfEntrySubstringsViaFullReadLengthLikelihood(
                    params->polishParams->maxDepth, chunkVcfEntries, reads, maintainedReads, filteredReads);

            // we need to destroy the discarded reads and structures
            if (didDownsample) {
                st_logInfo(" %s Downsampled from %"PRId64" to %"PRId64" reads\n", logIdentifier,
                           stList_length(reads), stList_length(maintainedReads));
                // still has all the old reads, need to not free these
                stList_setDestructor(reads, NULL);
                stList_destruct(reads);
                // and keep the filtered reads
                reads = maintainedReads;
            }
            // no downsampling, we just need to free the (empty) objects
            else {
                assert(stList_length(maintainedReads) == 0);
                stList_destruct(maintainedReads);
            }
        }

        time_t primaryPhasingStart = time(NULL);

        // iteratively find bubbles
        int64_t bubbleFindingIteration = 0;
        BubbleGraph *bg = NULL;
        stHash *readsToPSeqs = NULL;
        stSet *readsBelongingToHap1 = NULL, *readsBelongingToHap2 = NULL;
        stGenomeFragment *gf = NULL;
        stReference *ref = NULL;
        stList *vcfEntriesToBubbles = NULL;

        // Get the bubble graph representation
        bg = bubbleGraph_constructFromVCFAndBamChunkReadVcfEntrySubstrings(reads, chunkVcfEntries, params,
                                                                           &vcfEntriesToBubbles);

        // Now make a POA for each of the haplotypes
        ref = bubbleGraph_getReference(bg, bamChunk->refSeqName, params);
        gf = bubbleGraph_phaseBubbleGraph(bg, ref, reads, params, &readsToPSeqs);

        stGenomeFragment_phaseBamChunkReads(gf, readsToPSeqs, reads, &readsBelongingToHap1, &readsBelongingToHap2,
                                            params->phaseParams);
        st_logInfo(" %s After phasing, of %i reads got %i reads partitioned into hap1 and %i reads partitioned "
                   "into hap2 (%i unphased)\n", logIdentifier, (int) stList_length(reads),
                   (int) stSet_size(readsBelongingToHap1), (int) stSet_size(readsBelongingToHap2),
                   (int) (stList_length(reads) - stSet_size(readsBelongingToHap1) -
                          stSet_size(readsBelongingToHap2)));
        // FIX: cast the time_t difference to int to match the %d conversion.
        st_logInfo(" %s Phased primary reads in %d sec\n", logIdentifier, (int) (time(NULL) - primaryPhasingStart));

        // include filtered reads in the output
        // get reads
        for (int64_t bcrIdx = 0; bcrIdx < stList_length(reads); bcrIdx++) {
            BamChunkRead *bcr = stList_get(reads, bcrIdx);
            if (!stSet_search(readsBelongingToHap1, bcr) && !stSet_search(readsBelongingToHap2, bcr)) {
                // was filtered in some form
                stList_append(filteredReads, bamChunkRead_constructCopy(bcr));
            }
        }
        st_logInfo(" %s Assigning %"PRId64" filtered reads to haplotypes\n", logIdentifier, stList_length(filteredReads));
        time_t filteredPhasingStart = time(NULL);
        bubbleGraph_partitionFilteredReadsFromVcfEntries(filteredReads, gf, bg, vcfEntriesToBubbles, readsBelongingToHap1,
                                                         readsBelongingToHap2, params, logIdentifier);
        // FIX: cast the time_t difference to int to match the %d conversion.
        st_logInfo(" %s Partitioned filtered reads in %d sec.\n", logIdentifier, (int) (time(NULL) - filteredPhasingStart));

        // Output
        outputChunkers_processChunkSequencePhased(outputChunkers, threadIdx, chunkIdx, bamChunk->refSeqName,
                                                  NULL, NULL, reads, readsBelongingToHap1, readsBelongingToHap2, gf,
                                                  params);

        // save
        // only use primary reads (not filteredReads) to track read phasing
        updateOriginalVcfEntriesWithBubbleData(bamChunk, reads, bamChunker->readEnumerator, gf, bg,
                                               vcfEntriesToBubbles, readsBelongingToHap1, readsBelongingToHap2, logIdentifier);

        // Cleanup
        if (chunkVcfEntries != NULL) stList_destruct(chunkVcfEntries);
        stSet_destruct(readsBelongingToHap1);
        stSet_destruct(readsBelongingToHap2);
        bubbleGraph_destruct(bg);
        stGenomeFragment_destruct(gf);
        stReference_destruct(ref);
        stHash_destruct(readsToPSeqs);
        stList_destruct(vcfEntriesToBubbles);
        free(chunkReference);

        // report timing
        if (st_getLogLevel() >= info) {
            st_logInfo(">%s Chunk with ~%"PRId64" reads processed in %d sec\n",
                       logIdentifier, stList_length(reads) + stList_length(filteredReads), (int) (time(NULL) - chunkStartTime));
        }

        // final post-completion logging cleanup
        stList_destruct(reads);
        stList_destruct(filteredReads);
        free(logIdentifier);
    }

    // for writing haplotyped chunks
    stList *allReadIdsHap1 = stList_construct3(0, free);
    stList *allReadIdsHap2 = stList_construct3(0, free);
    // for writing vcf
    bool *chunkWasSwitched = st_calloc(bamChunker->chunkCount, sizeof(bool));

    // merge chunks
    time_t mergeStartTime = time(NULL);
    st_logCritical("> Starting merge\n");
    outputChunkers_stitchAndTrackExtraData(outputChunkers, TRUE, bamChunker->chunkCount, allReadIdsHap1, allReadIdsHap2,
                                           chunkWasSwitched);
    time_t mergeEndTime = time(NULL);
    char *tds = getTimeDescriptorFromSeconds((int) mergeEndTime - mergeStartTime);
    st_logCritical("> Merging took %s\n", tds);
    outputChunkers_destruct(outputChunkers);
    free(tds);
    tds = getTimeDescriptorFromSeconds((int) time(NULL) - mergeEndTime);
    st_logCritical("> Merge cleanup took %s\n", tds);
    free(tds);

    // maybe write final haplotyped bams
    if (shouldOutputHaplotaggedBam) {
        // logging
        time_t hapBamStart = time(NULL);
        st_logInfo("> Writing final haplotyped BAMs\n");

        // get all reads
        stSet *allReadIdsForHaplotypingHap1 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
        stSet *allReadIdsForHaplotypingHap2 = stSet_construct3(stHash_stringKey, stHash_stringEqualKey, NULL);
        for (int64_t i = 0; i < stList_length(allReadIdsHap1); i++) {
            stSet_insert(allReadIdsForHaplotypingHap1, stList_get(allReadIdsHap1, i));
        }
        for (int64_t i = 0; i < stList_length(allReadIdsHap2); i++) {
            stSet_insert(allReadIdsForHaplotypingHap2, stList_get(allReadIdsHap2, i));
        }

        // write it
        writeHaplotaggedBam(bamChunker->bamFile, outputBase, regionStr,
                            allReadIdsForHaplotypingHap1, allReadIdsForHaplotypingHap2, NULL, params, "");

        // loggit
        char *hapBamTDS = getTimeDescriptorFromSeconds(time(NULL) - hapBamStart);
        st_logCritical("> Wrote haplotyped bams in %s\n", hapBamTDS);

        // cleanup
        free(hapBamTDS);
        stSet_destruct(allReadIdsForHaplotypingHap1);
        stSet_destruct(allReadIdsForHaplotypingHap2);
    }

    // maybe write VCF
    if (shouldOutputPhasedVcf) {
        // loggit
        time_t vcfWriteStart = time(NULL);
        char *outputVcfFile = stString_print("%s.phased.vcf", outputBase);
        char *outputPhaseSetFile = stString_print("%s.phaseset.bed", outputBase);
        st_logCritical("> Writing phased VCF to %s, phaseset info to %s\n", outputVcfFile, outputPhaseSetFile);

        // write it
        updateHaplotypeSwitchingInVcfEntries(bamChunker, chunkWasSwitched, vcfEntries);
        writePhasedVcf(vcfFile, regionStr, outputVcfFile, outputPhaseSetFile, vcfEntries, params);

        // loggit
        char *phasedVcfTDS = getTimeDescriptorFromSeconds(time(NULL) - vcfWriteStart);
        st_logCritical("> Wrote phased VCF in %s\n", phasedVcfTDS);

        // cleanup
        free(phasedVcfTDS);
        free(outputVcfFile);
        free(outputPhaseSetFile);
    }

    // cleanup
    free(chunkWasSwitched);
    bamChunker_destruct(bamChunker);
    params_destruct(params);
    if (regionStr != NULL) free(regionStr);
    stList_destruct(chunkOrder);
    free(vcfFile);
    stHash_destruct(vcfEntries);
    if (allReadIdsHap1 != NULL) stList_destruct(allReadIdsHap1);
    if (allReadIdsHap2 != NULL) stList_destruct(allReadIdsHap2);
    free(outputBase);
    free(bamInFile);
    free(referenceFastaFile);
    free(paramsFile);

    // log completion
    char *timeDescriptor = getTimeDescriptorFromSeconds(time(NULL) - startTime);
    st_logCritical("> Finished phasing in %s.\n", timeDescriptor);
    free(timeDescriptor);

    // while(1); // Use this for testing for memory leaks
    return 0;
}
|
kmeans_h2o4gpu.h | /*!
* Copyright 2017-2018 H2O.ai, Inc.
* License Apache License Version 2.0 (see LICENSE for details)
*/
#pragma once
#include <thrust/device_vector.h>
#include <thrust/functional.h>
#include <thrust/host_vector.h>
#include "kmeans_centroids.h"
#include "kmeans_labels.h"
/**
* Copies data from srcdata to array
* @tparam T
* @param verbose Logging level
* @param ord Column on row order of data
* @param array Destination array
* @param srcdata Source data
* @param q Shard number (from 0 to n_gpu)
* @param n
* @param npergpu
* @param d
*/
template <typename T>
void copy_data(int verbose, const char ord, thrust::device_vector<T> &array,
const T *srcdata, int q, int n, size_t npergpu, int d) {
if (ord == 'c') {
thrust::host_vector<T> host_array(npergpu * d);
log_debug(verbose, "Copy data COL ORDER -> ROW ORDER");
for (size_t i = 0; i < npergpu * d; i++) {
size_t indexi = i % d; // col
size_t indexj = i / d + q * npergpu; // row (shifted by which gpu)
host_array[i] = srcdata[indexi * n + indexj];
}
array = host_array;
} else {
log_debug(verbose, "Copy data ROW ORDER not changed");
thrust::host_vector<T> host_array(srcdata + q * npergpu * d,
srcdata + q * npergpu * d + npergpu * d);
array = host_array;
}
}
// Device functor: for one data row, scans the k pairwise distances (strided by
// rows_per_run) to find the nearest centroid, then atomically bumps its count.
template <typename T>
struct count_functor {
  T *pairwise_distances_ptr;
  int *counts_ptr;
  int k;
  int rows_per_run;

  count_functor(T *_pairwise_distances_ptr, int *_counts_ptr, int _k,
                int _rows_per_run)
      : pairwise_distances_ptr(_pairwise_distances_ptr),
        counts_ptr(_counts_ptr),
        k(_k),
        rows_per_run(_rows_per_run) {}

  __device__ void operator()(int idx) const {
    // Start from centroid 0, then scan the remaining k-1 candidates.
    int best_centroid = 0;
    T best_distance = pairwise_distances_ptr[idx];
    // FIXME potentially slow due to striding
    for (int c = 1; c < k; c++) {
      const T candidate = pairwise_distances_ptr[idx + c * rows_per_run];
      if (candidate < best_distance) {
        best_distance = candidate;
        best_centroid = c;
      }
    }
    atomicAdd(&counts_ptr[best_centroid], 1);
  }
};
/**
 * Calculates closest centroid for each record and counts how many points are
 * assigned to each centroid.
 * @tparam T
 * @param verbose
 * @param num_gpu
 * @param rows_per_gpu
 * @param cols
 * @param data
 * @param data_dots
 * @param centroids
 * @param weights
 */
template <typename T>
void count_pts_per_centroid(int verbose, int num_gpu, int rows_per_gpu,
                            int cols, thrust::device_vector<T> **data,
                            thrust::device_vector<T> **data_dots,
                            thrust::host_vector<T> centroids,
                            thrust::host_vector<T> &weights) {
  int k = centroids.size() / cols;
#pragma omp parallel for
  for (int i = 0; i < num_gpu; i++) {
    // Per-GPU scratch: counts computed on device i, then copied to the host.
    thrust::host_vector<int> weights_tmp(weights.size());
    CUDACHECK(cudaSetDevice(i));
    thrust::device_vector<T> centroid_dots(k);
    thrust::device_vector<T> d_centroids = centroids;
    thrust::device_vector<int> counts(k);
    kmeans::detail::batch_calculate_distances(
        verbose, 0, rows_per_gpu, cols, k, *data[i], d_centroids, *data_dots[i],
        centroid_dots,
        [&](int rows_per_run, size_t offset,
            thrust::device_vector<T> &pairwise_distances) {
          auto counting = thrust::make_counting_iterator(0);
          auto counts_ptr = thrust::raw_pointer_cast(counts.data());
          auto pairwise_distances_ptr =
              thrust::raw_pointer_cast(pairwise_distances.data());
          thrust::for_each(counting, counting + rows_per_run,
                           count_functor<T>(pairwise_distances_ptr, counts_ptr,
                                            k, rows_per_run));
        });
    kmeans::detail::memcpy(weights_tmp, counts);
    kmeans::detail::streamsync(i);
    // BUGFIX: `weights` is shared by all OpenMP threads in this loop, so the
    // accumulation must be serialized; previously it raced across GPUs.
#pragma omp critical
    {
      for (int p = 0; p < k; p++) {
        weights[p] += weights_tmp[p];
      }
    }
  }
}
5103.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "covariance.h"
/* Array initialization. */
/* Array initialization: sets *float_n to 1.2 and fills
   data[row][col] = (row * col) / M. */
static
void init_array (int m, int n,
                 DATA_TYPE *float_n,
                 DATA_TYPE POLYBENCH_2D(data,M,N,m,n))
{
  *float_n = 1.2;
  for (int row = 0; row < M; row++)
    for (int col = 0; col < N; col++)
      data[row][col] = ((DATA_TYPE) row*col) / M;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output.
   Emits a newline every 20 printed values. */
static
void print_array(int m,
                 DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m))
{
  for (int r = 0; r < m; r++)
    for (int c = 0; c < m; c++) {
      fprintf (stderr, DATA_PRINTF_MODIFIER, symmat[r][c]);
      if ((r * m + c) % 20 == 0)
        fprintf (stderr, "\n");
    }
  fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Fixes relative to the previous revision:
   - the enclosing region used `num_threads(#P11)` — an unexpanded autotuning
     placeholder followed by `#same issue as atax`, which is not valid C;
   - `simd` appeared after `schedule(...)` as if it were a clause, which is
     invalid placement for the composite `parallel for simd` construct;
   - nesting `#pragma omp parallel for` loops inside an outer
     `#pragma omp parallel` region made every outer thread re-execute each
     loop concurrently, racing on mean[], data[][] and symmat[][].
   Each loop nest is now a single, flat `parallel for`. */
static
void kernel_covariance(int m, int n,
                       DATA_TYPE float_n,
                       DATA_TYPE POLYBENCH_2D(data,M,N,m,n),
                       DATA_TYPE POLYBENCH_2D(symmat,M,M,m,m),
                       DATA_TYPE POLYBENCH_1D(mean,M,m))
{
  int i, j, j1, j2;
#pragma scop
  /* Determine mean of column vectors of input data matrix */
#pragma omp parallel for private(i) schedule(dynamic, 4)
  for (j = 0; j < _PB_M; j++)
    {
      mean[j] = 0.0;
      for (i = 0; i < _PB_N; i++)
        mean[j] += data[i][j];
      mean[j] /= float_n;
    }
  /* Center the column vectors. */
#pragma omp parallel for private(j) schedule(dynamic, 4)
  for (i = 0; i < _PB_N; i++)
    for (j = 0; j < _PB_M; j++)
      data[i][j] -= mean[j];
  /* Calculate the m * m covariance matrix. Each pair (j1,j2), j2 >= j1, is
     written only by iteration j1, so parallelizing the outer loop is safe. */
#pragma omp parallel for private(i, j2) schedule(dynamic, 4)
  for (j1 = 0; j1 < _PB_M; j1++)
    for (j2 = j1; j2 < _PB_M; j2++)
      {
        symmat[j1][j2] = 0.0;
        for (i = 0; i < _PB_N; i++)
          symmat[j1][j2] += data[i][j1] * data[i][j2];
        symmat[j2][j1] = symmat[j1][j2];
      }
#pragma endscop
}
/* Driver: allocate data, initialize, time the kernel, print live-out
   data (for DCE prevention / correctness checking), and free. */
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int m = M;
/* Variable declaration/allocation. */
DATA_TYPE float_n;
POLYBENCH_2D_ARRAY_DECL(data,DATA_TYPE,M,N,m,n);
POLYBENCH_2D_ARRAY_DECL(symmat,DATA_TYPE,M,M,m,m);
POLYBENCH_1D_ARRAY_DECL(mean,DATA_TYPE,M,m);
/* Initialize array(s). */
init_array (m, n, &float_n, POLYBENCH_ARRAY(data));
/* Start timer. */
polybench_start_instruments;
/* Run kernel.  The whole call is timed, including call/return overhead. */
kernel_covariance (m, n, float_n,
POLYBENCH_ARRAY(data),
POLYBENCH_ARRAY(symmat),
POLYBENCH_ARRAY(mean));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(m, POLYBENCH_ARRAY(symmat)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(data);
POLYBENCH_FREE_ARRAY(symmat);
POLYBENCH_FREE_ARRAY(mean);
return 0;
}
|
GB_unaryop__identity_uint16_uint8.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_uint8
// op(A') function: GB_tran__identity_uint16_uint8
// C type: uint16_t
// A type: uint8_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint8_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_UINT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__identity_uint16_uint8: apply the IDENTITY operator entrywise,
// casting each uint8_t entry of Ax to a uint16_t entry of Cx.  C and A
// share the same pattern, so this is a flat loop over all anz entries.
// Auto-generated file: the real work is in the GB_* macros defined above.
GrB_Info GB_unop__identity_uint16_uint8
(
uint16_t *restrict Cx,       // output value array, length anz
const uint8_t *restrict Ax,  // input value array, length anz
int64_t anz,                 // number of entries to process
int nthreads                 // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at configure time
return (GrB_NO_VALUE) ;
#else
// each iteration is independent: Cx [p] = (uint16_t) Ax [p]
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__identity_uint16_uint8: C = identity (cast (A')).  The loop body
// is generated by textually including the shared transpose template, which
// expands the GB_* macros defined earlier in this file.
GrB_Info GB_tran__identity_uint16_uint8
(
GrB_Matrix C,                      // output matrix (already allocated)
const GrB_Matrix A,                // input matrix to transpose/typecast
int64_t *restrict *Rowcounts,      // per-slice row counts from phase 1
GBI_single_iterator Iter,          // iterator over the vectors of A
const int64_t *restrict A_slice,   // partition of A across slices
int naslice                        // number of slices of A
)
{
#if GB_DISABLE
// this operator/type combination was disabled at configure time
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mm-omp-row.c | /**
*
* Matrix Multiplication - Shared-memory (OpenMP)
*
* CS3210
*
**/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <sys/time.h>
#include <assert.h>
#include <omp.h>
#include <xmmintrin.h>
int size;
int threads;
typedef struct {
float ** element;
} matrix;
/* Returns the current wall-clock time in nanoseconds.
 * Uses clock_gettime when built with -DLINUX, gettimeofday otherwise
 * (microseconds scaled up to nanoseconds). */
long long wall_clock_time() {
#ifdef LINUX
	struct timespec tp;
	clock_gettime(CLOCK_REALTIME, &tp);
	return (long long)tp.tv_sec * 1000000000LL + (long long)tp.tv_nsec;
#else
	struct timeval tv;
	gettimeofday(&tv, NULL);
	return (long long)tv.tv_sec * 1000000000LL + (long long)(tv.tv_usec * 1000);
#endif
}
/**
* Allocates memory for a matrix of size SIZE
* The memory is allocated row-major order, i.e.
* elements from the same row are allocated at contiguous
* memory addresses.
**/
/**
 * Allocates a size x size matrix as an array of row pointers,
 * each row being a contiguous block of floats (row-major order).
 * Exits with status 1 if any allocation fails.
 **/
void allocate_matrix(matrix* m)
{
	int row;

	/* Row-pointer table first. */
	m->element = (float**)malloc(sizeof(float*) * size);
	if (!m->element) {
		fprintf(stderr, "Out of memory\n");
		exit(1);
	}

	/* Then one contiguous buffer per row. */
	for (row = 0; row < size; row++) {
		m->element[row] = (float*)malloc(sizeof(float) * size);
		if (!m->element[row]) {
			fprintf(stderr, "Out of memory\n");
			exit(1);
		}
	}
}
/**
* Free the memory allocated to a matrix.
**/
/**
 * Releases every row buffer of @m, then the row-pointer table itself.
 **/
void free_matrix(matrix* m) {
	int row;
	for (row = 0; row < size; row++)
		free(m->element[row]);
	free(m->element);
}
/**
* Initializes the elements of the matrix with
* random values between 0 and 9
**/
/**
 * Fills every entry of @m with a pseudo-random value in [0, 9]
 * (sequence determined by the caller's srand seed).
 **/
void init_matrix(matrix m)
{
	int row, col;
	for (row = 0; row < size; row++) {
		for (col = 0; col < size; col++)
			m.element[row][col] = rand() % 10;
	}
}
/**
* Initializes the elements of the matrix with
* element 0.
**/
/**
 * Sets every entry of @m to zero.  Required before accumulating
 * into a matrix, since allocate_matrix does not clear memory.
 **/
void init_matrix_zero(matrix m)
{
	int row, col;
	for (row = 0; row < size; row++)
		for (col = 0; col < size; col++)
			m.element[row][col] = 0.0;
}
/**
* Multiplies matrix @a with matrix @b storing
* the result in matrix @result
*
* The multiplication algorithm is the O(n^3)
* algorithm
*/
/* Accumulates products into @result, which the CALLER must zero
   beforehand (this function only does '+=' on existing values). */
void mm(matrix a, matrix b, matrix result)
{
int i, j, k;
// Parallelize the multiplication
// Each thread will work on one iteration of the outer-most loop
// Variables are shared among threads (a, b, result)
// and each thread has its own private copy (i, j, k)
#pragma omp parallel for shared(a, b, result) private (i, j, k)
for (i = 0; i < size; i++)
for (j = 0; j < size; j++)
for(k = 0; k < size; k++)
// NOTE(review): b is indexed [j][k] (row-wise), so this computes
// result = a * transpose(b), not a * b as the header comment says.
// Equivalent only when b is symmetric.  Given the filename
// ("mm-omp-row") this is presumably a deliberate cache-friendly
// row-access variant -- confirm intent before "fixing".
result.element[i][j] += a.element[i][k] * b.element[j][k];
}
/**
 * Prints @m to stdout, one labelled row per line.
 **/
void print_matrix(matrix m)
{
	int row, col;
	for (row = 0; row < size; row++) {
		printf("row %4d: ", row);
		for (col = 0; col < size; col++)
			printf("%6.2f ", m.element[row][col]);
		printf("\n");
	}
}
/**
 * Allocates and initializes two input matrices, times the parallel
 * multiplication, and reports the elapsed time on stderr.
 *
 * Fixes vs. the original:
 *  - result is now zeroed before mm(), which accumulates with '+=';
 *    the original read indeterminate (uninitialized malloc) memory.
 *  - all three matrices are freed; the original leaked them even
 *    though free_matrix() exists.
 **/
void work()
{
	matrix a, b, result;
	long long before, after;

	// Allocate memory for matrices
	allocate_matrix(&a);
	allocate_matrix(&b);
	allocate_matrix(&result);

	// Initialize matrix elements
	init_matrix(a);
	init_matrix(b);
	// mm() accumulates into result, so it must start at zero
	init_matrix_zero(result);

	// Perform parallel matrix multiplication
	before = wall_clock_time();
	mm(a, b, result);
	after = wall_clock_time();
	fprintf(stderr, "Matrix multiplication took %1.2f seconds\n", ((float)(after - before))/1000000000);

	// Print the result matrix
	// print_matrix(result);

	// Release all matrices
	free_matrix(&a);
	free_matrix(&b);
	free_matrix(&result);
}
/**
 * Entry point: parses <size> and <threads> from the command line
 * (defaults: 1024 and the OpenMP default), configures OpenMP,
 * reports the effective thread count, and runs the benchmark.
 **/
int main(int argc, char ** argv)
{
	srand(0);
	printf("Usage: %s <size> <threads>\n", argv[0]);

	size = (argc >= 2) ? atoi(argv[1]) : 1024;
	threads = (argc >= 3) ? atoi(argv[2]) : -1;

	// -1 means "leave the OpenMP default alone"
	if (threads != -1)
		omp_set_num_threads(threads);

	// Query the thread count actually in effect
	#pragma omp parallel
	{
		threads = omp_get_num_threads();
	}
	printf("Matrix multiplication of size %d using %d threads\n", size, threads);

	// Multiply the matrices
	work();
	return 0;
}
|
idasFoodWeb_bnd_omp.c | /*
* -----------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds and Ting Yan @ SMU
* Based on idaFoodWeb_bnd.c and parallelized with OpenMP
* -----------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2020, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
* -----------------------------------------------------------------
* Example program for IDAS: Food web problem.
*
* This example program (OpenMP version) uses the SUNBAND linear
* solver, and IDACalcIC for initial condition calculation.
*
* The mathematical problem solved in this example is a DAE system
* that arises from a system of partial differential equations after
* spatial discretization. The PDE system is a food web population
* model, with predator-prey interaction and diffusion on the unit
* square in two dimensions. The dependent variable vector is:
*
* 1 2 ns
* c = (c , c , ..., c ) , ns = 2 * np
*
* and the PDE's are as follows:
*
* i i i
* dc /dt = d(i)*(c + c ) + R (x,y,c) (i = 1,...,np)
* xx yy i
*
* i i
* 0 = d(i)*(c + c ) + R (x,y,c) (i = np+1,...,ns)
* xx yy i
*
* where the reaction terms R are:
*
* i ns j
* R (x,y,c) = c * (b(i) + sum a(i,j)*c )
* i j=1
*
* The number of species is ns = 2 * np, with the first np being
* prey and the last np being predators. The coefficients a(i,j),
* b(i), d(i) are:
*
* a(i,i) = -AA (all i)
* a(i,j) = -GG (i <= np , j > np)
* a(i,j) = EE (i > np, j <= np)
* all other a(i,j) = 0
* b(i) = BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i <= np)
* b(i) =-BB*(1+ alpha * x*y + beta*sin(4 pi x)*sin(4 pi y)) (i > np)
* d(i) = DPREY (i <= np)
* d(i) = DPRED (i > np)
*
* The various scalar parameters required are set using '#define'
* statements or directly in routine InitUserData. In this program,
* np = 1, ns = 2. The boundary conditions are homogeneous Neumann:
* normal derivative = 0.
*
* A polynomial in x and y is used to set the initial values of the
* first np variables (the prey variables) at each x,y location,
* while initial values for the remaining (predator) variables are
* set to a flat value, which is corrected by IDACalcIC.
*
* The PDEs are discretized by central differencing on a MX by MY
* mesh.
*
* The DAE system is solved by IDAS using the SUNBAND linear solver.
* Output is printed at t = 0, .001, .01, .1, .4, .7, 1.
*
* Optionally, we can set the number of threads from environment
* variable or command line. To check the current value for number
* of threads from environment:
* % echo $OMP_NUM_THREADS
*
* Execution:
*
* To use the default value for the number of threads from
* the OMP_NUM_THREADS environment value:
* % ./idasFoodWeb_bnd_omp
* To specify the number of threads at the command line, use
* % ./idasFoodWeb_bnd_omp num_threads
* where num_threads is the desired number of threads.
*
* -----------------------------------------------------------------
* References:
* [1] Peter N. Brown and Alan C. Hindmarsh,
* Reduced Storage Matrix Methods in Stiff ODE systems, Journal
* of Applied Mathematics and Computation, Vol. 31 (May 1989),
* pp. 40-91.
*
* [2] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Using Krylov Methods in the Solution of Large-Scale
* Differential-Algebraic Systems, SIAM J. Sci. Comput., 15
* (1994), pp. 1467-1488.
*
* [3] Peter N. Brown, Alan C. Hindmarsh, and Linda R. Petzold,
* Consistent Initial Condition Calculation for Differential-
* Algebraic Systems, SIAM J. Sci. Comput., 19 (1998),
* pp. 1495-1512.
* -----------------------------------------------------------------
*/
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <idas/idas.h>
#include <sunmatrix/sunmatrix_band.h>
#include <sunlinsol/sunlinsol_band.h>
#include <nvector/nvector_openmp.h>
#include <sundials/sundials_direct.h>
#include <sundials/sundials_types.h>
#ifdef _OPENMP
#include <omp.h>
#endif
/* Problem Constants. */
#define NPREY 1 /* No. of prey (= no. of predators). */
#define NUM_SPECIES 2*NPREY
#define PI RCONST(3.1415926535898)
#define FOURPI (RCONST(4.0)*PI)
#define MX 20 /* MX = number of x mesh points */
#define MY 20 /* MY = number of y mesh points */
#define NSMX (NUM_SPECIES * MX)
#define NEQ (NUM_SPECIES*MX*MY)
#define AA RCONST(1.0) /* Coefficient in above eqns. for a */
#define EE RCONST(10000.) /* Coefficient in above eqns. for a */
#define GG RCONST(0.5e-6) /* Coefficient in above eqns. for a */
#define BB RCONST(1.0) /* Coefficient in above eqns. for b */
#define DPREY RCONST(1.0) /* Coefficient in above eqns. for d */
#define DPRED RCONST(0.05) /* Coefficient in above eqns. for d */
#define ALPHA RCONST(50.) /* Coefficient alpha in above eqns. */
#define BETA RCONST(1000.) /* Coefficient beta in above eqns. */
#define AX RCONST(1.0) /* Total range of x variable */
#define AY RCONST(1.0) /* Total range of y variable */
#define RTOL RCONST(1.e-5) /* Relative tolerance */
#define ATOL RCONST(1.e-5) /* Absolute tolerance */
#define NOUT 6 /* Number of output times */
#define TMULT RCONST(10.0) /* Multiplier for tout values */
#define TADD RCONST(0.3) /* Increment for tout values */
#define ZERO RCONST(0.)
#define ONE RCONST(1.0)
/*
* User-defined vector and accessor macro: IJ_Vptr.
* IJ_Vptr is defined in order to express the underlying 3-D structure of
* the dependent variable vector from its underlying 1-D storage (an N_Vector).
* IJ_Vptr(vv,i,j) returns a pointer to the location in vv corresponding to
* species index is = 0, x-index ix = i, and y-index jy = j.
*/
#define IJ_Vptr(vv,i,j) (&NV_Ith_OMP(vv, (i)*NUM_SPECIES + (j)*NSMX))
/* Type: UserData. Contains problem constants, etc. */
typedef struct {
sunindextype Neq, ns, np, mx, my;  /* system size, species counts, mesh dims */
realtype dx, dy, **acoef;          /* mesh spacings; ns x ns interaction matrix */
realtype cox[NUM_SPECIES], coy[NUM_SPECIES], bcoef[NUM_SPECIES]; /* diffusion (x,y) and growth coefficients */
N_Vector rates;                    /* scratch vector of reaction rates, length NEQ */
int nthreads;                      /* OpenMP thread count used by resweb/Fweb */
} *UserData;
/* Prototypes for functions called by the IDA Solver. */
static int resweb(realtype time, N_Vector cc, N_Vector cp, N_Vector resval,
void *user_data);
/* Prototypes for private Helper Functions. */
static void InitUserData(UserData webdata);
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata);
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol);
static void PrintOutput(void *ida_mem, N_Vector c, realtype t);
static void PrintFinalStats(void *ida_mem);
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate, UserData webdata);
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
UserData webdata);
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2);
static int check_retval(void *returnvalue, char *funcname, int opt);
/*
*--------------------------------------------------------------------
* MAIN PROGRAM
*--------------------------------------------------------------------
*/
/*
 * Entry point: builds the food-web DAE, corrects the initial condition
 * with IDACalcIC, integrates to the NOUT output times, and prints
 * statistics.  The OpenMP thread count comes from OMP_NUM_THREADS or
 * the first command-line argument.
 *
 * Fix vs. the original: the webdata malloc, its rates vector, and the
 * acoef dense matrix are now checked before use (the unused opt==2
 * branch of check_retval exists for exactly this); previously a failed
 * allocation was dereferenced immediately.
 */
int main(int argc, char *argv[])
{
  void *ida_mem;
  SUNMatrix A;
  SUNLinearSolver LS;
  UserData webdata;
  N_Vector cc, cp, id;
  int iout, retval;
  sunindextype mu, ml;
  realtype rtol, atol, t0, tout, tret;
  int num_threads;

  ida_mem = NULL;
  A = NULL;
  LS = NULL;
  webdata = NULL;
  cc = cp = id = NULL;

  /* Set the number of threads to use */
  num_threads = 1;                       /* default value */
#ifdef _OPENMP
  num_threads = omp_get_max_threads();   /* overwrite with OMP_NUM_THREADS environment variable */
#endif
  if (argc > 1)                          /* overwrite with command line value, if supplied */
    num_threads = (int) strtol(argv[1], NULL, 0);

  /* Allocate and initialize user data block webdata (with checks). */
  webdata = (UserData) malloc(sizeof *webdata);
  if(check_retval((void *)webdata, "malloc", 2)) return(1);
  webdata->rates = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)webdata->rates, "N_VNew_OpenMP", 0)) return(1);
  webdata->acoef = newDenseMat(NUM_SPECIES, NUM_SPECIES);
  if(check_retval((void *)webdata->acoef, "newDenseMat", 2)) return(1);
  webdata->nthreads = num_threads;
  InitUserData(webdata);

  /* Allocate N-vectors and initialize cc, cp, and id. */
  cc = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cc, "N_VNew_OpenMP", 0)) return(1);
  cp = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)cp, "N_VNew_OpenMP", 0)) return(1);
  id = N_VNew_OpenMP(NEQ, num_threads);
  if(check_retval((void *)id, "N_VNew_OpenMP", 0)) return(1);
  SetInitialProfiles(cc, cp, id, webdata);

  /* Set remaining inputs to IDAMalloc. */
  t0 = ZERO;
  rtol = RTOL;
  atol = ATOL;

  /* Call IDACreate and IDAMalloc to initialize IDA. */
  ida_mem = IDACreate();
  if(check_retval((void *)ida_mem, "IDACreate", 0)) return(1);
  retval = IDASetUserData(ida_mem, webdata);
  if(check_retval(&retval, "IDASetUserData", 1)) return(1);
  retval = IDASetId(ida_mem, id);
  if(check_retval(&retval, "IDASetId", 1)) return(1);
  retval = IDAInit(ida_mem, resweb, t0, cc, cp);
  if(check_retval(&retval, "IDAInit", 1)) return(1);
  retval = IDASStolerances(ida_mem, rtol, atol);
  if(check_retval(&retval, "IDASStolerances", 1)) return(1);

  /* Setup band matrix and linear solver, and attach to IDA. */
  mu = ml = NSMX;
  A = SUNBandMatrix(NEQ, mu, ml);
  if(check_retval((void *)A, "SUNBandMatrix", 0)) return(1);
  LS = SUNLinSol_Band(cc, A);
  if(check_retval((void *)LS, "SUNLinSol_Band", 0)) return(1);
  retval = IDASetLinearSolver(ida_mem, LS, A);
  if(check_retval(&retval, "IDASetLinearSolver", 1)) return(1);

  /* Call IDACalcIC (with default options) to correct the initial values. */
  tout = RCONST(0.001);
  retval = IDACalcIC(ida_mem, IDA_YA_YDP_INIT, tout);
  if(check_retval(&retval, "IDACalcIC", 1)) return(1);

  /* Print heading, basic parameters, and initial values. */
  PrintHeader(mu, ml, rtol, atol);
  PrintOutput(ida_mem, cc, ZERO);

  /* Loop over iout, call IDASolve (normal mode), print selected output. */
  for (iout = 1; iout <= NOUT; iout++) {
    retval = IDASolve(ida_mem, tout, &tret, cc, cp, IDA_NORMAL);
    if(check_retval(&retval, "IDASolve", 1)) return(retval);
    PrintOutput(ida_mem, cc, tret);
    if (iout < 3) tout *= TMULT; else tout += TADD;
  }

  /* Print final statistics and free memory. */
  PrintFinalStats(ida_mem);
  printf("num_threads = %i\n\n", num_threads);

  /* Free memory */
  IDAFree(&ida_mem);
  SUNLinSolFree(LS);
  SUNMatDestroy(A);
  N_VDestroy_OpenMP(cc);
  N_VDestroy_OpenMP(cp);
  N_VDestroy_OpenMP(id);
  destroyMat(webdata->acoef);
  N_VDestroy_OpenMP(webdata->rates);
  free(webdata);

  return(0);
}
/* Define lines for readability in later routines */
#define acoef (webdata->acoef)
#define bcoef (webdata->bcoef)
#define cox (webdata->cox)
#define coy (webdata->coy)
/*
*--------------------------------------------------------------------
* FUNCTIONS CALLED BY IDA
*--------------------------------------------------------------------
*/
/*
* resweb: System residual function for predator-prey system.
* This routine calls Fweb to get all the right-hand sides of the
* equations, then loads the residual vector accordingly,
* using cp in the case of prey species.
*/
/* resweb: DAE residual F(t, cc, cp) for IDA.  First computes the ODE
   right-hand sides into res via Fweb, then converts each entry to the
   residual form: prey (differential) components get cp - rhs, predator
   (algebraic) components get -rhs.  Returns 0 (success). */
static int resweb(realtype tt, N_Vector cc, N_Vector cp,
N_Vector res, void *user_data)
{
sunindextype jx, jy, is, yloc, loc, np;
realtype *resv, *cpv;
UserData webdata;
jx = jy = is = 0;
webdata = (UserData)user_data;
cpv = NV_DATA_OMP(cp);
resv = NV_DATA_OMP(res);
np = webdata->np;
/* Call Fweb to set res to vector of right-hand sides. */
Fweb(tt, cc, res, webdata);
/* Loop over all grid points, setting residual values appropriately
for differential or algebraic components.  Each (jy,jx,is) entry is
written exactly once, so the parallel loop is race-free. */
#pragma omp parallel for default(shared) private(jy, yloc, jx, loc, is) schedule(static) num_threads(webdata->nthreads)
for (jy = 0; jy < MY; jy++) {
yloc = NSMX * jy;
for (jx = 0; jx < MX; jx++) {
loc = yloc + NUM_SPECIES * jx;
for (is = 0; is < NUM_SPECIES; is++) {
if (is < np)
resv[loc+is] = cpv[loc+is] - resv[loc+is];
else
resv[loc+is] = -resv[loc+is];
}
}
}
return(0);
}
/*
*--------------------------------------------------------------------
* PRIVATE FUNCTIONS
*--------------------------------------------------------------------
*/
/*
* InitUserData: Load problem constants in webdata (of type UserData).
*/
/*
 * InitUserData: Load problem constants in webdata (of type UserData).
 * Fills the ns x ns interaction matrix acoef quadrant by quadrant
 * (prey-prey, prey-predator, predator-prey, predator-predator), the
 * growth coefficients bcoef, and the diffusion coefficients cox/coy
 * scaled by the mesh spacings.
 */
static void InitUserData(UserData webdata)
{
sunindextype i, j, np;
realtype *a1,*a2, *a3, *a4, dx2, dy2;
webdata->mx = MX;
webdata->my = MY;
webdata->ns = NUM_SPECIES;
webdata->np = NPREY;
webdata->dx = AX/(MX-1);
webdata->dy = AY/(MY-1);
webdata->Neq= NEQ;
/* Set up the coefficients a and b, and others found in the equations. */
np = webdata->np;
dx2 = (webdata->dx)*(webdata->dx); dy2 = (webdata->dy)*(webdata->dy);
for (i = 0; i < np; i++) {
/* a1..a4 walk one row of each quadrant of acoef in lock-step. */
a1 = &(acoef[i][np]);
a2 = &(acoef[i+np][0]);
a3 = &(acoef[i][0]);
a4 = &(acoef[i+np][np]);
/* Fill in the portion of acoef in the four quadrants, row by row. */
for (j = 0; j < np; j++) {
*a1++ = -GG;
*a2++ = EE;
*a3++ = ZERO;
*a4++ = ZERO;
}
/* Reset the diagonal elements of acoef to -AA. */
acoef[i][i] = -AA; acoef[i+np][i+np] = -AA;
/* Set coefficients for b and diffusion terms. */
bcoef[i] = BB; bcoef[i+np] = -BB;
cox[i] = DPREY/dx2; cox[i+np] = DPRED/dx2;
coy[i] = DPREY/dy2; coy[i+np] = DPRED/dy2;
}
}
/*
* SetInitialProfiles: Set initial conditions in cc, cp, and id.
* A polynomial profile is used for the prey cc values, and a constant
* (1.0e5) is loaded as the initial guess for the predator cc values.
* The id values are set to 1 for the prey and 0 for the predators.
* The prey cp values are set according to the given system, and
* the predator cp values are set to zero.
*/
/*
 * SetInitialProfiles: Set initial conditions in cc, cp, and id.
 * A polynomial profile is used for the prey cc values, and a constant
 * (1.0e5) is loaded as the initial guess for the predator cc values.
 * The id values are set to 1 for the prey (differential) and 0 for the
 * predators (algebraic).  Prey cp values come from Fweb; predator cp
 * values are zeroed (IDACalcIC corrects the predator guesses later).
 */
static void SetInitialProfiles(N_Vector cc, N_Vector cp, N_Vector id,
UserData webdata)
{
sunindextype loc, yloc, is, jx, jy, np;
realtype xx, yy, xyfactor;
realtype *ccv, *cpv, *idv;
ccv = NV_DATA_OMP(cc);
cpv = NV_DATA_OMP(cp);
idv = NV_DATA_OMP(id);
np = webdata->np;
/* Loop over grid, load cc values and id values. */
for (jy = 0; jy < MY; jy++) {
yy = jy * webdata->dy;
yloc = NSMX * jy;
for (jx = 0; jx < MX; jx++) {
xx = jx * webdata->dx;
/* Polynomial bump that vanishes on the domain boundary. */
xyfactor = RCONST(16.0)*xx*(ONE-xx)*yy*(ONE-yy);
xyfactor *= xyfactor;
loc = yloc + NUM_SPECIES*jx;
for (is = 0; is < NUM_SPECIES; is++) {
if (is < np) {
ccv[loc+is] = RCONST(10.0) + (realtype)(is+1) * xyfactor;
idv[loc+is] = ONE;
}
else {
ccv[loc+is] = RCONST(1.0e5);
idv[loc+is] = ZERO;
}
}
}
}
/* Set c' for the prey by calling the function Fweb. */
Fweb(ZERO, cc, cp, webdata);
/* Set c' for predators to 0. */
for (jy = 0; jy < MY; jy++) {
yloc = NSMX * jy;
for (jx = 0; jx < MX; jx++) {
loc = yloc + NUM_SPECIES * jx;
for (is = np; is < NUM_SPECIES; is++) {
cpv[loc+is] = ZERO;
}
}
}
}
/*
* Print first lines of output (problem description)
*/
/*
 * Print first lines of output (problem description): problem sizes,
 * tolerances, linear-solver bandwidths, and the output-table header.
 */
static void PrintHeader(sunindextype mu, sunindextype ml, realtype rtol, realtype atol)
{
printf("\nidasFoodWeb_bnd_omp: Predator-prey DAE OpenMP example problem for IDAS \n\n");
printf("Number of species ns: %d", NUM_SPECIES);
printf(" Mesh dimensions: %d x %d", MX, MY);
printf(" System size: %d\n", NEQ);
/* Precision-specific format strings; the #else (single-precision)
branch intentionally uses %g like the double branch. */
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("Tolerance parameters: rtol = %Lg atol = %Lg\n", rtol, atol);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#else
printf("Tolerance parameters: rtol = %g atol = %g\n", rtol, atol);
#endif
printf("Linear solver: SUNBAND, Band parameters mu = %ld, ml = %ld\n",
(long int) mu, (long int) ml);
printf("CalcIC called to correct initial predator concentrations.\n\n");
printf("-----------------------------------------------------------\n");
printf(" t bottom-left top-right");
printf(" | nst k h\n");
printf("-----------------------------------------------------------\n\n");
}
/*
* PrintOutput: Print output values at output time t = tt.
* Selected run statistics are printed. Then values of the concentrations
* are printed for the bottom left and top right grid points only.
*/
/*
 * PrintOutput: Print output values at output time t = tt.
 * Selected run statistics are printed. Then values of the concentrations
 * are printed for the bottom left and top right grid points only.
 */
static void PrintOutput(void *ida_mem, N_Vector c, realtype t)
{
int i, kused, retval;
long int nst;
realtype *c_bl, *c_tr, hused;
/* Query solver statistics: last order, step count, last step size. */
retval = IDAGetLastOrder(ida_mem, &kused);
check_retval(&retval, "IDAGetLastOrder", 1);
retval = IDAGetNumSteps(ida_mem, &nst);
check_retval(&retval, "IDAGetNumSteps", 1);
retval = IDAGetLastStep(ida_mem, &hused);
check_retval(&retval, "IDAGetLastStep", 1);
/* Pointers to the species values at the two corner grid points. */
c_bl = IJ_Vptr(c,0,0);
c_tr = IJ_Vptr(c,MX-1,MY-1);
#if defined(SUNDIALS_EXTENDED_PRECISION)
printf("%8.2Le %12.4Le %12.4Le | %3ld %1d %12.4Le\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4Le %12.4Le |\n",c_bl[i],c_tr[i]);
#elif defined(SUNDIALS_DOUBLE_PRECISION)
printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#else
printf("%8.2e %12.4e %12.4e | %3ld %1d %12.4e\n",
t, c_bl[0], c_tr[0], nst, kused, hused);
for (i=1;i<NUM_SPECIES;i++)
printf(" %12.4e %12.4e |\n",c_bl[i],c_tr[i]);
#endif
printf("\n");
}
/*
* PrintFinalStats: Print final run data contained in iopt.
*/
/*
 * PrintFinalStats: Print final run data contained in iopt.
 * Residual evaluations are reported as the sum of those made by the
 * integrator (nre) and by the linear solver (nreLS).
 */
static void PrintFinalStats(void *ida_mem)
{
long int nst, nre, nreLS, nni, nje, netf, ncfn;
int retval;
retval = IDAGetNumSteps(ida_mem, &nst);
check_retval(&retval, "IDAGetNumSteps", 1);
retval = IDAGetNumNonlinSolvIters(ida_mem, &nni);
check_retval(&retval, "IDAGetNumNonlinSolvIters", 1);
retval = IDAGetNumResEvals(ida_mem, &nre);
check_retval(&retval, "IDAGetNumResEvals", 1);
retval = IDAGetNumErrTestFails(ida_mem, &netf);
check_retval(&retval, "IDAGetNumErrTestFails", 1);
retval = IDAGetNumNonlinSolvConvFails(ida_mem, &ncfn);
check_retval(&retval, "IDAGetNumNonlinSolvConvFails", 1);
retval = IDAGetNumJacEvals(ida_mem, &nje);
check_retval(&retval, "IDAGetNumJacEvals", 1);
retval = IDAGetNumLinResEvals(ida_mem, &nreLS);
check_retval(&retval, "IDAGetNumLinResEvals", 1);
printf("-----------------------------------------------------------\n");
printf("Final run statistics: \n\n");
printf("Number of steps = %ld\n", nst);
printf("Number of residual evaluations = %ld\n", nre+nreLS);
printf("Number of Jacobian evaluations = %ld\n", nje);
printf("Number of nonlinear iterations = %ld\n", nni);
printf("Number of error test failures = %ld\n", netf);
printf("Number of nonlinear conv. failures = %ld\n", ncfn);
}
/*
* Fweb: Rate function for the food-web problem.
* This routine computes the right-hand sides of the system equations,
* consisting of the diffusion term and interaction term.
* The interaction term is computed by the function WebRates.
*/
/*
 * Fweb: Rate function for the food-web problem.
 * For every grid point, computes the reaction terms via WebRates and the
 * central-difference diffusion terms (with reflection indices idyu/idyl/
 * idxu/idxl implementing homogeneous Neumann boundaries), and loads the
 * result into crate.
 */
static void Fweb(realtype tcalc, N_Vector cc, N_Vector crate,
UserData webdata)
{
sunindextype jx, jy, is, idyu, idyl, idxu, idxl;
realtype xx, yy, *cxy, *ratesxy, *cratexy, dcyli, dcyui, dcxli, dcxui;
/* Loop over grid points, evaluate interaction vector (length ns),
form diffusion difference terms, and load crate. */
jx = jy = is = 0;
for (jy = 0; jy < MY; jy++) {
yy = (webdata->dy) * jy ;
/* Offsets to the y-neighbors; sign flips at the boundary (Neumann). */
idyu = (jy!=MY-1) ? NSMX : -NSMX;
idyl = (jy!= 0 ) ? NSMX : -NSMX;
for (jx = 0; jx < MX; jx++) {
xx = (webdata->dx) * jx;
idxu = (jx!= MX-1) ? NUM_SPECIES : -NUM_SPECIES;
idxl = (jx!= 0 ) ? NUM_SPECIES : -NUM_SPECIES;
cxy = IJ_Vptr(cc,jx,jy);
ratesxy = IJ_Vptr(webdata->rates,jx,jy);
cratexy = IJ_Vptr(crate,jx,jy);
/* Get interaction vector at this grid point. */
WebRates(xx, yy, cxy, ratesxy, webdata);
/* Loop over species, do differencing, load crate segment.
NOTE(review): this parallel region spans only NUM_SPECIES (= 2)
iterations and is re-entered for every grid point; the region
overhead likely exceeds the work. Parallelizing the jy loop
instead would presumably scale better -- verify and profile. */
#pragma omp parallel for default(shared) private(is, dcyli, dcyui, dcxli, dcxui) schedule(static) num_threads(webdata->nthreads)
for (is = 0; is < NUM_SPECIES; is++) {
/* Differencing in y. */
dcyli = *(cxy+is) - *(cxy - idyl + is) ;
dcyui = *(cxy + idyu + is) - *(cxy+is);
/* Differencing in x. */
dcxli = *(cxy+is) - *(cxy - idxl + is);
dcxui = *(cxy + idxu +is) - *(cxy+is);
/* Compute the crate values at (xx,yy). */
cratexy[is] = coy[is] * (dcyui - dcyli) +
cox[is] * (dcxui - dcxli) + ratesxy[is];
} /* End is loop */
} /* End of jx loop */
} /* End of jy loop */
}
/*
* WebRates: Evaluate reaction rates at a given spatial point.
* At a given (x,y), evaluate the array of ns reaction terms R.
*/
/*
 * WebRates: Evaluate reaction rates at a given spatial point.
 * At (xx,yy), computes R_i = c_i * (b_i * fac + (A c)_i) for each of the
 * ns species, where fac is the spatially varying growth factor.
 */
static void WebRates(realtype xx, realtype yy, realtype *cxy, realtype *ratesxy,
                     UserData webdata)
{
  int is;
  realtype fac, interaction;

  /* Spatially varying growth factor, common to all species. */
  fac = ONE + ALPHA*xx*yy + BETA*sin(FOURPI*xx)*sin(FOURPI*yy);

  /* One pass per species: linear interaction term, then the full rate. */
  for (is = 0; is < NUM_SPECIES; is++) {
    interaction = dotprod(NUM_SPECIES, cxy, acoef[is]);
    ratesxy[is] = cxy[is]*( bcoef[is]*fac + interaction );
  }
}
/*
* dotprod: dot product routine for realtype arrays, for use by WebRates.
*/
/*
 * dotprod: dot product of two realtype arrays of length size,
 * for use by WebRates.
 */
static realtype dotprod(sunindextype size, realtype *x1, realtype *x2)
{
  sunindextype k;
  realtype sum;

  sum = ZERO;
  for (k = 0; k < size; k++)
    sum += x1[k] * x2[k];
  return(sum);
}
/*
* Check function return value...
* opt == 0 means SUNDIALS function allocates memory so check if
* returned NULL pointer
* opt == 1 means SUNDIALS function returns an integer value so check if
* retval < 0
* opt == 2 means function allocates memory so check if returned
* NULL pointer
*/
/*
 * check_retval: uniform error reporting for SUNDIALS calls.
 *   opt == 0 : returnvalue must be a non-NULL pointer (allocation check)
 *   opt == 1 : returnvalue points to an int; a value < 0 is an error
 *   opt == 2 : returnvalue must be non-NULL (plain memory check)
 * Prints a message to stderr and returns 1 on error, 0 otherwise.
 */
static int check_retval(void *returnvalue, char *funcname, int opt)
{
  if (opt == 1) {
    /* Integer return code supplied by the caller. */
    int *errvalue = (int *) returnvalue;
    if (*errvalue < 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed with retval = %d\n\n",
              funcname, *errvalue);
      return(1);
    }
    return(0);
  }

  if (returnvalue == NULL) {
    if (opt == 0) {
      fprintf(stderr,
              "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
    if (opt == 2) {
      fprintf(stderr,
              "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
              funcname);
      return(1);
    }
  }

  return(0);
}
|
GB_unaryop__identity_uint16_int16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint16_int16
// op(A') function: GB_tran__identity_uint16_int16
// C type: uint16_t
// A type: int16_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int16_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
// GB_unop__identity_uint16_int16: apply the IDENTITY operator entrywise,
// casting each int16_t entry of Ax to a uint16_t entry of Cx.  C and A
// share the same pattern, so this is a flat loop over all anz entries.
// Auto-generated file: the real work is in the GB_* macros defined above.
GrB_Info GB_unop__identity_uint16_int16
(
uint16_t *restrict Cx,       // output value array, length anz
const int16_t *restrict Ax,  // input value array, length anz
int64_t anz,                 // number of entries to process
int nthreads                 // number of OpenMP threads to use
)
{
#if GB_DISABLE
// this operator/type combination was disabled at configure time
return (GrB_NO_VALUE) ;
#else
// each iteration is independent: Cx [p] = (uint16_t) Ax [p]
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
// GB_tran__identity_uint16_int16: C = identity (cast (A')).  The loop body
// is generated by textually including the shared transpose template, which
// expands the GB_* macros defined earlier in this file.
GrB_Info GB_tran__identity_uint16_int16
(
GrB_Matrix C,                      // output matrix (already allocated)
const GrB_Matrix A,                // input matrix to transpose/typecast
int64_t *restrict *Rowcounts,      // per-slice row counts from phase 1
GBI_single_iterator Iter,          // iterator over the vectors of A
const int64_t *restrict A_slice,   // partition of A across slices
int naslice                        // number of slices of A
)
{
#if GB_DISABLE
// this operator/type combination was disabled at configure time
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cache.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% CCCC AAA CCCC H H EEEEE %
% C A A C H H E %
% C AAAAA C HHHHH EEE %
% C A A C H H E %
% CCCC A A CCCC H H EEEEE %
% %
% %
% MagickCore Pixel Cache Methods %
% %
% Software Design %
% Cristy %
% July 1999 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/distribute-cache-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/nt-base-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/policy.h"
#include "MagickCore/quantum.h"
#include "MagickCore/random_.h"
#include "MagickCore/registry.h"
#include "MagickCore/resource_.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#if defined(MAGICKCORE_ZLIB_DELEGATE)
#include "zlib.h"
#endif
/*
Define declarations.
*/
#define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent)
#define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \
GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse)
/*
Typedef declarations.
*/
typedef struct _MagickModulo
{
ssize_t
quotient,
remainder;
} MagickModulo;
/*
Forward declarations.
*/
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static Cache
GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *)
magick_hot_spot;
static const Quantum
*GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t,
const ssize_t,const size_t,const size_t,ExceptionInfo *),
*GetVirtualPixelsCache(const Image *);
static const void
*GetVirtualMetacontentFromCache(const Image *);
static MagickBooleanType
GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *,
ExceptionInfo *),
GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod,
const ssize_t,const ssize_t,Quantum *,ExceptionInfo *),
OpenPixelCache(Image *,const MapMode,ExceptionInfo *),
OpenPixelCacheOnDisk(CacheInfo *,const MapMode),
ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
ReadPixelCacheMetacontent(CacheInfo *magick_restrict,
NexusInfo *magick_restrict,ExceptionInfo *),
SyncAuthenticPixelsCache(Image *,ExceptionInfo *),
WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict,
ExceptionInfo *),
WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict,
ExceptionInfo *);
static Quantum
*GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t,
const size_t,ExceptionInfo *),
*SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode,
const ssize_t,const ssize_t,const size_t,const size_t,
const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *)
magick_hot_spot;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
static void
CopyOpenCLBuffer(CacheInfo *magick_restrict);
#endif
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
/*
Global declarations.
*/
static SemaphoreInfo
*cache_semaphore = (SemaphoreInfo *) NULL;
static ssize_t
cache_anonymous_memory = (-1);
static time_t
cache_epoch = 0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCache() acquires a pixel cache.
%
% The format of the AcquirePixelCache() method is:
%
% Cache AcquirePixelCache(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate Cache AcquirePixelCache(const size_t number_threads)
{
  CacheInfo
    *magick_restrict cache_info;

  char
    *value;

  /*
    Allocate and zero a new CacheInfo, then seed its defaults: undefined
    cache type, I/O mode, sRGB colorspace, no backing file descriptor, and
    the id of the acquiring thread.
  */
  cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info));
  if (cache_info == (CacheInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(cache_info,0,sizeof(*cache_info));
  cache_info->type=UndefinedCache;
  cache_info->mode=IOMode;
  cache_info->disk_mode=IOMode;
  cache_info->colorspace=sRGBColorspace;
  cache_info->file=(-1);
  cache_info->id=GetMagickThreadId();
  /*
    Size the per-thread nexus pool: start from the caller's request, raise
    it to the OpenMP maximum and the ThreadResource limit, and never let it
    be zero.
  */
  cache_info->number_threads=number_threads;
  if (GetOpenMPMaximumThreads() > cache_info->number_threads)
    cache_info->number_threads=GetOpenMPMaximumThreads();
  if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads)
    cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  if (cache_info->number_threads == 0)
    cache_info->number_threads=1;
  cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    The synchronize flag may come from the environment or from policy; the
    policy value is read second and therefore wins when both are defined.
  */
  value=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  value=GetPolicyValue("cache:synchronize");
  if (value != (const char *) NULL)
    {
      cache_info->synchronize=IsStringTrue(value);
      value=DestroyString(value);
    }
  cache_info->width_limit=GetMagickResourceLimit(WidthResource);
  cache_info->height_limit=GetMagickResourceLimit(HeightResource);
  cache_info->semaphore=AcquireSemaphoreInfo();
  cache_info->reference_count=1;
  cache_info->file_semaphore=AcquireSemaphoreInfo();
  cache_info->debug=IsEventLogging();
  cache_info->signature=MagickCoreSignature;
  return((Cache ) cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCacheNexus() allocates the NexusInfo structure.
%
% The format of the AcquirePixelCacheNexus method is:
%
% NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
%
% A description of each parameter follows:
%
% o number_threads: the number of nexus threads.
%
*/
MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads)
{
  NexusInfo
    **magick_restrict nexus_info;

  register ssize_t
    i;

  /*
    Allocate 2*number_threads nexus slots as one pointer table plus one
    contiguous slab of NexusInfo structs.  The first number_threads entries
    are the per-thread nexuses; each is paired with a companion from the
    second half via its virtual_nexus pointer.
  */
  nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2*
    number_threads,sizeof(*nexus_info)));
  if (nexus_info == (NexusInfo **) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  /*
    The slab is anchored at *nexus_info; DestroyPixelCacheNexus() relies on
    this to free it in one call.
  */
  *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads,
    sizeof(**nexus_info));
  if (*nexus_info == (NexusInfo *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info));
  for (i=0; i < (ssize_t) (2*number_threads); i++)
  {
    nexus_info[i]=(*nexus_info+i);
    if (i < (ssize_t) number_threads)
      nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i);
    nexus_info[i]->signature=MagickCoreSignature;
  }
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquirePixelCachePixels() returns the pixels associated with the specified
% image.
%
% The format of the AcquirePixelCachePixels() method is:
%
% void *AcquirePixelCachePixels(const Image *image,size_t *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Expose the in-core pixel store of the image's cache.  Only memory- and
    map-backed caches hold their pixels directly; for any other cache type
    the length is set to 0 and NULL is returned.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=0;
  if ((cache_info->type == MemoryCache) || (cache_info->type == MapCache))
    {
      *length=(size_t) cache_info->length;
      return(cache_info->pixels);
    }
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t G e n e s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentGenesis() instantiates the cache component.
%
% The format of the CacheComponentGenesis method is:
%
% MagickBooleanType CacheComponentGenesis(void)
%
*/
MagickPrivate MagickBooleanType CacheComponentGenesis(void)
{
  /*
    Allocate the module-wide cache semaphore on first use; subsequent calls
    are no-ops.  Always reports success.
  */
  if (cache_semaphore != (SemaphoreInfo *) NULL)
    return(MagickTrue);
  cache_semaphore=AcquireSemaphoreInfo();
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C a c h e C o m p o n e n t T e r m i n u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CacheComponentTerminus() destroys the cache component.
%
% The format of the CacheComponentTerminus() method is:
%
% CacheComponentTerminus(void)
%
*/
MagickPrivate void CacheComponentTerminus(void)
{
  /*
    Make sure the module-wide semaphore exists (genesis may never have run)
    so the relinquish below always has something valid to operate on.
  */
  if (cache_semaphore == (SemaphoreInfo *) NULL)
    ActivateSemaphoreInfo(&cache_semaphore);
  /* no op-- nothing to destroy */
  RelinquishSemaphoreInfo(&cache_semaphore);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l i p P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipPixelCacheNexus() clips the cache nexus as defined by the image clip
% mask. The method returns MagickTrue if the pixel region is clipped,
% otherwise MagickFalse.
%
% The format of the ClipPixelCacheNexus() method is:
%
% MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClipPixelCacheNexus(Image *image,
  NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    number_pixels;

  register Quantum
    *magick_restrict p,
    *magick_restrict q;

  register ssize_t
    n;

  /*
    Apply clip mask.  A no-op (success) when the image carries no write
    mask channel; a failure when the image has no cache.
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if ((image->channels & WriteMaskChannel) == 0)
    return(MagickTrue);
  cache_info=(CacheInfo *) image->cache;
  if (cache_info == (Cache) NULL)
    return(MagickFalse);
  /*
    p walks the authentic pixels of the nexus region (fetched through the
    virtual nexus so the primary nexus buffer q is left untouched); q walks
    the staged pixels being clipped in place.
  */
  p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
    nexus_info->region.width,nexus_info->region.height,
    nexus_info->virtual_nexus,exception);
  q=nexus_info->pixels;
  number_pixels=(MagickSizeType) nexus_info->region.width*
    nexus_info->region.height;
  for (n=0; n < (ssize_t) number_pixels; n++)
  {
    double
      mask_alpha;

    register ssize_t
      i;

    if (p == (Quantum *) NULL)
      break;
    mask_alpha=QuantumScale*GetPixelWriteMask(image,p);
    if (fabs(mask_alpha) >= MagickEpsilon)
      {
        /*
          Composite each updatable channel of the staged pixel over the
          authentic pixel, weighted by the write-mask alpha; channels
          without the update trait are left as staged.
        */
        for (i=0; i < (ssize_t) image->number_channels; i++)
        {
          PixelChannel channel = GetPixelChannelChannel(image,i);
          PixelTrait traits = GetPixelChannelTraits(image,channel);
          if ((traits & UpdatePixelTrait) == 0)
            continue;
          q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha*
            GetPixelAlpha(image,p),(double) q[i],(double)
            GetPixelAlpha(image,q)));
        }
        SetPixelAlpha(image,GetPixelAlpha(image,p),q);
      }
    p+=GetPixelChannels(image);
    q+=GetPixelChannels(image);
  }
  /*
    MagickFalse only if the loop exited early, i.e. p was NULL.
  */
  return(n < (ssize_t) number_pixels ? MagickFalse : MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCache() clones a pixel cache.
%
% The format of the ClonePixelCache() method is:
%
% Cache ClonePixelCache(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate Cache ClonePixelCache(const Cache cache)
{
  const CacheInfo
    *magick_restrict source;

  CacheInfo
    *magick_restrict clone;

  /*
    Create a fresh pixel cache that inherits the source cache's thread
    count and virtual pixel method.  Pixel data itself is not copied here.
  */
  assert(cache != NULL);
  source=(const CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  if (source->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",source->filename);
  clone=(CacheInfo *) AcquirePixelCache(source->number_threads);
  clone->virtual_pixel_method=source->virtual_pixel_method;
  return((Cache ) clone);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheMethods() clones the pixel cache methods from one cache to
% another.
%
% The format of the ClonePixelCacheMethods() method is:
%
% void ClonePixelCacheMethods(Cache clone,const Cache cache)
%
% A description of each parameter follows:
%
% o clone: Specifies a pointer to a Cache structure.
%
% o cache: the pixel cache.
%
*/
MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache)
{
  CacheInfo
    *magick_restrict destination,
    *magick_restrict source;

  /*
    Copy the cache method table of `cache` into `clone`; no other state is
    transferred.
  */
  assert(clone != (Cache) NULL);
  destination=(CacheInfo *) clone;
  assert(destination->signature == MagickCoreSignature);
  if (destination->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      destination->filename);
  assert(cache != (Cache) NULL);
  source=(CacheInfo *) cache;
  assert(source->signature == MagickCoreSignature);
  destination->methods=source->methods;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l o n e P i x e l C a c h e R e p o s i t o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClonePixelCacheRepository() clones the source pixel cache to the destination
% cache.
%
% The format of the ClonePixelCacheRepository() method is:
%
% MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info,
% CacheInfo *source_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o source_info: the source pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ClonePixelCacheOnDisk(
  CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info)
{
  MagickSizeType
    extent;

  size_t
    quantum;

  ssize_t
    count;

  struct stat
    file_stats;

  unsigned char
    *buffer;

  /*
    Clone pixel cache on disk with identical morphology: open both backing
    files, rewind them, and stream the source file into the clone.
  */
  if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) ||
      (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse))
    return(MagickFalse);
  if ((lseek(cache_info->file,0,SEEK_SET) < 0) ||
      (lseek(clone_info->file,0,SEEK_SET) < 0))
    return(MagickFalse);
  /*
    Copy in chunks of up to MagickMaxBufferExtent bytes, shrunk to the
    source file size when fstat() reports one.
  */
  quantum=(size_t) MagickMaxBufferExtent;
  if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0))
    quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent);
  buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer));
  if (buffer == (unsigned char *) NULL)
    ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
  extent=0;
  while ((count=read(cache_info->file,buffer,quantum)) > 0)
  {
    ssize_t
      number_bytes;

    /*
      NOTE(review): a short-but-successful write() aborts the copy instead
      of being retried; the final length check below reports the failure.
    */
    number_bytes=write(clone_info->file,buffer,(size_t) count);
    if (number_bytes != count)
      break;
    extent+=number_bytes;
  }
  buffer=(unsigned char *) RelinquishMagickMemory(buffer);
  /*
    Success only when every byte of the source cache was transferred.
  */
  if (extent != cache_info->length)
    return(MagickFalse);
  return(MagickTrue);
}
static MagickBooleanType ClonePixelCacheRepository(
  CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info,
  ExceptionInfo *exception)
{
/*
  cache_number_threads() picks the OpenMP thread count: 1 when the caller
  disables multithreading, a small cap (<= 2) when either cache is backed by
  disk or a distributed server (I/O bound), else scaled by the chunk size.
*/
#define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource))
#define cache_number_threads(source,destination,chunk,multithreaded) \
  num_threads((multithreaded) == 0 ? 1 : \
    (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \
    (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? \
    MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \
    MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1))

  MagickBooleanType
    optimize,
    status;

  NexusInfo
    **magick_restrict cache_nexus,
    **magick_restrict clone_nexus;

  size_t
    length;

  ssize_t
    y;

  assert(cache_info != (CacheInfo *) NULL);
  assert(clone_info != (CacheInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  if (cache_info->type == PingCache)
    return(MagickTrue);
  /*
    When source and clone agree on every aspect of their morphology the
    pixel data can be copied wholesale.
  */
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  if ((cache_info->storage_class == clone_info->storage_class) &&
      (cache_info->colorspace == clone_info->colorspace) &&
      (cache_info->alpha_trait == clone_info->alpha_trait) &&
      (cache_info->channels == clone_info->channels) &&
      (cache_info->columns == clone_info->columns) &&
      (cache_info->rows == clone_info->rows) &&
      (cache_info->number_channels == clone_info->number_channels) &&
      (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) &&
      (cache_info->metacontent_extent == clone_info->metacontent_extent))
    {
      /*
        Identical pixel cache morphology.
      */
      if (((cache_info->type == MemoryCache) ||
           (cache_info->type == MapCache)) &&
          ((clone_info->type == MemoryCache) || (clone_info->type == MapCache)))
        {
          /*
            Both in core: one memcpy for the pixels, one for metacontent.
          */
          (void) memcpy(clone_info->pixels,cache_info->pixels,
            cache_info->number_channels*cache_info->columns*cache_info->rows*
            sizeof(*cache_info->pixels));
          if ((cache_info->metacontent_extent != 0) &&
              (clone_info->metacontent_extent != 0))
            (void) memcpy(clone_info->metacontent,cache_info->metacontent,
              cache_info->columns*cache_info->rows*
              clone_info->metacontent_extent*sizeof(unsigned char));
          return(MagickTrue);
        }
      if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache))
        return(ClonePixelCacheOnDisk(cache_info,clone_info));
    }
  /*
    Mismatched pixel cache morphology: copy row by row through per-thread
    nexuses, remapping channels when the channel maps differ.
  */
  cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads);
  clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads);
  length=cache_info->number_channels*sizeof(*cache_info->channel_map);
  optimize=(cache_info->number_channels == clone_info->number_channels) &&
    (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ?
    MagickTrue : MagickFalse;
  /*
    Per-row copy length: whichever side has fewer channel-samples per row.
  */
  length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns,
    clone_info->number_channels*clone_info->columns);
  status=MagickTrue;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
  for (y=0; y < (ssize_t) cache_info->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    Quantum
      *pixels;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    if (y >= (ssize_t) clone_info->rows)
      continue;
    /*
      Read a source row into this thread's cache nexus.
    */
    pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
      cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception);
    if (status == MagickFalse)
      continue;
    /*
      Stage the corresponding clone row, zeroed first so channels absent in
      the source are well defined.
    */
    pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
      clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
    if (pixels == (Quantum *) NULL)
      continue;
    (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length);
    if (optimize != MagickFalse)
      (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length*
        sizeof(Quantum));
    else
      {
        register const Quantum
          *magick_restrict p;

        register Quantum
          *magick_restrict q;

        /*
          Mismatched pixel channel map: gather each clone channel from the
          source pixel via the source's channel offsets.
        */
        p=cache_nexus[id]->pixels;
        q=clone_nexus[id]->pixels;
        for (x=0; x < (ssize_t) cache_info->columns; x++)
        {
          register ssize_t
            i;

          if (x == (ssize_t) clone_info->columns)
            break;
          for (i=0; i < (ssize_t) clone_info->number_channels; i++)
          {
            PixelChannel
              channel;

            PixelTrait
              traits;

            channel=clone_info->channel_map[i].channel;
            traits=cache_info->channel_map[channel].traits;
            if (traits != UndefinedPixelTrait)
              *q=*(p+cache_info->channel_map[channel].offset);
            q++;
          }
          p+=cache_info->number_channels;
        }
      }
    status=WritePixelCachePixels(clone_info,clone_nexus[id],exception);
  }
  if ((cache_info->metacontent_extent != 0) &&
      (clone_info->metacontent_extent != 0))
    {
      /*
        Clone metacontent.
      */
      length=(size_t) MagickMin(cache_info->metacontent_extent,
        clone_info->metacontent_extent);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp parallel for schedule(static) shared(status) \
        cache_number_threads(cache_info,clone_info,cache_info->rows,1)
#endif
      for (y=0; y < (ssize_t) cache_info->rows; y++)
      {
        const int
          id = GetOpenMPThreadId();

        Quantum
          *pixels;

        if (status == MagickFalse)
          continue;
        if (y >= (ssize_t) clone_info->rows)
          continue;
        pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y,
          cache_info->columns,1,MagickFalse,cache_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception);
        if (status == MagickFalse)
          continue;
        pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y,
          clone_info->columns,1,MagickFalse,clone_nexus[id],exception);
        if (pixels == (Quantum *) NULL)
          continue;
        if ((clone_nexus[id]->metacontent != (void *) NULL) &&
            (cache_nexus[id]->metacontent != (void *) NULL))
          (void) memcpy(clone_nexus[id]->metacontent,
            cache_nexus[id]->metacontent,length*sizeof(unsigned char));
        status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception);
      }
    }
  clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads);
  cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"%s => %s",
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type),
        CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type));
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixelCache() method is:
%
% void DestroyImagePixelCache(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void DestroyImagePixelCache(Image *image)
{
  /*
    Release the image's reference to its pixel cache, if it holds one.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->cache == (void *) NULL)
    return;
  image->cache=DestroyPixelCache(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImagePixels() deallocates memory associated with the pixel cache.
%
% The format of the DestroyImagePixels() method is:
%
% void DestroyImagePixels(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DestroyImagePixels(Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Dispose of the image's pixel cache, deferring to a registered destroy
    handler when the cache methods provide one.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.destroy_pixel_handler == (DestroyPixelHandler) NULL)
    {
      image->cache=DestroyPixelCache(image->cache);
      return;
    }
  cache_info->methods.destroy_pixel_handler(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCache() deallocates memory associated with the pixel cache.
%
% The format of the DestroyPixelCache() method is:
%
% Cache DestroyPixelCache(Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info)
{
  int
    status;

  /*
    Close the cache's backing file descriptor and give back its file
    resource slot.  MagickFalse when there is no open descriptor or the
    close itself fails.
  */
  if (cache_info->file == -1)
    return(MagickFalse);
  status=close(cache_info->file);
  cache_info->file=(-1);
  RelinquishMagickResource(FileResource,1);
  return(status == -1 ? MagickFalse : MagickTrue);
}
static inline void RelinquishPixelCachePixels(CacheInfo *cache_info)
{
  /*
    Release the pixel store backing this cache according to its type, then
    reset the cache to an undefined, unmapped state.
  */
  switch (cache_info->type)
  {
    case MemoryCache:
    {
#if defined(MAGICKCORE_OPENCL_SUPPORT)
      /*
        Pixels owned by an OpenCL cache info are released through it.
      */
      if (cache_info->opencl != (MagickCLCacheInfo) NULL)
        {
          cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl,
            MagickTrue);
          cache_info->pixels=(Quantum *) NULL;
          break;
        }
#endif
      if (cache_info->mapped == MagickFalse)
        cache_info->pixels=(Quantum *) RelinquishAlignedMemory(
          cache_info->pixels);
      else
        (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      RelinquishMagickResource(MemoryResource,cache_info->length);
      break;
    }
    case MapCache:
    {
      (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length);
      cache_info->pixels=(Quantum *) NULL;
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(MapResource,cache_info->length);
    }
    /*
      NOTE(review): no break above -- MapCache deliberately falls through to
      DiskCache so the file descriptor behind the mapping is also closed and
      the disk resource relinquished; confirm against upstream before
      "fixing".
    */
    case DiskCache:
    {
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode))
        (void) RelinquishUniqueFileResource(cache_info->cache_filename);
      *cache_info->cache_filename='\0';
      RelinquishMagickResource(DiskResource,cache_info->length);
      break;
    }
    case DistributedCache:
    {
      *cache_info->cache_filename='\0';
      (void) RelinquishDistributePixelCache((DistributeCacheInfo *)
        cache_info->server_info);
      break;
    }
    default:
      break;
  }
  cache_info->type=UndefinedCache;
  cache_info->mapped=MagickFalse;
  cache_info->metacontent=(void *) NULL;
}
MagickPrivate Cache DestroyPixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  /*
    Drop one reference under the lock; only the holder of the final
    reference proceeds to tear the cache down.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count--;
  if (cache_info->reference_count != 0)
    {
      UnlockSemaphoreInfo(cache_info->semaphore);
      return((Cache) NULL);
    }
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->debug != MagickFalse)
    {
      char
        message[MagickPathExtent];

      (void) FormatLocaleString(message,MagickPathExtent,"destroy %s",
        cache_info->filename);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  /*
    Release the pixel store first, then the auxiliary structures, and
    finally the semaphores and the CacheInfo itself.
  */
  RelinquishPixelCachePixels(cache_info);
  if (cache_info->server_info != (DistributeCacheInfo *) NULL)
    cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *)
      cache_info->server_info);
  if (cache_info->nexus_info != (NexusInfo **) NULL)
    cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info,
      cache_info->number_threads);
  if (cache_info->random_info != (RandomInfo *) NULL)
    cache_info->random_info=DestroyRandomInfo(cache_info->random_info);
  if (cache_info->file_semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->file_semaphore);
  if (cache_info->semaphore != (SemaphoreInfo *) NULL)
    RelinquishSemaphoreInfo(&cache_info->semaphore);
  /*
    Invalidate the signature so stale pointers are caught by asserts.
  */
  cache_info->signature=(~MagickCoreSignature);
  cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info);
  cache=(Cache) NULL;
  return(cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e s t r o y P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyPixelCacheNexus() destroys a pixel cache nexus.
%
% The format of the DestroyPixelCacheNexus() method is:
%
% NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus to destroy.
%
% o number_threads: the number of nexus threads.
%
*/
static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info)
{
  /*
    Free the staging buffer owned by this nexus -- unmap it when it was
    memory-mapped, otherwise release the aligned allocation -- and reset
    the nexus bookkeeping.
  */
  if (nexus_info->mapped != MagickFalse)
    (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length);
  else
    (void) RelinquishAlignedMemory(nexus_info->cache);
  nexus_info->cache=(Quantum *) NULL;
  nexus_info->pixels=(Quantum *) NULL;
  nexus_info->metacontent=(void *) NULL;
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
}
MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info,
  const size_t number_threads)
{
  register ssize_t
    n;

  /*
    Tear down all 2*number_threads nexus slots (each thread owns a primary
    nexus and a virtual companion), then free the shared struct slab
    anchored at *nexus_info and finally the pointer table itself.
  */
  assert(nexus_info != (NexusInfo **) NULL);
  for (n=0; n < (ssize_t) (2*number_threads); n++)
  {
    if (nexus_info[n]->cache != (Quantum *) NULL)
      RelinquishCacheNexusPixels(nexus_info[n]);
    nexus_info[n]->signature=(~MagickCoreSignature);
  }
  *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info);
  nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info);
  return(nexus_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontent() returns the authentic metacontent corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the associated pixels are not available.
%
% The format of the GetAuthenticMetacontent() method is:
%
% void *GetAuthenticMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void *GetAuthenticMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the metacontent buffer associated with the calling thread's
    nexus, deferring to a registered handler when the cache methods
    provide one.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_authentic_metacontent_from_handler ==
      (GetAuthenticMetacontentFromHandler) NULL)
    {
      assert(id < (int) cache_info->number_threads);
      return(cache_info->nexus_info[id]->metacontent);
    }
  return(cache_info->methods.get_authentic_metacontent_from_handler(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticMetacontentFromCache() returns the meta-content corresponding
% with the last call to QueueAuthenticPixelsCache() or
% GetAuthenticPixelsCache().
%
% The format of the GetAuthenticMetacontentFromCache() method is:
%
% void *GetAuthenticMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void *GetAuthenticMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  /*
    Return the calling thread's nexus metacontent buffer directly, with no
    handler indirection (this IS the default handler).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(cache_info->nexus_info[id]->metacontent);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL
% operations.
%
% The format of the GetAuthenticOpenCLBuffer() method is:
%
% cl_mem GetAuthenticOpenCLBuffer(const Image *image,
% MagickCLDevice device,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o device: the device to use.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image,
  MagickCLDevice device,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(device != (const MagickCLDevice) NULL);
  cache_info=(CacheInfo *) image->cache;
  /*
    An undefined or shared cache is synchronized first; SyncImagePixelCache
    may replace image->cache, so re-read it afterwards.
  */
  if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1))
    {
      SyncImagePixelCache((Image *) image,exception);
      cache_info=(CacheInfo *) image->cache;
    }
  /*
    Only an unmapped, in-memory pixel cache can back an OpenCL buffer.
  */
  if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse))
    return((cl_mem) NULL);
  LockSemaphoreInfo(cache_info->semaphore);
  /*
    If the existing OpenCL info belongs to a different device context,
    copy it so the buffer matches the requested device.
  */
  if ((cache_info->opencl != (MagickCLCacheInfo) NULL) &&
      (cache_info->opencl->device->context != device->context))
    cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    {
      assert(cache_info->pixels != (Quantum *) NULL);
      /* Wrap the in-memory pixel buffer in a new OpenCL cache info. */
      cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels,
        cache_info->length);
    }
  /* Retain while still holding the lock so the buffer cannot vanish. */
  if (cache_info->opencl != (MagickCLCacheInfo) NULL)
    RetainOpenCLMemObject(cache_info->opencl->buffer);
  UnlockSemaphoreInfo(cache_info->semaphore);
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return((cl_mem) NULL);
  assert(cache_info->opencl->pixels == cache_info->pixels);
  return(cache_info->opencl->buffer);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or
% disk pixel cache as defined by the geometry parameters. A pointer to the
% pixels is returned if the pixels are transferred, otherwise a NULL is
% returned.
%
% The format of the GetAuthenticPixelCacheNexus() method is:
%
% Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to return.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  Quantum
    *magick_restrict pixels;

  /*
    Transfer pixels from the cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  /*
    Map the requested region into the nexus; the MagickTrue flag is passed
    through to QueueAuthenticPixelCacheNexus (presumably to preserve the
    existing pixel contents — confirm against that function).
  */
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue,
    nexus_info,exception);
  if (pixels == (Quantum *) NULL)
    return((Quantum *) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  /* When the nexus aliases the cache pixels directly, no read is needed. */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(pixels);
  /* Otherwise fill the nexus staging buffer from the cache backing store. */
  if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse)
    return((Quantum *) NULL);
  /* Metacontent is read only when the cache actually carries any. */
  if (cache_info->metacontent_extent != 0)
    if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)
      return((Quantum *) NULL);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsFromCache() returns the pixels associated with the last
% call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods.
%
% The format of the GetAuthenticPixelsFromCache() method is:
%
% Quantum *GetAuthenticPixelsFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static Quantum *GetAuthenticPixelsFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(cache_info->nexus_info[id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelQueue() returns the authentic pixels associated
% corresponding with the last call to QueueAuthenticPixels() or
% GetAuthenticPixels().
%
% The format of the GetAuthenticPixelQueue() method is:
%
% Quantum *GetAuthenticPixelQueue(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Quantum *GetAuthenticPixelQueue(const Image *image)
{
  /*
    Return the authentic pixels from the last QueueAuthenticPixels() or
    GetAuthenticPixels() call; delegate to an installed handler first.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    return(info->methods.get_authentic_pixels_from_handler(image));
  assert(thread_id < (int) info->number_threads);
  return(info->nexus_info[thread_id]->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixels() obtains a pixel region for read/write access. If the
% region is successfully accessed, a pointer to a Quantum array
% representing the region is returned, otherwise NULL is returned.
%
% The returned pointer may point to a temporary working copy of the pixels
% or it may point to the original pixels in memory. Performance is maximized
% if the selected region is part of one row, or one or more full rows, since
% then there is opportunity to access the pixels in-place (without a copy)
% if the image is in memory, or in a memory-mapped file. The returned pointer
% must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image has corresponding metacontent,call
% GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the
% meta-content corresponding to the region. Once the Quantum array has
% been updated, the changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the GetAuthenticPixels() method is:
%
% Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  /*
    Obtain a read/write region of pixels: dispatch to an installed cache
    handler when present, otherwise resolve through this thread's nexus.
    Returns NULL when the region cannot be accessed.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  if (info->methods.get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    return(info->methods.get_authentic_pixels_handler(image,x,y,columns,rows,
      exception));
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache
% as defined by the geometry parameters. A pointer to the pixels is returned
% if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetAuthenticPixelsCache() method is:
%
% Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  /*
    Default handler: fetch the requested region through this thread's
    cache nexus; NULL when the cache is absent or the transfer fails.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  if (info == (CacheInfo *) NULL)
    return((Quantum *) NULL);
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetAuthenticPixelCacheNexus(image,x,y,columns,rows,
    info->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtent() returns the extent of the pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetAuthenticPixels().
%
% The format of the GetImageExtent() method is:
%
% MagickSizeType GetImageExtent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickSizeType GetImageExtent(const Image *image)
{
  /*
    Report the pixel extent of this thread's active cache nexus, i.e. the
    region established by the last QueueAuthenticPixels()/GetAuthenticPixels().
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  return(GetPixelCacheNexusExtent(info,info->nexus_info[thread_id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCache() ensures that there is only a single reference to the
% pixel cache to be modified, updating the provided cache pointer to point to
% a clone of the original pixel cache if necessary.
%
% The format of the GetImagePixelCache method is:
%
% Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o clone: any value other than MagickFalse clones the cache pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType ValidatePixelCacheMorphology(
  const Image *magick_restrict image)
{
  /*
    Verify the image and its pixel cache agree on every structural
    property; any mismatch means the cache must be reopened.
  */
  const CacheInfo
    *magick_restrict cache_info;

  cache_info=(const CacheInfo *) image->cache;
  if (image->storage_class != cache_info->storage_class)
    return(MagickFalse);
  if (image->colorspace != cache_info->colorspace)
    return(MagickFalse);
  if (image->alpha_trait != cache_info->alpha_trait)
    return(MagickFalse);
  if (image->channels != cache_info->channels)
    return(MagickFalse);
  if ((image->columns != cache_info->columns) ||
      (image->rows != cache_info->rows))
    return(MagickFalse);
  if (image->number_channels != cache_info->number_channels)
    return(MagickFalse);
  /* The per-channel maps must match element for element. */
  if (memcmp(image->channel_map,cache_info->channel_map,
       image->number_channels*sizeof(*image->channel_map)) != 0)
    return(MagickFalse);
  if (image->metacontent_extent != cache_info->metacontent_extent)
    return(MagickFalse);
  if (cache_info->nexus_info == (NexusInfo **) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickBooleanType
    destroy,
    status;

  /* Process-wide limits, resolved once and cached across calls. */
  static MagickSizeType
    cache_timelimit = MagickResourceInfinity,
    cpu_throttle = MagickResourceInfinity,
    cycles = 0;

  status=MagickTrue;
  if (cpu_throttle == MagickResourceInfinity)
    cpu_throttle=GetMagickResourceLimit(ThrottleResource);
  /* Throttle: sleep every 32nd call when a CPU throttle limit is set. */
  if ((cpu_throttle != 0) && ((cycles++ % 32) == 0))
    MagickDelay(cpu_throttle);
  if (cache_epoch == 0)
    {
      /*
        Set the expire time in seconds.
      */
      cache_timelimit=GetMagickResourceLimit(TimeResource);
      cache_epoch=GetMagickTime();
    }
  /* Enforce the time resource limit: this is fatal, not recoverable. */
  if ((cache_timelimit != MagickResourceInfinity) &&
      ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit))
    {
#if defined(ECANCELED)
      errno=ECANCELED;
#endif
      cache_info=(CacheInfo *) image->cache;
      /* Close any disk-backed cache file before aborting. */
      if (cache_info->file != -1)
        (void) ClosePixelCacheOnDisk(cache_info);
      ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded");
    }
  LockSemaphoreInfo(image->semaphore);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  destroy=MagickFalse;
  /*
    Copy-on-write: a shared or read-only cache is cloned before it may be
    modified.  The condition is re-checked under the cache semaphore
    (check, lock, re-check) to avoid cloning twice under contention.
  */
  if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
    {
      LockSemaphoreInfo(cache_info->semaphore);
      if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode))
        {
          CacheInfo
            *clone_info;

          /* Shallow stack copy of the image, used only to open the clone. */
          Image
            clone_image;

          /*
            Clone pixel cache.
          */
          clone_image=(*image);
          clone_image.semaphore=AcquireSemaphoreInfo();
          clone_image.reference_count=1;
          clone_image.cache=ClonePixelCache(cache_info);
          clone_info=(CacheInfo *) clone_image.cache;
          status=OpenPixelCache(&clone_image,IOMode,exception);
          if (status == MagickFalse)
            clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
          else
            {
              /* Optionally copy the pixel data, not just the structure. */
              if (clone != MagickFalse)
                status=ClonePixelCacheRepository(clone_info,cache_info,
                  exception);
              if (status == MagickFalse)
                clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
              else
                {
                  /* Success: install the clone; old cache destroyed below. */
                  destroy=MagickTrue;
                  image->cache=clone_info;
                }
            }
          RelinquishSemaphoreInfo(&clone_image.semaphore);
        }
      UnlockSemaphoreInfo(cache_info->semaphore);
    }
  /* Drop our reference to the old cache only after its lock is released. */
  if (destroy != MagickFalse)
    cache_info=(CacheInfo *) DestroyPixelCache(cache_info);
  if (status != MagickFalse)
    {
      /*
        Ensure the image matches the pixel cache morphology.
      */
      image->type=UndefinedType;
      if (ValidatePixelCacheMorphology(image) == MagickFalse)
        {
          status=OpenPixelCache(image,IOMode,exception);
          cache_info=(CacheInfo *) image->cache;
          if (cache_info->file != -1)
            (void) ClosePixelCacheOnDisk(cache_info);
        }
    }
  UnlockSemaphoreInfo(image->semaphore);
  if (status == MagickFalse)
    return((Cache) NULL);
  return(image->cache);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e P i x e l C a c h e T y p e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePixelCacheType() returns the pixel cache type: UndefinedCache,
% DiskCache, MemoryCache, MapCache, or PingCache.
%
% The format of the GetImagePixelCacheType() method is:
%
% CacheType GetImagePixelCacheType(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport CacheType GetImagePixelCacheType(const Image *image)
{
  /* Report the backing-store type of the image's pixel cache. */
  const CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->type);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e A u t h e n t i c P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixel() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixel() method is:
%
% MagickBooleanType GetOneAuthenticPixel(Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType CopyPixel(const Image *image,
  const Quantum *source,Quantum *destination)
{
  /*
    Copy one pixel's channels from source to destination, remapping each
    channel index through the image's channel map.  A NULL source means
    the pixel was unavailable: fill with the background color and report
    MagickFalse.
  */
  ssize_t
    n;

  if (source == (const Quantum *) NULL)
    {
      destination[RedPixelChannel]=ClampToQuantum(image->background_color.red);
      destination[GreenPixelChannel]=ClampToQuantum(
        image->background_color.green);
      destination[BluePixelChannel]=ClampToQuantum(
        image->background_color.blue);
      destination[BlackPixelChannel]=ClampToQuantum(
        image->background_color.black);
      destination[AlphaPixelChannel]=ClampToQuantum(
        image->background_color.alpha);
      return(MagickFalse);
    }
  for (n=0; n < (ssize_t) GetPixelChannels(image); n++)
    destination[GetPixelChannelChannel(image,n)]=source[n];
  return(MagickTrue);
}
MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Fetch a single authentic pixel at (x,y); delegate to an installed
    handler when present, else use the default cache path.  On failure
    the background color is stored and MagickFalse returned.
  */
  CacheInfo
    *magick_restrict info;

  Quantum
    *magick_restrict q;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (info->methods.get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    return(info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,
      exception));
  q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e A u t h e n t i c P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs.
%
% The format of the GetOneAuthenticPixelFromCache() method is:
%
% MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
% const ssize_t x,const ssize_t y,Quantum *pixel,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Default handler: read one authentic pixel at (x,y) through this
    thread's cache nexus; background color on failure.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  Quantum
    *magick_restrict q;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,q,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixel() returns a single virtual pixel at the specified
% (x,y) location. The image background color is returned if an error occurs.
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixel() method is:
%
% MagickBooleanType GetOneVirtualPixel(const Image *image,const ssize_t x,
% const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image,
  const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Fetch a single virtual pixel at (x,y) using the image's current
    virtual-pixel method; delegate to an installed handler when present.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  if (info->methods.get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    return(info->methods.get_one_virtual_pixel_from_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,pixel,exception));
  assert(thread_id < (int) info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    1UL,1UL,info->nexus_info[thread_id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t O n e V i r t u a l P i x e l F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelFromCache() returns a single virtual pixel at the
% specified (x,y) location. The image background color is returned if an
% error occurs.
%
% The format of the GetOneVirtualPixelFromCache() method is:
%
% MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% Quantum *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: These values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  Quantum *pixel,ExceptionInfo *exception)
{
  /*
    Default handler: read one virtual pixel at (x,y) with the given
    virtual-pixel method through this thread's cache nexus.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel));
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  return(CopyPixel(image,p,pixel));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t O n e V i r t u a l P i x e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y)
% location. The image background color is returned if an error occurs. If
% you plan to modify the pixel, use GetOneAuthenticPixel() instead.
%
% The format of the GetOneVirtualPixelInfo() method is:
%
% MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y: these values define the location of the pixel to return.
%
% o pixel: return a pixel at the specified (x,y) location.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  /*
    Fetch a single virtual pixel at (x,y) as a PixelInfo; the pixel is
    pre-initialized so callers get a defined value even on failure.
  */
  CacheInfo
    *magick_restrict info;

  const int
    thread_id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  assert(thread_id < (int) info->number_threads);
  GetPixelInfo(image,pixel);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL,
    info->nexus_info[thread_id],exception);
  if (p == (const Quantum *) NULL)
    return(MagickFalse);
  GetPixelInfoPixel(image,p,pixel);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e C o l o r s p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheColorspace() returns the colorspace of the pixel cache.
%
% The format of the GetPixelCacheColorspace() method is:
%
% ColorspaceType GetPixelCacheColorspace(const Cache cache)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
*/
MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache)
{
  /* Report the colorspace recorded in the pixel cache. */
  const CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->colorspace);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheFilename() returns the filename associated with the pixel
% cache.
%
% The format of the GetPixelCacheFilename() method is:
%
% const char *GetPixelCacheFilename(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
return(cache_info->cache_filename);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheMethods() initializes the CacheMethods structure.
%
% The format of the GetPixelCacheMethods() method is:
%
% void GetPixelCacheMethods(CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  /*
    Reset a CacheMethods structure to the default in-core pixel cache
    handlers defined in this file.
  */
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  /* Virtual (read-only) pixel access. */
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  /* Authentic (read/write) pixel access. */
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  /* Queue/sync/destroy lifecycle. */
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e N e x u s E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheNexusExtent() returns the extent of the pixels associated
% corresponding with the last call to SetPixelCacheNexusPixels() or
% GetPixelCacheNexusPixels().
%
% The format of the GetPixelCacheNexusExtent() method is:
%
% MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  /*
    Return the pixel extent (width*height) of the nexus region set by the
    last SetPixelCacheNexusPixels()/GetPixelCacheNexusPixels() call; when
    no region is set (extent 0), fall back to the full cache extent.
  */
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  /* Use the typed NULL comparison for consistency with the other cache
     accessors in this file. */
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCachePixels() returns the pixels associated with the specified image.
%
% The format of the GetPixelCachePixels() method is:
%
% void *GetPixelCachePixels(Image *image,MagickSizeType *length,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o length: the pixel cache length.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  /*
    Expose the raw pixel buffer and its length; only in-core caches
    (memory or memory-mapped) have a directly addressable buffer.
  */
  CacheInfo
    *magick_restrict info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  *length=info->length;
  if ((info->type == MemoryCache) || (info->type == MapCache))
    return((void *) info->pixels);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
% The format of the GetPixelCacheStorageClass() method is:
%
% ClassType GetPixelCacheStorageClass(const Cache cache)
%
% A description of each parameter follows:
%
% o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
% o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  /* Report the storage class (DirectClass/PseudoClass) of the cache. */
  const CacheInfo
    *magick_restrict info;

  assert(cache != (Cache) NULL);
  info=(const CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",info->filename);
  return(info->storage_class);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e T i l e S i z e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheTileSize() returns the pixel cache tile size.
%
% The format of the GetPixelCacheTileSize() method is:
%
% void GetPixelCacheTileSize(const Image *image,size_t *width,
% size_t *height)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the optimized cache tile width in pixels.
%
% o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  /*
    Compute a square tile size in pixels from a fixed byte budget per
    tile row; disk-backed caches get a larger budget (8192 vs 2048 bytes).
  */
  CacheInfo
    *magick_restrict info;

  size_t
    pixel_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  info=(CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  pixel_size=info->number_channels*sizeof(Quantum);
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/pixel_size;
  else
    *width=2048UL/pixel_size;
  *height=(*width);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
% pixel cache. A virtual pixel is any pixel access that is outside the
% boundaries of the image cache.
%
% The format of the GetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  /* Report the virtual-pixel method configured on the pixel cache. */
  const CacheInfo
    *magick_restrict info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  info=(const CacheInfo *) image->cache;
  assert(info->signature == MagickCoreSignature);
  return(info->virtual_pixel_method);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromCache() returns the meta-content corresponding with
% the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualMetacontentFromCache() method is:
%
% void *GetVirtualMetacontentFromCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l M e t a c o n t e n t F r o m N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontentFromNexus() returns the meta-content for the specified
% cache nexus.
%
% The format of the GetVirtualMetacontentFromNexus() method is:
%
% const void *GetVirtualMetacontentFromNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict info;
  /*
    Return the metacontent staged in the given cache nexus; NULL when the
    cache has never been opened (storage class still undefined).
  */
  assert(cache != (Cache) NULL);
  info=(CacheInfo *) cache;
  assert(info->signature == MagickCoreSignature);
  if (info->storage_class != UndefinedClass)
    return(nexus_info->metacontent);
  return((void *) NULL);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualMetacontent() returns the virtual metacontent corresponding with
% the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is
% returned if the meta-content are not available.
%
% The format of the GetVirtualMetacontent() method is:
%
% const void *GetVirtualMetacontent(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const void
*magick_restrict metacontent;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
if (metacontent != (void *) NULL)
return(metacontent);
assert(id < (int) cache_info->number_threads);
metacontent=GetVirtualMetacontentFromNexus(cache_info,
cache_info->nexus_info[id]);
return(metacontent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
% pixel cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCacheNexus() method is:
%
% Quantum *GetVirtualPixelCacheNexus(const Image *image,
% const VirtualPixelMethod method,const ssize_t x,const ssize_t y,
% const size_t columns,const size_t rows,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to acquire.
%
% o exception: return any errors or warnings in this structure.
%
*/
static ssize_t
DitherMatrix[64] =
{
0, 48, 12, 60, 3, 51, 15, 63,
32, 16, 44, 28, 35, 19, 47, 31,
8, 56, 4, 52, 11, 59, 7, 55,
40, 24, 36, 20, 43, 27, 39, 23,
2, 50, 14, 62, 1, 49, 13, 61,
34, 18, 46, 30, 33, 17, 45, 29,
10, 58, 6, 54, 9, 57, 5, 53,
42, 26, 38, 22, 41, 25, 37, 21
};
static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
ssize_t
index;
index=x+DitherMatrix[x & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) columns)
return((ssize_t) columns-1L);
return(index);
}
static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
ssize_t
index;
index=y+DitherMatrix[y & 0x07]-32L;
if (index < 0L)
return(0L);
if (index >= (ssize_t) rows)
return((ssize_t) rows-1L);
return(index);
}
static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
if (x < 0L)
return(0L);
if (x >= (ssize_t) columns)
return((ssize_t) (columns-1));
return(x);
}
static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
if (y < 0L)
return(0L);
if (y >= (ssize_t) rows)
return((ssize_t) (rows-1));
return(y);
}
static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}
static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}
static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
const size_t extent)
{
MagickModulo
modulo;
modulo.quotient=offset/((ssize_t) extent);
modulo.remainder=offset % ((ssize_t) extent);
if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
{
modulo.quotient-=1;
modulo.remainder+=((ssize_t) extent);
}
return(modulo);
}
MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickOffsetType
offset;
MagickSizeType
length,
number_pixels;
NexusInfo
*magick_restrict virtual_nexus;
Quantum
*magick_restrict pixels,
virtual_pixel[MaxPixelChannels];
register const Quantum
*magick_restrict p;
register const void
*magick_restrict r;
register Quantum
*magick_restrict q;
register ssize_t
i,
u;
register unsigned char
*magick_restrict s;
ssize_t
v;
void
*magick_restrict virtual_metacontent;
/*
Acquire pixels.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->type == UndefinedCache)
return((const Quantum *) NULL);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
CopyOpenCLBuffer(cache_info);
#endif
pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows,
((image->channels & WriteMaskChannel) != 0) ||
((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
nexus_info,exception);
if (pixels == (Quantum *) NULL)
return((const Quantum *) NULL);
q=pixels;
offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
nexus_info->region.x;
length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+
nexus_info->region.width-1L;
number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels))
if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) &&
(y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows))
{
MagickBooleanType
status;
/*
Pixel request is inside cache extents.
*/
if (nexus_info->authentic_pixel_cache != MagickFalse)
return(q);
status=ReadPixelCachePixels(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const Quantum *) NULL);
if (cache_info->metacontent_extent != 0)
{
status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception);
if (status == MagickFalse)
return((const Quantum *) NULL);
}
return(q);
}
/*
Pixel request is outside cache extents.
*/
virtual_nexus=nexus_info->virtual_nexus;
s=(unsigned char *) nexus_info->metacontent;
(void) memset(virtual_pixel,0,cache_info->number_channels*
sizeof(*virtual_pixel));
virtual_metacontent=(void *) NULL;
switch (virtual_pixel_method)
{
case BackgroundVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
case EdgeVirtualPixelMethod:
case CheckerTileVirtualPixelMethod:
case HorizontalTileVirtualPixelMethod:
case VerticalTileVirtualPixelMethod:
{
if (cache_info->metacontent_extent != 0)
{
/*
Acquire a metacontent buffer.
*/
virtual_metacontent=(void *) AcquireQuantumMemory(1,
cache_info->metacontent_extent);
if (virtual_metacontent == (void *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
CacheError,"UnableToGetCacheNexus","`%s'",image->filename);
return((const Quantum *) NULL);
}
(void) memset(virtual_metacontent,0,cache_info->metacontent_extent);
}
switch (virtual_pixel_method)
{
case BlackVirtualPixelMethod:
{
for (i=0; i < (ssize_t) cache_info->number_channels; i++)
SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
break;
}
case GrayVirtualPixelMethod:
{
for (i=0; i < (ssize_t) cache_info->number_channels; i++)
SetPixelChannel(image,(PixelChannel) i,QuantumRange/2,
virtual_pixel);
SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
break;
}
case TransparentVirtualPixelMethod:
{
for (i=0; i < (ssize_t) cache_info->number_channels; i++)
SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel);
SetPixelAlpha(image,TransparentAlpha,virtual_pixel);
break;
}
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
for (i=0; i < (ssize_t) cache_info->number_channels; i++)
SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel);
SetPixelAlpha(image,OpaqueAlpha,virtual_pixel);
break;
}
default:
{
SetPixelRed(image,ClampToQuantum(image->background_color.red),
virtual_pixel);
SetPixelGreen(image,ClampToQuantum(image->background_color.green),
virtual_pixel);
SetPixelBlue(image,ClampToQuantum(image->background_color.blue),
virtual_pixel);
SetPixelBlack(image,ClampToQuantum(image->background_color.black),
virtual_pixel);
SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha),
virtual_pixel);
break;
}
}
break;
}
default:
break;
}
for (v=0; v < (ssize_t) rows; v++)
{
ssize_t
y_offset;
y_offset=y+v;
if ((virtual_pixel_method == EdgeVirtualPixelMethod) ||
(virtual_pixel_method == UndefinedVirtualPixelMethod))
y_offset=EdgeY(y_offset,cache_info->rows);
for (u=0; u < (ssize_t) columns; u+=length)
{
ssize_t
x_offset;
x_offset=x+u;
length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u);
if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) ||
((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) ||
(length == 0))
{
MagickModulo
x_modulo,
y_modulo;
/*
Transfer a single pixel.
*/
length=(MagickSizeType) 1;
switch (virtual_pixel_method)
{
case EdgeVirtualPixelMethod:
default:
{
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),
EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,
nexus_info->virtual_nexus);
break;
}
case RandomVirtualPixelMethod:
{
if (cache_info->random_info == (RandomInfo *) NULL)
cache_info->random_info=AcquireRandomInfo();
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
RandomX(cache_info->random_info,cache_info->columns),
RandomY(cache_info->random_info,cache_info->rows),1UL,1UL,
virtual_nexus,exception);
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
break;
}
case DitherVirtualPixelMethod:
{
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
DitherX(x_offset,cache_info->columns),
DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
break;
}
case TileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
break;
}
case MirrorVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
if ((x_modulo.quotient & 0x01) == 1L)
x_modulo.remainder=(ssize_t) cache_info->columns-
x_modulo.remainder-1L;
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if ((y_modulo.quotient & 0x01) == 1L)
y_modulo.remainder=(ssize_t) cache_info->rows-
y_modulo.remainder-1L;
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
break;
}
case HorizontalTileEdgeVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL,
virtual_nexus,exception);
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
break;
}
case VerticalTileEdgeVirtualPixelMethod:
{
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL,
virtual_nexus,exception);
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
break;
}
case BackgroundVirtualPixelMethod:
case BlackVirtualPixelMethod:
case GrayVirtualPixelMethod:
case TransparentVirtualPixelMethod:
case MaskVirtualPixelMethod:
case WhiteVirtualPixelMethod:
{
p=virtual_pixel;
r=virtual_metacontent;
break;
}
case CheckerTileVirtualPixelMethod:
{
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L)
{
p=virtual_pixel;
r=virtual_metacontent;
break;
}
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
break;
}
case HorizontalTileVirtualPixelMethod:
{
if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows))
{
p=virtual_pixel;
r=virtual_metacontent;
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
break;
}
case VerticalTileVirtualPixelMethod:
{
if ((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns))
{
p=virtual_pixel;
r=virtual_metacontent;
break;
}
x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
exception);
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
break;
}
}
if (p == (const Quantum *) NULL)
break;
(void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
sizeof(*p)));
q+=cache_info->number_channels;
if ((s != (void *) NULL) && (r != (const void *) NULL))
{
(void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
s+=cache_info->metacontent_extent;
}
continue;
}
/*
Transfer a run of pixels.
*/
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
(size_t) length,1UL,virtual_nexus,exception);
if (p == (const Quantum *) NULL)
break;
r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
(void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
sizeof(*p)));
q+=cache_info->number_channels*length;
if ((r != (void *) NULL) && (s != (const void *) NULL))
{
(void) memcpy(s,r,(size_t) length);
s+=length*cache_info->metacontent_extent;
}
}
if (u < (ssize_t) columns)
break;
}
/*
Free resources.
*/
if (virtual_metacontent != (void *) NULL)
virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
if (v < (ssize_t) rows)
return((const Quantum *) NULL);
return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelCache() get virtual pixels from the in-memory or disk pixel
% cache as defined by the geometry parameters. A pointer to the pixels
% is returned if the pixels are transferred, otherwise a NULL is returned.
%
% The format of the GetVirtualPixelCache() method is:
%
% const Quantum *GetVirtualPixelCache(const Image *image,
% const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: the virtual pixel method.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
const size_t columns,const size_t rows,ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
cache_info->nexus_info[id],exception);
return(p);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l Q u e u e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelQueue() returns the virtual pixels associated corresponding
% with the last call to QueueAuthenticPixels() or GetVirtualPixels().
%
% The format of the GetVirtualPixelQueue() method is:
%
% const Quantum *GetVirtualPixelQueue(const Image image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixels_handler !=
(GetVirtualPixelsHandler) NULL)
return(cache_info->methods.get_virtual_pixels_handler(image));
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixels() returns an immutable pixel region. If the
% region is successfully accessed, a pointer to it is returned, otherwise
% NULL is returned. The returned pointer may point to a temporary working
% copy of the pixels or it may point to the original pixels in memory.
% Performance is maximized if the selected region is part of one row, or one
% or more full rows, since there is opportunity to access the pixels in-place
% (without a copy) if the image is in memory, or in a memory-mapped file. The
% returned pointer must *never* be deallocated by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% access the meta-content (of type void) corresponding to the the
% region.
%
% If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
% Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not thread-
% safe. In a threaded environment, use GetCacheViewVirtualPixels() or
% GetCacheViewAuthenticPixels() instead.
%
% The format of the GetVirtualPixels() method is:
%
% const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
const Quantum
*magick_restrict p;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->methods.get_virtual_pixel_handler !=
(GetVirtualPixelHandler) NULL)
return(cache_info->methods.get_virtual_pixel_handler(image,
GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
assert(id < (int) cache_info->number_threads);
p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
columns,rows,cache_info->nexus_info[id],exception);
return(p);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s F r o m C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsCache() returns the pixels associated corresponding with the
% last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
% The format of the GetVirtualPixelsCache() method is:
%
% Quantum *GetVirtualPixelsCache(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
CacheInfo
*magick_restrict cache_info;
const int
id = GetOpenMPThreadId();
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(image->cache != (Cache) NULL);
cache_info=(CacheInfo *) image->cache;
assert(cache_info->signature == MagickCoreSignature);
assert(id < (int) cache_info->number_threads);
return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t V i r t u a l P i x e l s N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetVirtualPixelsNexus() returns the pixels associated with the specified
% cache nexus.
%
% The format of the GetVirtualPixelsNexus() method is:
%
% const Quantum *GetVirtualPixelsNexus(const Cache cache,
% NexusInfo *nexus_info)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o nexus_info: the cache nexus to return the colormap pixels.
%
*/
MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache,
NexusInfo *magick_restrict nexus_info)
{
CacheInfo
*magick_restrict cache_info;
assert(cache != (Cache) NULL);
cache_info=(CacheInfo *) cache;
assert(cache_info->signature == MagickCoreSignature);
if (cache_info->storage_class == UndefinedClass)
return((Quantum *) NULL);
return((const Quantum *) nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ M a s k P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MaskPixelCacheNexus() masks the cache nexus as defined by the image mask.
% The method returns MagickTrue if the pixel region is masked, otherwise
% MagickFalse.
%
% The format of the MaskPixelCacheNexus() method is:
%
% MagickBooleanType MaskPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to clip.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline Quantum ApplyPixelCompositeMask(const Quantum p,
const MagickRealType alpha,const Quantum q,const MagickRealType beta)
{
double
mask_alpha;
Quantum
pixel;
if (fabs(alpha-OpaqueAlpha) < MagickEpsilon)
return(p);
mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta;
mask_alpha=PerceptibleReciprocal(mask_alpha);
pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q,
beta));
return(pixel);
}
static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info,
ExceptionInfo *exception)
{
CacheInfo
*magick_restrict cache_info;
MagickSizeType
number_pixels;
register Quantum
*magick_restrict p,
*magick_restrict q;
register ssize_t
n;
/*
Apply clip mask.
*/
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
if ((image->channels & CompositeMaskChannel) == 0)
return(MagickTrue);
cache_info=(CacheInfo *) image->cache;
if (cache_info == (Cache) NULL)
return(MagickFalse);
p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y,
nexus_info->region.width,nexus_info->region.height,
nexus_info->virtual_nexus,exception);
q=nexus_info->pixels;
number_pixels=(MagickSizeType) nexus_info->region.width*
nexus_info->region.height;
for (n=0; n < (ssize_t) number_pixels; n++)
{
double
mask_alpha;
register ssize_t
i;
if (p == (Quantum *) NULL)
break;
mask_alpha=(double) GetPixelCompositeMask(image,p);
for (i=0; i < (ssize_t) image->number_channels; i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType)
GetPixelAlpha(image,q));
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(image);
}
if (n < (ssize_t) number_pixels)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p e n P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OpenPixelCache() allocates the pixel cache. This includes defining the cache
% dimensions, allocating space for the image pixels and optionally the
% metacontent, and memory mapping the cache if it is disk based. The cache
% nexus array is initialized as well.
%
% The format of the OpenPixelCache() method is:
%
% MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info,
const MapMode mode)
{
int
file;
/*
Open pixel cache on disk.
*/
if ((cache_info->file != -1) && (cache_info->disk_mode == mode))
return(MagickTrue); /* cache already open and in the proper mode */
if (*cache_info->cache_filename == '\0')
file=AcquireUniqueFileResource(cache_info->cache_filename);
else
switch (mode)
{
case ReadMode:
{
file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0);
break;
}
case WriteMode:
{
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT |
O_BINARY | O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE);
break;
}
case IOMode:
default:
{
file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY |
O_EXCL,S_MODE);
if (file == -1)
file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE);
break;
}
}
if (file == -1)
return(MagickFalse);
(void) AcquireMagickResource(FileResource,1);
if (cache_info->file != -1)
(void) ClosePixelCacheOnDisk(cache_info);
cache_info->file=file;
cache_info->disk_mode=mode;
return(MagickTrue);
}
static inline MagickOffsetType WritePixelCacheRegion(
const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
const MagickSizeType length,const unsigned char *magick_restrict buffer)
{
register MagickOffsetType
i;
ssize_t
count;
#if !defined(MAGICKCORE_HAVE_PWRITE)
if (lseek(cache_info->file,offset,SEEK_SET) < 0)
return((MagickOffsetType) -1);
#endif
count=0;
for (i=0; i < (MagickOffsetType) length; i+=count)
{
#if !defined(MAGICKCORE_HAVE_PWRITE)
count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX));
#else
count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
SSIZE_MAX),offset+i);
#endif
if (count <= 0)
{
count=0;
if (errno != EINTR)
break;
}
}
return(i);
}
static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length)
{
CacheInfo
*magick_restrict cache_info;
MagickOffsetType
count,
extent,
offset;
cache_info=(CacheInfo *) image->cache;
if (image->debug != MagickFalse)
{
char
format[MagickPathExtent],
message[MagickPathExtent];
(void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format);
(void) FormatLocaleString(message,MagickPathExtent,
"extend %s (%s[%d], disk, %s)",cache_info->filename,
cache_info->cache_filename,cache_info->file,format);
(void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
}
if (length != (MagickSizeType) ((MagickOffsetType) length))
return(MagickFalse);
offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END);
if (offset < 0)
return(MagickFalse);
if ((MagickSizeType) offset >= length)
count=(MagickOffsetType) 1;
else
{
extent=(MagickOffsetType) length-1;
count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *)
"");
if (count != 1)
return(MagickFalse);
#if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE)
if (cache_info->synchronize != MagickFalse)
if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0)
return(MagickFalse);
#endif
}
offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET);
if (offset < 0)
return(MagickFalse);
return(MagickTrue);
}
/*
  OpenPixelCache() opens (or re-opens) the pixel cache backing store for
  `image', trying in order: ping (no pixel storage), anonymous/heap memory,
  a distributed cache server (only when disk resources are exhausted and a
  "cache:hosts" registry entry exists), and finally a disk file, optionally
  memory-mapped.  On success MagickTrue is returned; on failure the cache
  type is reset to UndefinedCache, an exception is raised, and MagickFalse
  is returned.
*/
static MagickBooleanType OpenPixelCache(Image *image,const MapMode mode,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    source_info;
  char
    format[MagickPathExtent],
    message[MagickPathExtent];
  const char
    *hosts,
    *type;
  MagickBooleanType
    status;
  MagickSizeType
    length,
    number_pixels;
  size_t
    columns,
    packet_size;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (cache_anonymous_memory < 0)
    {
      char
        *value;
      /*
        Does the security policy require anonymous mapping for pixel cache?
        (This block runs once; cache_anonymous_memory is only re-read while
        negative.)
      */
      cache_anonymous_memory=0;
      value=GetPolicyValue("pixel-cache-memory");
      if (value == (char *) NULL)
        value=GetPolicyValue("cache:memory-map");
      if (LocaleCompare(value,"anonymous") == 0)
        {
#if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS)
          cache_anonymous_memory=1;
#else
          (void) ThrowMagickException(exception,GetMagickModule(),
            MissingDelegateError,"DelegateLibrarySupportNotBuiltIn",
            "'%s' (policy requires anonymous memory mapping)",image->filename);
#endif
        }
      value=DestroyString(value);
    }
  if ((image->columns == 0) || (image->rows == 0))
    ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (((MagickSizeType) image->columns > cache_info->width_limit) ||
      ((MagickSizeType) image->rows > cache_info->height_limit))
    ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit",
      image->filename);
  length=GetImageListLength(image);
  if (AcquireMagickResource(ListLengthResource,length) == MagickFalse)
    ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit",
      image->filename);
  /*
    Snapshot the current cache so existing pixels can be cloned into the
    new backing store (and released) below.
  */
  source_info=(*cache_info);
  source_info.file=(-1);
  (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]",
    image->filename,(double) image->scene);
  cache_info->storage_class=image->storage_class;
  cache_info->colorspace=image->colorspace;
  cache_info->alpha_trait=image->alpha_trait;
  cache_info->channels=image->channels;
  cache_info->rows=image->rows;
  cache_info->columns=image->columns;
  InitializePixelChannelMap(image);
  cache_info->number_channels=GetPixelChannels(image);
  (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels*
    sizeof(*image->channel_map));
  cache_info->metacontent_extent=image->metacontent_extent;
  cache_info->mode=mode;
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  packet_size=cache_info->number_channels*sizeof(Quantum);
  if (image->metacontent_extent != 0)
    packet_size+=cache_info->metacontent_extent;
  /*
    Recompute columns from the total length to detect arithmetic overflow
    in number_pixels*packet_size; a mismatch means the extent wrapped.
  */
  length=number_pixels*packet_size;
  columns=(size_t) (length/cache_info->rows/packet_size);
  if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) ||
      ((ssize_t) cache_info->rows < 0))
    ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed",
      image->filename);
  cache_info->length=length;
  if (image->ping != MagickFalse)
    {
      /*
        Ping mode: record geometry only, no pixel storage is allocated.
      */
      cache_info->storage_class=image->storage_class;
      cache_info->colorspace=image->colorspace;
      cache_info->type=PingCache;
      return(MagickTrue);
    }
  /*
    Try a memory-based (heap or anonymous-mapped) pixel cache first;
    PersistMode always goes to disk.
  */
  status=AcquireMagickResource(AreaResource,(MagickSizeType)
    cache_info->columns*cache_info->rows);
  if (cache_info->mode == PersistMode)
    status=MagickFalse;
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if ((status != MagickFalse) &&
      (length == (MagickSizeType) ((size_t) length)) &&
      ((cache_info->type == UndefinedCache) ||
       (cache_info->type == MemoryCache)))
    {
      status=AcquireMagickResource(MemoryResource,cache_info->length);
      if (status != MagickFalse)
        {
          status=MagickTrue;
          if (cache_anonymous_memory <= 0)
            {
              cache_info->mapped=MagickFalse;
              cache_info->pixels=(Quantum *) MagickAssumeAligned(
                AcquireAlignedMemory(1,(size_t) cache_info->length));
            }
          else
            {
              cache_info->mapped=MagickTrue;
              cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t)
                cache_info->length);
            }
          if (cache_info->pixels == (Quantum *) NULL)
            {
              /*
                Allocation failed: restore the previous pixels and fall
                through to the disk/distributed paths below.
              */
              cache_info->mapped=source_info.mapped;
              cache_info->pixels=source_info.pixels;
            }
          else
            {
              /*
                Create memory pixel cache.
              */
              cache_info->type=MemoryCache;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->mapped != MagickFalse ?
                    "Anonymous" : "Heap",type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              cache_info->storage_class=image->storage_class;
              /*
                status here reflects ClonePixelCacheRepository() above
                (== 0 is MagickFalse).
              */
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=AcquireMagickResource(DiskResource,cache_info->length);
  hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts",
    exception);
  if ((status == MagickFalse) && (hosts != (const char *) NULL))
    {
      DistributeCacheInfo
        *server_info;
      /*
        Distribute the pixel cache to a remote server.
      */
      server_info=AcquireDistributeCacheInfo(exception);
      if (server_info != (DistributeCacheInfo *) NULL)
        {
          status=OpenDistributePixelCache(server_info,image);
          if (status == MagickFalse)
            {
              ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
                GetDistributeCacheHostname(server_info));
              server_info=DestroyDistributeCacheInfo(server_info);
            }
          else
            {
              /*
                Create a distributed pixel cache.
              */
              status=MagickTrue;
              cache_info->type=DistributedCache;
              cache_info->server_info=server_info;
              (void) FormatLocaleString(cache_info->cache_filename,
                MagickPathExtent,"%s:%d",GetDistributeCacheHostname(
                (DistributeCacheInfo *) cache_info->server_info),
                GetDistributeCachePort((DistributeCacheInfo *)
                cache_info->server_info));
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    GetDistributeCacheFile((DistributeCacheInfo *)
                    cache_info->server_info),type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Create pixel cache on disk.
  */
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) &&
      (cache_info->mode != PersistMode))
    {
      (void) ClosePixelCacheOnDisk(cache_info);
      *cache_info->cache_filename='\0';
    }
  if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToOpenPixelCache",
        image->filename);
      return(MagickFalse);
    }
  status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+
    cache_info->length);
  if (status == MagickFalse)
    {
      cache_info->type=UndefinedCache;
      ThrowFileException(exception,CacheError,"UnableToExtendCache",
        image->filename);
      return(MagickFalse);
    }
  /*
    Prefer a memory-mapped view of the disk file when the extent fits in a
    size_t and a Map resource can be acquired; otherwise stay a DiskCache.
  */
  length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+
    cache_info->metacontent_extent);
  if (length != (MagickSizeType) ((size_t) length))
    cache_info->type=DiskCache;
  else
    {
      status=AcquireMagickResource(MapResource,cache_info->length);
      if (status == MagickFalse)
        cache_info->type=DiskCache;
      else
        if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache))
          {
            cache_info->type=DiskCache;
            RelinquishMagickResource(MapResource,cache_info->length);
          }
        else
          {
            cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode,
              cache_info->offset,(size_t) cache_info->length);
            if (cache_info->pixels == (Quantum *) NULL)
              {
                cache_info->type=DiskCache;
                cache_info->mapped=source_info.mapped;
                cache_info->pixels=source_info.pixels;
                RelinquishMagickResource(MapResource,cache_info->length);
              }
            else
              {
                /*
                  Create file-backed memory-mapped pixel cache.
                */
                (void) ClosePixelCacheOnDisk(cache_info);
                cache_info->type=MapCache;
                cache_info->mapped=MagickTrue;
                cache_info->metacontent=(void *) NULL;
                if (cache_info->metacontent_extent != 0)
                  cache_info->metacontent=(void *) (cache_info->pixels+
                    cache_info->number_channels*number_pixels);
                if ((source_info.storage_class != UndefinedClass) &&
                    (mode != ReadMode))
                  {
                    status=ClonePixelCacheRepository(cache_info,&source_info,
                      exception);
                    RelinquishPixelCachePixels(&source_info);
                  }
                if (image->debug != MagickFalse)
                  {
                    (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                      MagickPathExtent,format);
                    type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                      cache_info->type);
                    (void) FormatLocaleString(message,MagickPathExtent,
                      "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                      cache_info->filename,cache_info->cache_filename,
                      cache_info->file,type,(double) cache_info->columns,
                      (double) cache_info->rows,(double)
                      cache_info->number_channels,format);
                    (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                      message);
                  }
                if (status == 0)
                  {
                    cache_info->type=UndefinedCache;
                    return(MagickFalse);
                  }
                return(MagickTrue);
              }
          }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ P e r s i s t P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PersistPixelCache() attaches to or initializes a persistent pixel cache. A
% persistent pixel cache is one that resides on disk and is not destroyed
% when the program exits.
%
% The format of the PersistPixelCache() method is:
%
% MagickBooleanType PersistPixelCache(Image *image,const char *filename,
% const MagickBooleanType attach,MagickOffsetType *offset,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o filename: the persistent pixel cache filename.
%
%    o attach: A value other than zero attaches to an existing persistent
%      pixel cache; zero clones the current pixel cache to the persistent
%      cache file at the given offset.
%
% o offset: the offset in the persistent cache to store pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;
  MagickBooleanType
    status;
  ssize_t
    page_size;
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /*
    Flush any device-side pixels back to the host buffer before persisting.
  */
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache: memory-map the named file
        at *offset and re-open the cache in read mode.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      /*
        Advance *offset to the next page boundary past this cache.  NOTE:
        when length is already page-aligned this still adds a full extra
        page; the clone path below rounds the same way, so attach and
        clone stay in agreement.
      */
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  /*
    Build a disk-backed clone that targets the persistent cache file, then
    copy the current repository into it.
  */
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelCacheNexus() method is:
%
% Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% const MagickBooleanType clone,NexusInfo *nexus_info,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o nexus_info: the cache nexus to set.
%
% o clone: clone the pixel cache.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickOffsetType
    offset;
  MagickSizeType
    number_pixels;
  Quantum
    *magick_restrict pixels;
  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  /*
    The region's origin must lie strictly inside the cache extent.
  */
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  /*
    Reject regions whose first-pixel offset overflows (goes negative) or
    whose last pixel falls past the end of the cache.
  */
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Q u e u e A u t h e n t i c P i x e l s C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixelsCache() allocates a region to store image pixels as
% defined by the region rectangle and returns a pointer to the region. This
% region is subsequently transferred from the pixel cache with
% SyncAuthenticPixelsCache(). A pointer to the pixels is returned if the
% pixels are transferred, otherwise a NULL is returned.
%
% The format of the QueueAuthenticPixelsCache() method is:
%
% Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Queue a mutable pixel region through the calling thread's cache nexus;
  a thin wrapper over QueueAuthenticPixelCacheNexus() without cloning.
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache;
  const int
    thread_id = GetOpenMPThreadId();
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache=(CacheInfo *) image->cache;
  assert(cache->signature == MagickCoreSignature);
  assert(thread_id < (int) cache->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Q u e u e A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% QueueAuthenticPixels() queues a mutable pixel region. If the region is
% successfully initialized a pointer to a Quantum array representing the
% region is returned, otherwise NULL is returned. The returned pointer may
% point to a temporary working buffer for the pixels or it may point to the
% final location of the pixels in memory.
%
% Write-only access means that any existing pixel values corresponding to
% the region are ignored. This is useful if the initial image is being
% created from scratch, or if the existing pixel values are to be
% completely replaced without need to refer to their pre-existing values.
% The application is free to read and write the pixel buffer returned by
% QueueAuthenticPixels() any way it pleases. QueueAuthenticPixels() does not
% initialize the pixel array values. Initializing pixel array values is the
% application's responsibility.
%
% Performance is maximized if the selected region is part of one row, or
% one or more full rows, since then there is opportunity to access the
% pixels in-place (without a copy) if the image is in memory, or in a
% memory-mapped file. The returned pointer must *never* be deallocated
% by the user.
%
% Pixels accessed via the returned pointer represent a simple array of type
% Quantum. If the image type is CMYK or the storage class is PseudoClass,
% call GetAuthenticMetacontent() after invoking GetAuthenticPixels() to
% obtain the meta-content (of type void) corresponding to the region.
% Once the Quantum (and/or metacontent) array has been updated, the
% changes must be saved back to the underlying image using
% SyncAuthenticPixels() or they may be lost.
%
% The format of the QueueAuthenticPixels() method is:
%
% Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
% const ssize_t y,const size_t columns,const size_t rows,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o x,y,columns,rows: These values define the perimeter of a region of
% pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  Queue a mutable pixel region.  If the cache installed a custom
  queue-authentic-pixels handler, delegate to it; otherwise queue the
  region through this thread's cache nexus.
*/
MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache;
  const int
    thread_id = GetOpenMPThreadId();
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache=(CacheInfo *) image->cache;
  assert(cache->signature == MagickCoreSignature);
  if (cache->methods.queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    return(cache->methods.queue_authentic_pixels_handler(image,x,y,columns,
      rows,exception));
  assert(thread_id < (int) cache->number_threads);
  return(QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache->nexus_info[thread_id],exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCacheMetacontent() reads metacontent from the specified region of
% the pixel cache.
%
% The format of the ReadPixelCacheMetacontent() method is:
%
% MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the metacontent.
%
% o exception: return any errors or warnings in this structure.
%
*/
/*
  ReadPixelCacheRegion() reads `length' bytes at byte offset `offset' from
  the cache file descriptor into `buffer', looping over short reads and
  retrying on EINTR.  Returns the number of bytes actually read (callers
  compare this against the requested length), or -1 if the initial seek
  fails (non-pread builds only).
*/
static inline MagickOffsetType ReadPixelCacheRegion(
  const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset,
  const MagickSizeType length,unsigned char *magick_restrict buffer)
{
  register MagickOffsetType
    i;
  ssize_t
    count;
#if !defined(MAGICKCORE_HAVE_PREAD)
  /*
    Without pread() we must position the shared descriptor first; callers
    serialize access via the cache file semaphore.
  */
  if (lseek(cache_info->file,offset,SEEK_SET) < 0)
    return((MagickOffsetType) -1);
#endif
  count=0;
  for (i=0; i < (MagickOffsetType) length; i+=count)
  {
    /*
      Clamp each request to SSIZE_MAX, the largest portable read size.
    */
#if !defined(MAGICKCORE_HAVE_PREAD)
    count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX));
#else
    count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t)
      SSIZE_MAX),offset+i);
#endif
    if (count <= 0)
      {
        /*
          Retry only when interrupted by a signal; EOF or a hard error
          terminates the loop with a short byte count.
        */
        count=0;
        if (errno != EINTR)
          break;
      }
  }
  return(i);
}
static MagickBooleanType ReadPixelCacheMetacontent(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register ssize_t
    y;
  register unsigned char
    *magick_restrict q;
  size_t
    rows;
  /*
    Nothing to do when the cache carries no metacontent, or when the nexus
    points directly at the authentic cache pixels (no staging copy).
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    offset is in pixels; length is bytes per region row; extent is the
    byte size of the whole region.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  q=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict p;
      /*
        Read meta-content from memory.  A full-width region is collapsed
        into a single transfer when its extent fits a size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->metacontent_extent*cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read meta content from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      /*
        On disk the metacontent plane follows the whole pixel plane, so
        extent is repurposed here as the cache's total pixel count.
      */
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read metacontent from distributed cache, one region row per
        request unless the whole region fits the transfer buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    An early break above leaves y short of rows: report a read failure.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e a d P i x e l C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReadPixelCachePixels() reads pixels from the specified region of the pixel
% cache.
%
% The format of the ReadPixelCachePixels() method is:
%
% MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to read the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType ReadPixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register Quantum
    *magick_restrict q;
  register ssize_t
    y;
  size_t
    number_channels,
    rows;
  /*
    If the nexus aliases the authentic cache pixels there is nothing to
    transfer.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  /*
    Compute the region's first-pixel offset and per-row byte length, with
    explicit division checks to reject multiplicative overflow.
  */
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns;
  if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y)
    return(MagickFalse);
  offset+=nexus_info->region.x;
  number_channels=cache_info->number_channels;
  length=(MagickSizeType) number_channels*nexus_info->region.width*
    sizeof(Quantum);
  if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width)
    return(MagickFalse);
  rows=nexus_info->region.height;
  extent=length*rows;
  if ((extent == 0) || ((extent/length) != rows))
    return(MagickFalse);
  y=0;
  q=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict p;
      /*
        Read pixels from memory.  A full-width region collapses into a
        single memcpy when its extent fits a size_t.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      p=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Read pixels from disk.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*q),length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        offset+=cache_info->columns;
        q+=cache_info->number_channels*nexus_info->region.width;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;
      /*
        Read pixels from distributed cache, one region row per request
        unless the whole region fits the transfer buffer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=ReadDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(unsigned char *) q);
        if (count != (MagickOffsetType) length)
          break;
        q+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /*
    An early break above leaves y short of rows: report a read failure.
  */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToReadPixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e f e r e n c e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferencePixelCache() increments the reference count associated with the
% pixel cache returning a pointer to the cache.
%
% The format of the ReferencePixelCache method is:
%
% Cache ReferencePixelCache(Cache cache_info)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
*/
/*
  Increment the pixel cache reference count under the cache semaphore and
  return the cache.  Fix: the NULL check previously cast to (Cache *)
  (i.e. void **), mismatching the operand's Cache (void *) type; use
  (Cache) NULL as the rest of this file does.
*/
MagickPrivate Cache ReferencePixelCache(Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;
  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  /*
    Serialize the increment: the reference count is shared across images
    that alias this cache.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->reference_count++;
  UnlockSemaphoreInfo(cache_info->semaphore);
  return(cache_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e C h a n n e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheChannels() resets the pixel cache channels.
%
% The format of the ResetPixelCacheChannels method is:
%
% void ResetPixelCacheChannels(Image *)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
/*
  Refresh the cache's channel count from the image's current pixel
  channel configuration.
*/
MagickPrivate void ResetPixelCacheChannels(Image *image)
{
  CacheInfo
    *magick_restrict cache;
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache=(CacheInfo *) image->cache;
  assert(cache->signature == MagickCoreSignature);
  cache->number_channels=GetPixelChannels(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t C a c h e A n o n y m o u s M e m o r y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetCacheAnonymousMemory() resets the anonymous_memory value.
%
% The format of the ResetCacheAnonymousMemory method is:
%
% void ResetCacheAnonymousMemory(void)
%
*/
MagickPrivate void ResetCacheAnonymousMemory(void)
{
  /*
    Reset the module-level anonymous-memory flag to 0 (its uninitialized
    state); presumably this forces the policy to be re-evaluated the next
    time a nexus buffer is acquired -- TODO confirm against the code that
    assigns cache_anonymous_memory.
  */
  cache_anonymous_memory=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R e s e t P i x e l C a c h e E p o c h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetPixelCacheEpoch() resets the pixel cache epoch.
%
% The format of the ResetPixelCacheEpoch method is:
%
% void ResetPixelCacheEpoch(void)
%
*/
MagickPrivate void ResetPixelCacheEpoch(void)
{
  /*
    Reset the module-level pixel cache epoch counter to 0 (its
    uninitialized state).
  */
  cache_epoch=0;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e M e t h o d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheMethods() sets the image pixel methods to the specified ones.
%
% The format of the SetPixelCacheMethods() method is:
%
% SetPixelCacheMethods(Cache *,CacheMethods *cache_methods)
%
% A description of each parameter follows:
%
% o cache: the pixel cache.
%
% o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods)
{
  CacheInfo
    *magick_restrict cache_info;
  GetOneAuthenticPixelFromHandler
    get_one_authentic_pixel_from_handler;
  GetOneVirtualPixelFromHandler
    get_one_virtual_pixel_from_handler;

  /*
    Set cache pixel methods.  Each handler in cache_methods overrides the
    cache's current handler only when the incoming handler is non-NULL,
    so callers may supply a partially-populated CacheMethods structure.
  */
  assert(cache != (Cache) NULL);
  assert(cache_methods != (CacheMethods *) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL)
    cache_info->methods.get_virtual_pixel_handler=
      cache_methods->get_virtual_pixel_handler;
  if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL)
    cache_info->methods.destroy_pixel_handler=
      cache_methods->destroy_pixel_handler;
  if (cache_methods->get_virtual_metacontent_from_handler !=
      (GetVirtualMetacontentFromHandler) NULL)
    cache_info->methods.get_virtual_metacontent_from_handler=
      cache_methods->get_virtual_metacontent_from_handler;
  if (cache_methods->get_authentic_pixels_handler !=
      (GetAuthenticPixelsHandler) NULL)
    cache_info->methods.get_authentic_pixels_handler=
      cache_methods->get_authentic_pixels_handler;
  if (cache_methods->queue_authentic_pixels_handler !=
      (QueueAuthenticPixelsHandler) NULL)
    cache_info->methods.queue_authentic_pixels_handler=
      cache_methods->queue_authentic_pixels_handler;
  if (cache_methods->sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    cache_info->methods.sync_authentic_pixels_handler=
      cache_methods->sync_authentic_pixels_handler;
  if (cache_methods->get_authentic_pixels_from_handler !=
      (GetAuthenticPixelsFromHandler) NULL)
    cache_info->methods.get_authentic_pixels_from_handler=
      cache_methods->get_authentic_pixels_from_handler;
  if (cache_methods->get_authentic_metacontent_from_handler !=
      (GetAuthenticMetacontentFromHandler) NULL)
    cache_info->methods.get_authentic_metacontent_from_handler=
      cache_methods->get_authentic_metacontent_from_handler;
  /*
    NOTE(review): the guard below tests the cache's *existing*
    get_one_virtual_pixel_from_handler rather than the incoming one --
    asymmetric with the authentic case that follows, which tests the
    incoming handler.  This matches long-standing behavior; verify against
    upstream before "fixing".
  */
  get_one_virtual_pixel_from_handler=
    cache_info->methods.get_one_virtual_pixel_from_handler;
  if (get_one_virtual_pixel_from_handler !=
      (GetOneVirtualPixelFromHandler) NULL)
    cache_info->methods.get_one_virtual_pixel_from_handler=
      cache_methods->get_one_virtual_pixel_from_handler;
  get_one_authentic_pixel_from_handler=
    cache_methods->get_one_authentic_pixel_from_handler;
  if (get_one_authentic_pixel_from_handler !=
      (GetOneAuthenticPixelFromHandler) NULL)
    cache_info->methods.get_one_authentic_pixel_from_handler=
      cache_methods->get_one_authentic_pixel_from_handler;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t P i x e l C a c h e N e x u s P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheNexusPixels() defines the region of the cache for the
% specified cache nexus.
%
% The format of the SetPixelCacheNexusPixels() method is:
%
%      Quantum *SetPixelCacheNexusPixels(
%        const CacheInfo *magick_restrict cache_info,const MapMode mode,
%        const ssize_t x,const ssize_t y,const size_t width,const size_t height,
%        const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info,
%        ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o mode: ReadMode, WriteMode, or IOMode.
%
% o x,y,width,height: define the region of this particular cache nexus.
%
% o buffered: if true, nexus pixels are buffered.
%
% o nexus_info: the cache nexus to set.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline MagickBooleanType AcquireCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MagickSizeType length,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  /*
    Allocate (or map) a staging buffer of `length` bytes for the nexus.
    Returns MagickFalse and records a ResourceLimitError when the request
    does not fit in a size_t or the allocation fails.
  */
  if (length != (MagickSizeType) ((size_t) length))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=0;
  nexus_info->mapped=MagickFalse;
  if (cache_anonymous_memory > 0)
    {
      /*
        Policy prefers anonymous memory maps.
      */
      nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length);
      if (nexus_info->cache != (Quantum *) NULL)
        nexus_info->mapped=MagickTrue;
    }
  else
    {
      /*
        Aligned heap allocation, zero-initialized.
      */
      nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1,
        (size_t) length));
      if (nexus_info->cache != (Quantum *) NULL)
        (void) memset(nexus_info->cache,0,(size_t) length);
    }
  if (nexus_info->cache == (Quantum *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"PixelCacheAllocationFailed","`%s'",
        cache_info->filename);
      return(MagickFalse);
    }
  nexus_info->length=length;
  return(MagickTrue);
}
static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info,
  const MapMode mode)
{
  unsigned char
    *address;

  /*
    Warm the cache line just past the start of the nexus pixels; skip
    buffers smaller than one cache line.  ReadMode hints a read (0),
    any other mode hints a write (1).
  */
  if (nexus_info->length < CACHE_LINE_SIZE)
    return;
  address=(unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE;
  if (mode == ReadMode)
    MagickCachePrefetch(address,0,1);
  else
    MagickCachePrefetch(address,1,1);
}
static Quantum *SetPixelCacheNexusPixels(
  const CacheInfo *magick_restrict cache_info,const MapMode mode,
  const ssize_t x,const ssize_t y,const size_t width,const size_t height,
  const MagickBooleanType buffered,NexusInfo *nexus_info,
  ExceptionInfo *exception)
{
  MagickBooleanType
    status;
  MagickSizeType
    length,
    number_pixels;

  /*
    Define the nexus region (x,y,width,height) and point nexus_info->pixels
    either directly at the in-memory cache or at a private staging buffer.
    Returns NULL on error (empty region, undefined cache, limit exceeded,
    or allocation failure).
  */
  assert(cache_info != (const CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return((Quantum *) NULL);
  if ((width == 0) || (height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "NoPixelsDefinedInCache","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  (void) memset(&nexus_info->region,0,sizeof(nexus_info->region));
  assert(nexus_info->signature == MagickCoreSignature);
  if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) &&
      (buffered == MagickFalse))
    {
      /*
        Direct access is only valid when the rows fit inside the cache and
        the request is either a full-width span (contiguous rows) or a
        single row entirely inside the cache columns.
      */
      if (((x >= 0) && (y >= 0) &&
          (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) &&
          (((x == 0) && (width == cache_info->columns)) || ((height == 1) &&
          (((ssize_t) width+x-1) < (ssize_t) cache_info->columns))))
        {
          MagickOffsetType
            offset;

          /*
            Pixels are accessed directly from memory.
          */
          offset=(MagickOffsetType) y*cache_info->columns+x;
          nexus_info->pixels=cache_info->pixels+cache_info->number_channels*
            offset;
          nexus_info->metacontent=(void *) NULL;
          if (cache_info->metacontent_extent != 0)
            nexus_info->metacontent=(unsigned char *) cache_info->metacontent+
              offset*cache_info->metacontent_extent;
          nexus_info->region.width=width;
          nexus_info->region.height=height;
          nexus_info->region.x=x;
          nexus_info->region.y=y;
          nexus_info->authentic_pixel_cache=MagickTrue;
          PrefetchPixelCacheNexusPixels(nexus_info,mode);
          return(nexus_info->pixels);
        }
    }
  /*
    Pixels are stored in a staging region until they are synced to the cache.
  */
  if (((MagickSizeType) width > cache_info->width_limit) ||
      ((MagickSizeType) height > cache_info->height_limit))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageError,
        "WidthOrHeightExceedsLimit","`%s'",cache_info->filename);
      return((Quantum *) NULL);
    }
  number_pixels=(MagickSizeType) width*height;
  /*
    NOTE(review): the staging buffer is sized to at least
    max(columns,rows) pixels, presumably so it can be reused for full
    row/column transfers -- confirm before shrinking.
  */
  length=MagickMax(number_pixels,MagickMax(cache_info->columns,
    cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels);
  if (cache_info->metacontent_extent != 0)
    length+=number_pixels*cache_info->metacontent_extent;
  status=MagickTrue;
  if (nexus_info->cache == (Quantum *) NULL)
    status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
  else
    if (nexus_info->length < length)
      {
        /* Existing staging buffer is too small: release and reacquire. */
        RelinquishCacheNexusPixels(nexus_info);
        status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception);
      }
  if (status == MagickFalse)
    return((Quantum *) NULL);
  /* Metacontent (if any) is stored immediately after the pixel data. */
  nexus_info->pixels=nexus_info->cache;
  nexus_info->metacontent=(void *) NULL;
  if (cache_info->metacontent_extent != 0)
    nexus_info->metacontent=(void *) (nexus_info->pixels+
      cache_info->number_channels*number_pixels);
  nexus_info->region.width=width;
  nexus_info->region.height=height;
  nexus_info->region.x=x;
  nexus_info->region.y=y;
  nexus_info->authentic_pixel_cache=cache_info->type == PingCache ?
    MagickTrue : MagickFalse;
  PrefetchPixelCacheNexusPixels(nexus_info,mode);
  return(nexus_info->pixels);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t P i x e l C a c h e V i r t u a l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the
% pixel cache and returns the previous setting. A virtual pixel is any pixel
% access that is outside the boundaries of the image cache.
%
% The format of the SetPixelCacheVirtualMethod() method is:
%
% VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  CacheView
    *magick_restrict image_view;
  MagickBooleanType
    status;
  ssize_t
    y;

  /*
    Set the alpha channel of every pixel to `alpha` and mark the image as
    blended (alpha_trait=BlendPixelTrait).  Rows are processed in parallel
    when OpenMP is enabled; a failure in any row clears the shared status
    flag and remaining rows bail out early.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  image->alpha_trait=BlendPixelTrait;
  status=MagickTrue;
  image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;
    register ssize_t
      x;

    if (status == MagickFalse)
      continue;  /* another row already failed; skip remaining work */
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelAlpha(image,alpha,q);
      q+=GetPixelChannels(image);
    }
    status=SyncCacheViewAuthenticPixels(image_view,exception);
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image,
  const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  VirtualPixelMethod
    method;

  /*
    Install the new virtual-pixel method and return the previous one.
    For methods that need an alpha channel (background with a non-opaque
    background color, transparent), an alpha channel is materialized here
    when the image does not yet have one.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  method=cache_info->virtual_pixel_method;
  cache_info->virtual_pixel_method=virtual_pixel_method;
  if ((image->columns != 0) && (image->rows != 0))
    switch (virtual_pixel_method)
    {
      case BackgroundVirtualPixelMethod:
      {
        if ((image->background_color.alpha_trait != UndefinedPixelTrait) &&
            (image->alpha_trait == UndefinedPixelTrait))
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        /* A non-gray background forces the image out of a gray colorspace. */
        if ((IsPixelInfoGray(&image->background_color) == MagickFalse) &&
            (IsGrayColorspace(image->colorspace) != MagickFalse))
          (void) SetImageColorspace(image,sRGBColorspace,exception);
        break;
      }
      case TransparentVirtualPixelMethod:
      {
        if (image->alpha_trait == UndefinedPixelTrait)
          (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception);
        break;
      }
      default:
        break;
    }
  return(method);
}
#if defined(MAGICKCORE_OPENCL_SUPPORT)
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c O p e n C L B u f f e r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have
% been completed and updates the host memory.
%
% The format of the SyncAuthenticOpenCLBuffer() method is:
%
% void SyncAuthenticOpenCLBuffer(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info)
{
  /*
    Flush pending OpenCL work into host memory; only memory-resident
    caches with an attached OpenCL buffer need this.
  */
  assert(cache_info != (CacheInfo *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type != MemoryCache)
    return;
  if (cache_info->opencl == (MagickCLCacheInfo) NULL)
    return;
  /*
    Ensure single threaded access to OpenCL environment.
  */
  LockSemaphoreInfo(cache_info->semaphore);
  cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl);
  UnlockSemaphoreInfo(cache_info->semaphore);
}
MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Make sure all OpenCL operations on the image's pixel cache have
    completed and the host memory is up to date (delegates to
    CopyOpenCLBuffer, which validates the cache and takes the semaphore).
  */
  assert(image != (const Image *) NULL);
  cache_info=(CacheInfo *) image->cache;
  CopyOpenCLBuffer(cache_info);
}
#endif
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e N e x u s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the
% in-memory or disk cache. The method returns MagickTrue if the pixel region
% is synced, otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelCacheNexus() method is:
%
% MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o nexus_info: the cache nexus to sync.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  MagickBooleanType
    status;

  /*
    Transfer pixels to the cache.  Applies write/composite mask clipping
    first, then writes the nexus staging buffer (pixels and, when present,
    metacontent) back to the in-memory or disk cache.  The image is marked
    tainted on any successful sync.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->cache == (Cache) NULL)
    ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->type == UndefinedCache)
    return(MagickFalse);
  if (image->mask_trait != UpdatePixelTrait)
    {
      /* Restrict the update to the region allowed by the image masks. */
      if (((image->channels & WriteMaskChannel) != 0) &&
          (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
      if (((image->channels & CompositeMaskChannel) != 0) &&
          (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse))
        return(MagickFalse);
    }
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    {
      /* Nexus points directly into the cache; nothing to copy back. */
      image->taint=MagickTrue;
      return(MagickTrue);
    }
  assert(cache_info->signature == MagickCoreSignature);
  /*
    NOTE(review): the pixel-write status is captured first and only
    checked after the metacontent write below -- a metacontent failure
    returns MagickFalse regardless of the pixel-write result.
  */
  status=WritePixelCachePixels(cache_info,nexus_info,exception);
  if ((cache_info->metacontent_extent != 0) &&
      (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse))
    return(MagickFalse);
  if (status != MagickFalse)
    image->taint=MagickTrue;
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c A u t h e n t i c P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory
% or disk cache. The method returns MagickTrue if the pixel region is synced,
% otherwise MagickFalse.
%
% The format of the SyncAuthenticPixelsCache() method is:
%
% MagickBooleanType SyncAuthenticPixelsCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType SyncAuthenticPixelsCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();

  /*
    Sync the calling thread's per-thread cache nexus back to the pixel
    cache.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncAuthenticPixels() method is:
%
% MagickBooleanType SyncAuthenticPixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SyncAuthenticPixels(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;
  const int
    id = GetOpenMPThreadId();

  /*
    Save the image pixels to the in-memory or disk cache.  Delegates to
    the registered sync handler when one is installed; otherwise syncs the
    calling thread's cache nexus directly.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.sync_authentic_pixels_handler !=
      (SyncAuthenticPixelsHandler) NULL)
    return(cache_info->methods.sync_authentic_pixels_handler(image,exception));
  assert(id < (int) cache_info->number_threads);
  return(SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id],
    exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e P i x e l C a c h e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImagePixelCache() saves the image pixels to the in-memory or disk cache.
% The method returns MagickTrue if the pixel region is flushed, otherwise
% MagickFalse.
%
% The format of the SyncImagePixelCache() method is:
%
% MagickBooleanType SyncImagePixelCache(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  /*
    Flush the image pixels by re-acquiring the image pixel cache; success
    is indicated by a non-NULL cache.
  */
  assert(image != (Image *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception);
  if (cache_info == (CacheInfo *) NULL)
    return(MagickFalse);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e P i x e l C a c h e M e t a c o n t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCacheMetacontent() writes the meta-content to the specified region
% of the pixel cache.
%
% The format of the WritePixelCacheMetacontent() method is:
%
% MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the meta-content.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info,
  NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register const unsigned char
    *magick_restrict p;
  register ssize_t
    y;
  size_t
    rows;

  /*
    Write the nexus meta-content to the pixel cache (memory, disk, or
    distributed).  Returns MagickFalse when the cache has no metacontent
    or a write fails; MagickTrue when the nexus already aliases the cache
    directly or all rows were written.

    Fix: the distributed-cache branch passed `®ion` (an HTML-entity
    corruption of `&region`) to WriteDistributePixelCacheMetacontent;
    restored to `&region`.
  */
  if (cache_info->metacontent_extent == 0)
    return(MagickFalse);
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) nexus_info->region.width*
    cache_info->metacontent_extent;
  extent=(MagickSizeType) length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=(unsigned char *) nexus_info->metacontent;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register unsigned char
        *magick_restrict q;

      /*
        Write associated pixels to memory.  A full-width region that fits
        in a size_t is copied as a single run.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=(unsigned char *) cache_info->metacontent+offset*
        cache_info->metacontent_extent;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=nexus_info->region.width*cache_info->metacontent_extent;
        q+=cache_info->columns*cache_info->metacontent_extent;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write associated pixels to disk.  Metacontent is stored after all
        the pixel data (columns*rows*channels quanta).
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      extent=(MagickSizeType) cache_info->columns*cache_info->rows;
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+extent*
          cache_info->number_channels*sizeof(Quantum)+offset*
          cache_info->metacontent_extent,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write metacontent to distributed cache, one region row at a time
        unless the whole extent fits in one transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->metacontent_extent*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a write above failed. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ W r i t e C a c h e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% WritePixelCachePixels() writes image pixels to the specified region of the
% pixel cache.
%
% The format of the WritePixelCachePixels() method is:
%
% MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info,
% NexusInfo *nexus_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o cache_info: the pixel cache.
%
% o nexus_info: the cache nexus to write the pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType WritePixelCachePixels(
  CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info,
  ExceptionInfo *exception)
{
  MagickOffsetType
    count,
    offset;
  MagickSizeType
    extent,
    length;
  register const Quantum
    *magick_restrict p;
  register ssize_t
    y;
  size_t
    rows;

  /*
    Write the nexus pixels to the pixel cache (memory, disk, or
    distributed).  Returns MagickTrue when the nexus already aliases the
    cache directly or all rows were written.

    Fix: the distributed-cache branch passed `®ion` (an HTML-entity
    corruption of `&region`) to WriteDistributePixelCachePixels; restored
    to `&region`.
  */
  if (nexus_info->authentic_pixel_cache != MagickFalse)
    return(MagickTrue);
  offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+
    nexus_info->region.x;
  length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width*
    sizeof(Quantum);
  extent=length*nexus_info->region.height;
  rows=nexus_info->region.height;
  y=0;
  p=nexus_info->pixels;
  switch (cache_info->type)
  {
    case MemoryCache:
    case MapCache:
    {
      register Quantum
        *magick_restrict q;

      /*
        Write pixels to memory.  A full-width region that fits in a
        size_t is copied as a single run.
      */
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent == (MagickSizeType) ((size_t) extent)))
        {
          length=extent;
          rows=1UL;
        }
      q=cache_info->pixels+cache_info->number_channels*offset;
      for (y=0; y < (ssize_t) rows; y++)
      {
        (void) memcpy(q,p,(size_t) length);
        p+=cache_info->number_channels*nexus_info->region.width;
        q+=cache_info->number_channels*cache_info->columns;
      }
      break;
    }
    case DiskCache:
    {
      /*
        Write pixels to disk, one region row at a time unless the whole
        extent fits in one transfer.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse)
        {
          ThrowFileException(exception,FileOpenError,"UnableToOpenFile",
            cache_info->cache_filename);
          UnlockSemaphoreInfo(cache_info->file_semaphore);
          return(MagickFalse);
        }
      if ((cache_info->columns == nexus_info->region.width) &&
          (extent <= MagickMaxBufferExtent))
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WritePixelCacheRegion(cache_info,cache_info->offset+offset*
          cache_info->number_channels*sizeof(*p),length,(const unsigned char *)
          p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        offset+=cache_info->columns;
      }
      if (IsFileDescriptorLimitExceeded() != MagickFalse)
        (void) ClosePixelCacheOnDisk(cache_info);
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    case DistributedCache:
    {
      RectangleInfo
        region;

      /*
        Write pixels to distributed cache.
      */
      LockSemaphoreInfo(cache_info->file_semaphore);
      region=nexus_info->region;
      if ((cache_info->columns != nexus_info->region.width) ||
          (extent > MagickMaxBufferExtent))
        region.height=1UL;
      else
        {
          length=extent;
          rows=1UL;
        }
      for (y=0; y < (ssize_t) rows; y++)
      {
        count=WriteDistributePixelCachePixels((DistributeCacheInfo *)
          cache_info->server_info,&region,length,(const unsigned char *) p);
        if (count != (MagickOffsetType) length)
          break;
        p+=cache_info->number_channels*nexus_info->region.width;
        region.y++;
      }
      UnlockSemaphoreInfo(cache_info->file_semaphore);
      break;
    }
    default:
      break;
  }
  /* y stops short of rows only when a write above failed. */
  if (y < (ssize_t) rows)
    {
      ThrowFileException(exception,CacheError,"UnableToWritePixelCache",
        cache_info->cache_filename);
      return(MagickFalse);
    }
  if ((cache_info->debug != MagickFalse) &&
      (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse))
    (void) LogMagickEvent(CacheEvent,GetMagickModule(),
      "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double)
      nexus_info->region.width,(double) nexus_info->region.height,(double)
      nexus_info->region.x,(double) nexus_info->region.y);
  return(MagickTrue);
}
|
GB_binop__ge_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_fp64
// A.*B function (eWiseMult): GB_AemultB__ge_fp64
// A*D function (colscale): GB_AxD__ge_fp64
// D*A function (rowscale): GB_DxB__ge_fp64
// C+=B function (dense accum): GB_Cdense_accumB__ge_fp64
// C+=b function (dense accum): GB_Cdense_accumb__ge_fp64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_fp64
// C=scalar+B GB_bind1st__ge_fp64
// C=scalar+B' GB_bind1st_tran__ge_fp64
// C=A+scalar GB_bind2nd__ge_fp64
// C=A'+scalar GB_bind2nd_tran__ge_fp64
// C type: bool
// A type: double
// B,b type: double
// BinaryOp: cij = (aij >= bij)
// Type and operator configuration consumed by the GB_* template files
// included below: C = (A >= B), with double inputs and bool output.
// This file is auto-generated; the macros specialize the shared templates.
#define GB_ATYPE \
    double
#define GB_BTYPE \
    double
#define GB_CTYPE \
    bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    double bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
    z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
    0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_GE || GxB_NO_FP64 || GxB_NO_GE_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B (eWise with the >= operator), all 3 matrices dense, no accumulator.
// The loop body comes from the shared template, specialized by the GB_*
// macros defined above.  Returns GrB_NO_VALUE when this specialization is
// compiled out via GB_DISABLE.
GrB_Info GB_Cdense_ewise3_noaccum__ge_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// the subassign-23 template is compiled out (#if 0) for this operator,
// so this kernel is a stub that reports success without touching C
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ge_fp64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// scalar-accumulate template is compiled out (#if 0) for this operator;
// stub returns success without touching C
#if 0
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A*D (column scale by diagonal D, per the section header); only the
// typed output array Cx is set up here, the loop comes from the template
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = D*B (row scale by diagonal D, per the section header)
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
// numeric phase of eWiseAdd (C = A+B or C<M> = A+B); all pattern and
// task handling lives in GB_add_template.c, specialized via GB_BINOP
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
// numeric phase of eWiseMult (C = A.*B or C<M> = A.*B), driven by
// GB_emult_template.c with the GE operator
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ge_fp64
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (x >= Bx [k]) for all k, with the scalar x bound as the
    // first operand of the GE operator.
    bool *Cx = (bool *) Cx_output ;
    double *Bx = (double *) Bx_input ;
    double x = (*((double *) x_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        double b_k = Bx [k] ;
        Cx [k] = (x >= b_k) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ge_fp64
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // Cx [k] = (Ax [k] >= y) for all k, with the scalar y bound as the
    // second operand of the GE operator.
    bool *Cx = (bool *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double y = (*((double *) y_input)) ;
    int64_t k ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (k = 0 ; k < anz ; k++)
    {
        double a_k = Ax [k] ;
        Cx [k] = (a_k >= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
// C = op (x, A'): transpose A and apply the GE operator with x bound 1st.
// The actual transpose loop comes from GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__ge_fp64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Unreachable as code, but the preprocessor still processes these lines:
// they restore GB_ATYPE for the remainder of the translation unit.
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
// C = op (A', y): transpose A and apply the GE operator with y bound 2nd.
// GB_ATYPE is already double here, so no redefinition is needed.
GrB_Info GB_bind2nd_tran__ge_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
cancel-test.h | #include <stdlib.h>
#include <omp.h>
// Test helper: counts live instances in the static member `s`.  Every
// constructor (default, value, copy) atomically increments `s` and the
// destructor decrements it, so verify() can abort() if any object was
// leaked or destroyed twice.  The atomics keep the bookkeeping correct
// when objects are created/destroyed inside OpenMP parallel regions.
struct S
{
static int s;   // live-instance counter, shared by all threads
int v;          // payload; NOTE: left uninitialized by the default ctor
S ()
{
#pragma omp atomic
s++;
}
S (int x)
{
#pragma omp atomic
s++;
v = x;
}
~S ()
{
#pragma omp atomic
s--;
}
S (const S &x)
{
#pragma omp atomic
s++;
v = x.v;
}
// abort the test unless every constructed S has been destroyed
static void
verify ()
{
if (s) abort ();
}
// increment the payload value
void
bump ()
{
v++;
}
};
int S::s = 0;
|
m_sparsetools.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include<stdio.h>
#include<stdlib.h>
#include <omp.h>
#include <cblas.h>
/*
!
!
!Compute Y += A*X for CSR matrix A and dense vectors X,Y
! From scipy/sparse/sparsetools/csr.h
!
!
! Input Arguments:
! I n_row - number of rows in A
! I n_col - number of columns in A
! I Ap[n_row+1] - row pointer
! I Aj[nnz(A)] - column indices
! T Ax[nnz(A)] - nonzeros
! T Xx[n_col] - input vector
!
! Output Arguments:
! T Yx[n_row] - output vector
!
! Note:
! Output array Yx must be preallocated
!
! Complexity: Linear. Specifically O(nnz(A) + n_row)
*/
/*
 * Y += A*X for a CSR matrix A (float) and dense vectors X, Y.
 * Rows are independent, so they are split across OpenMP threads; each
 * row accumulates into a thread-private value and writes Yx[row] once.
 * Yx must be preallocated (it is read and then updated).  ncol and nnz
 * are accepted for interface symmetry and are unused.
 */
void scsr_matvec(int nrow, int ncol, int nnz, int *Ap, int *Aj,
                 float *Ax, float *Xx, float *Yx)
{
    int row, k;
    float acc = 0.0;
    # pragma omp parallel \
      shared (nrow, Yx, Ap, Ax, Xx, Aj) \
      private (row, k, acc)
    {
        #pragma omp for
        for (row = 0; row < nrow; row++)
        {
            acc = Yx[row];
            for (k = Ap[row]; k < Ap[row+1]; k++)
                acc += Ax[k] * Xx[Aj[k]];
            Yx[row] = acc;
        }
    }
}
/*
 * Y += A*X for a CSR matrix A (double) and dense vectors X, Y.
 * Double-precision twin of scsr_matvec: rows are parallelized with
 * OpenMP and each row accumulates privately before the single write to
 * Yx[row].  Yx must be preallocated; ncol and nnz are unused.
 */
void dcsr_matvec(int nrow, int ncol, int nnz, int *Ap, int *Aj,
                 double *Ax, double *Xx, double *Yx)
{
    int row, k;
    double acc = 0.0;
    # pragma omp parallel \
      shared (nrow, Yx, Ap, Ax, Xx, Aj) \
      private (row, k, acc)
    {
        #pragma omp for
        for (row = 0; row < nrow; row++)
        {
            acc = Yx[row];
            for (k = Ap[row]; k < Ap[row+1]; k++)
                acc += Ax[k] * Xx[Aj[k]];
            Yx[row] = acc;
        }
    }
}
/*
* Compute Y += A*X for CSC matrix A and dense vectors X,Y
* From scipy/sparse/sparsetools/csc.h
*
*
* Input Arguments:
* I n_row - number of rows in A
* I n_col - number of columns in A
* I Ap[n_row+1] - column pointer
* I Ai[nnz(A)] - row indices
* T Ax[n_col] - nonzeros
* T Xx[n_col] - input vector
*
* Output Arguments:
* T Yx[n_row] - output vector
*
* Note:
* Output array Yx must be preallocated
*
* Complexity: Linear. Specifically O(nnz(A) + n_col)
*
*/
/*
 * Y += A*X for a CSC matrix A (float) and dense vectors X, Y.
 * Sequential: columns scatter into shared Yx entries, so a naive
 * parallelization would race.  Yx must be preallocated; n_row and nnz
 * are unused.
 */
void scsc_matvec(int n_row, int n_col, int nnz,
                 int *Ap, int *Ai, float *Ax, float *Xx, float *Yx)
{
    int col, k;
    for (col = 0; col < n_col; col++)
    {
        /* scatter column `col`, scaled by Xx[col], into Y */
        for (k = Ap[col]; k < Ap[col+1]; k++)
            Yx[Ai[k]] += Ax[k] * Xx[col];
    }
}
/*
 * Y += A*X for a CSC matrix A (double) and dense vectors X, Y.
 * Double-precision twin of scsc_matvec; sequential because columns
 * scatter into shared Yx entries.  Yx must be preallocated; n_row and
 * nnz are unused.
 */
void dcsc_matvec(int n_row, int n_col, int nnz,
                 int *Ap, int *Ai, double *Ax, double *Xx, double *Yx)
{
    int col, k;
    for (col = 0; col < n_col; col++)
    {
        /* scatter column `col`, scaled by Xx[col], into Y */
        for (k = Ap[col]; k < Ap[col+1]; k++)
            Yx[Ai[k]] += Ax[k] * Xx[col];
    }
}
/*
* Compute Y += A*X for CSC matrix A and dense block vectors X,Y
* From scipy/sparse/sparsetools/csc.h
*
*
* Input Arguments:
* I n_row - number of rows in A
* I n_col - number of columns in A
* I n_vecs - number of column vectors in X and Y
* I Ap[n_row+1] - row pointer
* I Aj[nnz(A)] - column indices
* T Ax[nnz(A)] - nonzeros
* T Xx[n_col,n_vecs] - input vector
*
* Output Arguments:
* T Yx[n_row,n_vecs] - output vector
*
* Note:
* Output array Yx must be preallocated
*
*/
/*
 * Y += A*X for a CSC matrix A (float) and dense block vectors X, Y,
 * where X and Y hold n_vecs contiguous values per matrix row/column.
 * Each nonzero A(i,j) adds Ax * Xx[j,:] into Yx[i,:] via CBLAS saxpy.
 * Sequential: rows scatter, so parallelizing over columns would race
 * (a commented-out OpenMP version existed here before).  n_row unused.
 */
void scsc_matvecs(int n_row, int n_col, int n_vecs,
                  int *Ap, int *Ai, float *Ax, float *Xx, float *Yx)
{
    int col, k, row;
    for (col = 0; col < n_col; col++)
    {
        for (k = Ap[col]; k < Ap[col+1]; k++)
        {
            row = Ai[k];
            /* Yx[row,:] += Ax[k] * Xx[col,:] */
            cblas_saxpy (n_vecs, Ax[k], &Xx[n_vecs*col], 1, &Yx[n_vecs*row], 1);
        }
    }
}
/*
 * Y += A*X for a CSC matrix A (double) and dense block vectors X, Y,
 * with n_vecs contiguous values per matrix row/column.  Each nonzero
 * A(i,j) adds Ax * Xx[j,:] into Yx[i,:] via CBLAS daxpy.  Sequential
 * for the same scatter reason as scsc_matvecs.  n_row unused.
 */
void dcsc_matvecs(int n_row, int n_col, int n_vecs,
                  int *Ap, int *Ai, double *Ax, double *Xx, double *Yx)
{
    int col, k, row;
    for (col = 0; col < n_col; col++)
    {
        for (k = Ap[col]; k < Ap[col+1]; k++)
        {
            row = Ai[k];
            /* Yx[row,:] += Ax[k] * Xx[col,:] */
            cblas_daxpy (n_vecs, Ax[k], &Xx[n_vecs*col], 1, &Yx[n_vecs*row], 1);
        }
    }
}
|
seq_multivector.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
/******************************************************************************
*
* Member functions for hypre_Vector class.
*
*****************************************************************************/
#include "seq_multivector.h"
#include "_hypre_utilities.h"
#include <stdlib.h>
#include <string.h>
#include <assert.h>
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorCreate
*--------------------------------------------------------------------------*/
hypre_Multivector *
hypre_SeqMultivectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   /* Allocate an empty multivector shell: the data array and the
      active-index mask stay NULL until hypre_SeqMultivectorInitialize. */
   hypre_Multivector *mv =
      (hypre_Multivector *) hypre_MAlloc(sizeof(hypre_Multivector), HYPRE_MEMORY_HOST);

   hypre_MultivectorSize(mv)       = size;
   hypre_MultivectorNumVectors(mv) = num_vectors;
   hypre_MultivectorData(mv)       = NULL;
   hypre_MultivectorOwnsData(mv)   = 1;
   mv->active_indices              = NULL;
   mv->num_active_vectors          = 0;
   return mv;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorInitialize
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorInitialize( hypre_Multivector *mvector )
{
   /* Allocate the data array (if absent) and the active-index mask,
      marking every vector active.  Returns 0 on success. */
   HYPRE_Int ierr = 0, i, size, num_vectors;
   size = hypre_MultivectorSize(mvector);
   num_vectors = hypre_MultivectorNumVectors(mvector);
   if (NULL==hypre_MultivectorData(mvector))
      hypre_MultivectorData(mvector) =
         (HYPRE_Complex *) hypre_MAlloc(sizeof(HYPRE_Complex)*size*num_vectors, HYPRE_MEMORY_HOST);
   /* now we create a "mask" of "active" vectors; initially all active */
   if (NULL==mvector->active_indices)
   {
      /* BUG FIX: the assignment operator was missing here, leaving
         active_indices unset (a syntax error as written) */
      mvector->active_indices = hypre_CTAlloc(HYPRE_Int, num_vectors, HYPRE_MEMORY_HOST);
      for (i=0; i<num_vectors; i++) mvector->active_indices[i] = i;
      mvector->num_active_vectors = num_vectors;
   }
   return ierr;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetDataOwner
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorSetDataOwner(hypre_Multivector *mvector, HYPRE_Int owns_data)
{
   /* Record whether this multivector owns (and must later free) its
      data array.  Always returns 0. */
   hypre_MultivectorOwnsData(mvector) = owns_data;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorDestroy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorDestroy(hypre_Multivector *mvector)
{
   /* Free the data array (only if owned), the active-index mask, and
      finally the struct itself.  NULL input is a no-op.  Returns 0. */
   if (mvector == NULL) { return 0; }

   if (hypre_MultivectorOwnsData(mvector) && hypre_MultivectorData(mvector) != NULL)
   {
      hypre_TFree( hypre_MultivectorData(mvector) , HYPRE_MEMORY_HOST);
   }
   if (mvector->active_indices != NULL)
   {
      hypre_TFree(mvector->active_indices, HYPRE_MEMORY_HOST);
   }
   hypre_TFree(mvector, HYPRE_MEMORY_HOST);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetMask
* (this routine accepts mask in "zeros and ones format, and converts it to
the one used in the structure "hypre_Multivector")
*-------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorSetMask(hypre_Multivector *mvector, HYPRE_Int * mask)
{
   /* Replace the active-vector set from a zeros-and-ones mask of length
      num_vectors (mask == NULL activates every vector).  Returns 0. */
   HYPRE_Int i, num_vectors = mvector->num_vectors;
   if (mvector->active_indices != NULL) hypre_TFree(mvector->active_indices, HYPRE_MEMORY_HOST);
   /* BUG FIX: the assignment operator was missing here (syntax error as
      written), so the freshly allocated index array was lost */
   mvector->active_indices = hypre_CTAlloc(HYPRE_Int, num_vectors, HYPRE_MEMORY_HOST);
   mvector->num_active_vectors = 0;
   if (mask != NULL)
      for (i=0; i<num_vectors; i++)
      {
         if ( mask[i] )
            mvector->active_indices[mvector->num_active_vectors++] = i;
      }
   else
      for (i=0; i<num_vectors; i++)
         mvector->active_indices[mvector->num_active_vectors++] = i;
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetConstantValues
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorSetConstantValues(hypre_Multivector *v, HYPRE_Complex value)
{
   /* Fill every active vector of v with `value`.  Returns 0. */
   HYPRE_Int vec, j;
   HYPRE_Int size = hypre_MultivectorSize(v);
   HYPRE_Complex *data = hypre_MultivectorData(v);

   if (v->num_active_vectors == v->num_vectors)
   {
      /* all vectors active: one flat fill over the whole data block */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = 0; j < v->num_vectors*size; j++) data[j] = value;
      return 0;
   }

   /* masked case: fill each active column separately */
   for (vec = 0; vec < v->num_active_vectors; vec++)
   {
      HYPRE_Int first = v->active_indices[vec]*size;
      HYPRE_Int last  = first + size;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
      for (j = first; j < last; j++) data[j] = value;
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorSetRandomValues
*
* returns vector of values randomly distributed between -1.0 and +1.0
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorSetRandomValues(hypre_Multivector *v, HYPRE_Int seed)
{
   /* Fill every active vector of v with values in (-1, 1), seeded by
      `seed`.  Sequential on purpose: hypre_Rand() is stateful (see the
      note borrowed from vector.c), so threading would change results. */
   HYPRE_Int vec, j;
   HYPRE_Int size = hypre_MultivectorSize(v);
   HYPRE_Complex *data = hypre_MultivectorData(v);

   hypre_SeedRand(seed);
   if (v->num_active_vectors == v->num_vectors)
   {
      /* all vectors active: one pass over the flat data block */
      for (j = 0; j < v->num_vectors*size; j++)
         data[j] = 2.0 * hypre_Rand() - 1.0;
      return 0;
   }

   /* masked case: visit each active column in mask order */
   for (vec = 0; vec < v->num_active_vectors; vec++)
   {
      HYPRE_Int first = v->active_indices[vec]*size;
      HYPRE_Int last  = first + size;
      for (j = first; j < last; j++)
         data[j] = 2.0 * hypre_Rand() - 1.0;
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorCopy
* copies data from x to y
* y should have already been initialized at the same size as x
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorCopy(hypre_Multivector *x, hypre_Multivector *y)
{
   /* Copy the active vectors of x into the active vectors of y.
      y must already be initialized with the same size and the same
      number of active vectors.  Returns 0. */
   HYPRE_Int i, size, count, num_active_vectors, *x_active_ind, *y_active_ind;
   HYPRE_Complex *x_data, *y_data, *dest, *src;
   hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors);
   num_active_vectors = x->num_active_vectors;
   size = x->size;
   x_data = x->data;
   y_data = y->data;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   if (x->num_active_vectors == x->num_vectors &&
       y->num_active_vectors == y->num_vectors)
   {
      /* everything active in both: one bulk copy.  `count` is an
         element count (hypre_TMemcpy takes a typed count, not bytes) */
      count = x->num_vectors * size;
      hypre_TMemcpy(y_data, x_data, HYPRE_Complex, count, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   }
   else
   {
      count = size;
      for (i=0; i < num_active_vectors; i++)
      {
         src  = x_data + size * x_active_ind[i];
         dest = y_data + size * y_active_ind[i];
         /* was hypre_Memcpy with hypre_TMemcpy's typed 6-arg signature;
            use hypre_TMemcpy for consistency with the bulk branch above
            (NOTE(review): confirm against _hypre_utilities.h) */
         hypre_TMemcpy(dest, src, HYPRE_Complex, count, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
      }
   }
   return 0;
}
HYPRE_Int
hypre_SeqMultivectorCopyWithoutMask(hypre_Multivector *x ,
                                    hypre_Multivector *y)
{
   /* Copy ALL vectors of x into y, ignoring the active-index masks.
      x and y must have the same size and number of vectors.  Returns 0. */
   HYPRE_Int count;
   hypre_assert (x->size == y->size && x->num_vectors == y->num_vectors);
   /* element count, not bytes: hypre_TMemcpy scales by the type */
   count = x->size * x->num_vectors;
   /* was hypre_Memcpy with hypre_TMemcpy's typed 6-arg signature; use
      hypre_TMemcpy as in hypre_SeqMultivectorCopy's bulk branch
      (NOTE(review): confirm against _hypre_utilities.h) */
   hypre_TMemcpy(y->data, x->data, HYPRE_Complex, count, HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorAxpy
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorAxpy(HYPRE_Complex alpha, hypre_Multivector *x,
                         hypre_Multivector *y)
{
   /* y += alpha * x over the active vectors (same active count and size
      required in both).  Returns 0. */
   HYPRE_Int i, j, size, num_active_vectors, *x_active_ind, *y_active_ind;
   HYPRE_Complex *x_data, *y_data, *src, *dest;
   hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors);
   x_data = x->data;
   y_data = y->data;
   size = x->size;
   num_active_vectors = x->num_active_vectors;
   x_active_ind = x->active_indices;
   y_active_ind = y->active_indices;
   if (x->num_active_vectors == x->num_vectors &&
       y->num_active_vectors == y->num_vectors)
   {
      /* BUG FIX: this branch read through `dest`/`src` before they were
         ever assigned (undefined behavior); operate on the full data
         arrays directly, as the masked branch does per-column */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for(i = 0; i < x->num_vectors*size; i++) y_data[i] += alpha * x_data[i];
   }
   else
   {
      for(i = 0; i < num_active_vectors; i++)
      {
         src = x_data + x_active_ind[i]*size;
         dest = y_data + y_active_ind[i]*size;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
         for (j = 0; j < size; j++) dest[j] += alpha * src[j];
      }
   }
   return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorByDiag: " y(<y_mask>) = alpha(<mask>) .* x(<x_mask>) "
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_SeqMultivectorByDiag(hypre_Multivector *x, HYPRE_Int *mask, HYPRE_Int n,
HYPRE_Complex *alpha, hypre_Multivector *y)
{
/* y(active) = diag(alpha(mask)) .* x(active): the i-th active vector of
   x is scaled by the i-th masked entry of alpha into the i-th active
   vector of y.  `mask` is zeros-and-ones of length n (NULL = all n
   entries of alpha used).  Returns 0. */
HYPRE_Int i, j, size, num_active_vectors, *x_active_ind, *y_active_ind;
HYPRE_Int *al_active_ind, num_active_als;
HYPRE_Complex *x_data, *y_data, *dest, *src, current_alpha;
hypre_assert (x->size == y->size && x->num_active_vectors == y->num_active_vectors);
/* build list of active indices in alpha */
al_active_ind = hypre_TAlloc(HYPRE_Int, n, HYPRE_MEMORY_HOST);
num_active_als = 0;
if (mask!=NULL)
for (i=0; i<n; i++)
{
if (mask[i])
al_active_ind[num_active_als++]=i;
}
else
for (i=0; i<n; i++)
al_active_ind[num_active_als++]=i;
/* the masked alpha entries must match the active vectors one-to-one */
hypre_assert (num_active_als==x->num_active_vectors);
x_data = x->data;
y_data = y->data;
size = x->size;
num_active_vectors = x->num_active_vectors;
x_active_ind = x->active_indices;
y_active_ind = y->active_indices;
for(i = 0; i < num_active_vectors; i++)
{
src = x_data + x_active_ind[i]*size;
dest = y_data + y_active_ind[i]*size;
current_alpha=alpha[ al_active_ind[i] ];
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
#endif
for (j = 0; j < size; j++)
dest[j] = current_alpha*src[j];
}
hypre_TFree(al_active_ind, HYPRE_MEMORY_HOST);
return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorInnerProd
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_SeqMultivectorInnerProd(hypre_Multivector *x, hypre_Multivector *y,
HYPRE_Real *results )
{
/* Gram matrix of inner products <x_i, conj(y_j)> over the active
   vectors of x and y.  `results` is written sequentially via
   `*results++`, producing column-wise storage: the inner loop over x
   fills one column for each y vector.  Returns 0. */
HYPRE_Int i, j, k, size, *x_active_ind, *y_active_ind;
HYPRE_Int x_num_active_vectors, y_num_active_vectors;
HYPRE_Complex *x_data, *y_data, *y_ptr, *x_ptr;
HYPRE_Real current_product;
hypre_assert (x->size==y->size);
x_data = x->data;
y_data = y->data;
size = x->size;
x_num_active_vectors = x->num_active_vectors;
y_num_active_vectors = y->num_active_vectors;
/* we assume that "results" points to contiguous array of (x_num_active_vectors X
y_num_active_vectors) doubles */
x_active_ind = x->active_indices;
y_active_ind = y->active_indices;
for(j = 0; j < y_num_active_vectors; j++)
{
y_ptr = y_data + y_active_ind[j]*size;
for (i = 0; i < x_num_active_vectors; i++)
{
x_ptr = x_data + x_active_ind[i]*size;
current_product = 0.0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
#endif
for(k = 0; k < size; k++)
current_product += x_ptr[k] * hypre_conj(y_ptr[k]);
/* column-wise storage for results */
*results++ = current_product;
}
}
return 0;
}
/*--------------------------------------------------------------------------
* hypre_SeqMultivectorInnerProdDiag
*--------------------------------------------------------------------------*/
HYPRE_Int hypre_SeqMultivectorInnerProdDiag(hypre_Multivector *x,
hypre_Multivector *y, HYPRE_Real *diagResults)
{
/* Pairwise inner products <x_i, conj(y_i)> of corresponding active
   vectors only (the diagonal of hypre_SeqMultivectorInnerProd's Gram
   matrix).  diagResults must hold num_active_vectors entries; it is
   filled sequentially via `*diagResults++`.  Returns 0. */
HYPRE_Complex *x_data, *y_data, *y_ptr, *x_ptr;
HYPRE_Real current_product;
HYPRE_Int i, k, size, num_active_vectors, *x_active_ind, *y_active_ind;
hypre_assert(x->size==y->size && x->num_active_vectors == y->num_active_vectors);
x_data = x->data;
y_data = y->data;
size = x->size;
num_active_vectors = x->num_active_vectors;
x_active_ind = x->active_indices;
y_active_ind = y->active_indices;
for (i=0; i<num_active_vectors; i++)
{
x_ptr = x_data + x_active_ind[i]*size;
y_ptr = y_data + y_active_ind[i]*size;
current_product = 0.0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) reduction(+:current_product) HYPRE_SMP_SCHEDULE
#endif
for(k=0; k<size; k++)
current_product += x_ptr[k] * hypre_conj(y_ptr[k]);
*diagResults++ = current_product;
}
return 0;
}
HYPRE_Int
hypre_SeqMultivectorByMatrix(hypre_Multivector *x, HYPRE_Int rGHeight, HYPRE_Int rHeight,
HYPRE_Int rWidth, HYPRE_Complex* rVal, hypre_Multivector *y)
{
/* y = x * R for a dense coefficient matrix R: each active column j of y
   becomes the linear combination sum_i rVal(i,j) * x_i over the active
   vectors of x.  rVal is read sequentially via `*rVal++`; R is stored
   with a "global height" rGHeight >= rHeight, so `gap` rows are skipped
   after each column.  y is overwritten (first term assigns, the rest
   accumulate).  Returns 0. */
HYPRE_Int i, j, k, size, gap, *x_active_ind, *y_active_ind;
HYPRE_Complex *x_data, *y_data, *x_ptr, *y_ptr, current_coef;
hypre_assert(rHeight>0);
hypre_assert (rHeight==x->num_active_vectors && rWidth==y->num_active_vectors);
x_data = x->data;
y_data = y->data;
size = x->size;
x_active_ind = x->active_indices;
y_active_ind = y->active_indices;
gap = rGHeight - rHeight;
for (j=0; j<rWidth; j++)
{
y_ptr = y_data + y_active_ind[j]*size;
/* ------ set current "y" to first member in a sum ------ */
x_ptr = x_data + x_active_ind[0]*size;
current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
for (k=0; k<size; k++)
y_ptr[k] = current_coef * x_ptr[k];
/* ------ now add all other members of a sum to "y" ----- */
for (i=1; i<rHeight; i++)
{
x_ptr = x_data + x_active_ind[i]*size;
current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
for (k=0; k<size; k++)
y_ptr[k] += current_coef * x_ptr[k];
}
/* skip the padding rows of R before the next column */
rVal += gap;
}
return 0;
}
HYPRE_Int
hypre_SeqMultivectorXapy (hypre_Multivector *x, HYPRE_Int rGHeight, HYPRE_Int rHeight,
HYPRE_Int rWidth, HYPRE_Complex* rVal, hypre_Multivector *y)
{
/* y += x * R: like hypre_SeqMultivectorByMatrix but ACCUMULATES into y
   instead of overwriting it (every term uses +=).  rVal is read
   sequentially via `*rVal++` with `gap` padding rows skipped per
   column (rGHeight >= rHeight).  Returns 0. */
HYPRE_Complex *x_data, *y_data, *x_ptr, *y_ptr, current_coef;
HYPRE_Int i, j, k, size, gap, *x_active_ind, *y_active_ind;
hypre_assert (rHeight==x->num_active_vectors && rWidth==y->num_active_vectors);
x_data = x->data;
y_data = y->data;
size = x->size;
x_active_ind = x->active_indices;
y_active_ind = y->active_indices;
gap = rGHeight - rHeight;
for (j=0; j<rWidth; j++)
{
y_ptr = y_data + y_active_ind[j]*size;
for (i=0; i<rHeight; i++)
{
x_ptr = x_data + x_active_ind[i]*size;
current_coef = *rVal++;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(k) HYPRE_SMP_SCHEDULE
#endif
for (k=0; k<size; k++)
y_ptr[k] += current_coef * x_ptr[k];
}
/* skip the padding rows of R before the next column */
rVal += gap;
}
return 0;
}
|
GB_binop__land_bool.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_bool
// A.*B function (eWiseMult): GB_AemultB__land_bool
// A*D function (colscale): GB_AxD__land_bool
// D*A function (rowscale): GB_DxB__land_bool
// C+=B function (dense accum): GB_Cdense_accumB__land_bool
// C+=b function (dense accum): GB_Cdense_accumb__land_bool
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_bool
// C=scalar+B GB_bind1st__land_bool
// C=scalar+B' GB_bind1st_tran__land_bool
// C=A+scalar GB_bind2nd__land_bool
// C=A'+scalar GB_bind2nd_tran__land_bool
// C type: bool
// A type: bool
// B,b type: bool
// BinaryOp: cij = (aij && bij)
// Type and operator configuration macros consumed by the template files
// #include'd below.  This instance specializes the templates for the
// logical-AND operator on bool: C = (A && B), all three types bool.
#define GB_ATYPE \
bool
#define GB_BTYPE \
bool
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
bool aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
bool bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
// access the (p)-th entry of the C value array
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x && y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
// ("(none)" here: no CBLAS gateway applies to the LAND operator)
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time opt-out switches from GB_control.h)
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_BOOL || GxB_NO_LAND_BOOL)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LAND is not in that list, so this kernel is compiled out and the
// function name is the placeholder "(none)".
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__land_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
// C = A "+" B with all three matrices dense, where "+" is the LAND
// operator (see GB_BINOP above); the loop comes from the template
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__land_bool
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C += B with C dense; the *_slice arrays describe the per-task
// partition of B (presumably from GB_ek_slice.h -- confirm there)
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__land_bool
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type bool
bool bwork = (*((bool *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// kept by the code generator; reached only if the template block above
// falls through without returning
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__land_bool
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = A*D (column scale by diagonal D, per the section header); only the
// typed output array Cx is set up here, the loop comes from the template
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__land_bool
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// C = D*B (row scale by diagonal D, per the section header)
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__land_bool
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__land_bool
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__land_bool
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
bool x = (*((bool *) x_input)) ;
bool *Bx = (bool *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool bij = Bx [p] ;
Cx [p] = (x && bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__land_bool
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
bool *Ax = (bool *) Ax_input ;
bool y = (*((bool *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
bool aij = Ax [p] ;
Cx [p] = (aij && y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (x && aij) ; \
}
GrB_Info GB_bind1st_tran__land_bool
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
bool
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool x = (*((const bool *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
bool
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
bool aij = Ax [pA] ; \
Cx [pC] = (aij && y) ; \
}
GrB_Info GB_bind2nd_tran__land_bool
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool y = (*((const bool *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__minus_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__minus_int32
// A.*B function (eWiseMult): GB_AemultB__minus_int32
// A*D function (colscale): GB_AxD__minus_int32
// D*A function (rowscale): GB_DxB__minus_int32
// C+=B function (dense accum): GB_Cdense_accumB__minus_int32
// C+=b function (dense accum): GB_Cdense_accumb__minus_int32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__minus_int32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__minus_int32
// C=scalar+B GB_bind1st__minus_int32
// C=scalar+B' GB_bind1st_tran__minus_int32
// C=A+scalar GB_bind2nd__minus_int32
// C=A'+scalar GB_bind2nd_tran__minus_int32
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij - bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x - y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_MINUS || GxB_NO_INT32 || GxB_NO_MINUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__minus_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__minus_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__minus_int32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__minus_int32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__minus_int32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__minus_int32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *GB_RESTRICT Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__minus_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__minus_int32
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__minus_int32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int32_t bij = Bx [p] ;
Cx [p] = (x - bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__minus_int32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int32_t aij = Ax [p] ;
Cx [p] = (aij - y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (x - aij) ; \
}
GrB_Info GB_bind1st_tran__minus_int32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = Ax [pA] ; \
Cx [pC] = (aij - y) ; \
}
GrB_Info GB_bind2nd_tran__minus_int32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Oscillators.h | #pragma once
#include <vector>
#include <array>
#include <math.h>
#include "Common.h"
#include "Delay.h"
#include "Filter.h"
using namespace std;
extern const vector<float> tableA;
extern const vector<float> tableB;
extern const vector<float> tableC;
extern const vector<float> tableD;
extern const vector<float> table_hsin;
struct basicOsc {
private:
float phase = 0.0f;
public:
float freq = 0.0f;
float dt = 1.0f / 44100.0f;
void zero(const float sampleRate_) {
dt = 1.0f / sampleRate_;
phase = 0.0f;
freq = 0.0f;
}
//0-1
float sineDC() {
phase += freq * dt;
if (phase >= 1.0) {
phase -= 1.0;
}
return (aproxSine(2.0f * PI * phase) + 1.0f) * 0.5f;
}
float triDC() {
float tri;
if (phase < 0.25f)
tri = 4.0f * phase;
else if (phase < 0.75f)
tri = 2.0f - 4.0f * phase;
else
tri = -4.0f + 4.0f * phase;
phase += freq * dt;
if (phase >= 1.0f) {
phase -= 1.0f;
}
return tri + 0.5f * 0.5f;
}
//-1,1
float sineAC() {
phase += freq * dt;
if (phase >= 1.0) {
phase -= 1.0;
}
return aproxSine(2.0f * PI * phase);
}
};
//struct slightlyFancyDCOsc {
//private:
// float phase = 0.0f;
//
//public:
// float freq = 0.0f;
// float level = 0.0f;
// float dt = 1.0f / 44100.0f;
//
//
// void zero(const float sampleRate_) {
// dt = 1.0f / sampleRate_;
// phase = 0.0f;
// freq = 0.0f;
// level = 0.0f;
// }
//
//
// void step(float dt) {
// float deltaPhase = fminf(freq * dt, 0.5);
// phase += deltaPhase;
//
// if (phase >= 1.0) {
// phase -= 1.0;
// }
//
// }
//
//
// float sine() {
// return ((sinf(2.0f * PI * phase) + 1.0f) / 2.0f) * level;
// }
//
// float tri() {
// float tri;
// if (phase < 0.25f)
// tri = 4.0f * phase;
// else if (phase < 0.75)
// tri = 2.0f - 4.0f * phase;
// else
// tri = -4.0f + 4.0f * phase;
// return ((tri + 1.0f) / 2.0f) * level;
// }
//
// float saw() {
// float saw;
// if (phase < 0.5f)
// saw = 2.0f * phase;
// else
// saw = -2.0f + 2.0f * phase;
// return ((saw + 1.0f) / 2.0f) * level;
// }
//
// float sqr() {
// float sqr = phase < 0.5f ? 1.0f : -1.0f;
// return ((sqr + 1.0f) / 2.0f) * level;
// }
//
// float hsin() {
// return ((sinf(PI * phase) + 1.0f) / 2.0f) * level;
// }
//
// float qsin() {
// return ((sinf((PI / 2.0f) * phase) + 1.0f) / 2.0f) * level;
// }
//
//
//
//};
////Note each tracks 2 voices P and S for detune
//struct simpleWTableOsc {
//private:
// //const float slewMin = 0.1;
// //const float slewMax = 10000.0;
// const float shapeScale = 1 / 10.0;
// const float minCutoff = 20.0f;
// const float maxCutoff = 10560.0f;
//
// StateVariableFilter filter;
// float filterType = 0.0f;
//
// float dIdxP = 0.0f;
// float dIdxS = 0.0f;
// float idxP = 0.0f;
// float idxS = 0.0f;
// float sampleRate = 1.0f;
// float freqMultiplier = 1.0f;
// float freq = 0.0f;
//
//public:
// float mixX = 0.0f;
// float mixY = 0.0f;
//
// simpleWTableOsc(const float sampleRate_) {
// sampleRate = sampleRate_;
// filterType = 0.0f;
// dIdxP = 0.0f;
// dIdxS = 0.0f;
// idxP = 0.0f;
// idxS = 0.0f;
// mixX = 0.0f;
// mixY = 0.0f;
// freqMultiplier = 1.0f;
// freq = 0.0f;
//
// filter.zero(sampleRate);
// }
//
// void setFreq(const float hz_) {
// //cout << "Freq set to " << hz_ << " hz " << endl;
// freq = hz_;
// }
//
// //0-1
// void setFilterCutoff(const float c_) {
// filter.setFreq(minCutoff * powf(maxCutoff / minCutoff, c_));
// }
//
// void setFilterType(const float c_) {
// filterType = c_;
// }
//
// //0-1
// void setDetune(const float octave_) {
// const auto cents = octave_ * 1200.0f;
// freqMultiplier = roundf(100000.0f * powf(2.0f, (floorf(cents) / 100.0f / 12.0f))) / 100000.0f;
// }
//
//
// void setMix(const float mX_, const float mY_) {
// mixX = mX_;
// mixY = mY_;
// }
//
//
// float wav() {
// float intpartP, intpartS = 0;
// float fracpartP = modf(idxP, &intpartP);
// float fracpartS = modf(idxS, &intpartS);
//
// dIdxP = (freq) / (sampleRate / static_cast<float>(4160));
// dIdxS = (freq * freqMultiplier) / (sampleRate / static_cast<float>(4160));
//
// if (intpartP > 4160 - 1) {
// intpartP = 0;
// }
//
// if (intpartS > 4160 - 1) {
// intpartS = 0;
// }
//
// //apparently broken allpass interp.... try real allpass someday
// const auto sampA = (1.0f - fracpartP) * tableA[static_cast<int>(intpartP)] + tableA[(static_cast<int>(intpartP) + 1) % 4160] * fracpartP;
// const auto sampB = (1.0f - fracpartS) * tableA[static_cast<int>(intpartS)] + tableA[(static_cast<int>(intpartS) + 1) % 4160] * fracpartS;
// const auto sampC = (1.0f - fracpartP) * tableB[static_cast<int>(intpartP)] + tableB[(static_cast<int>(intpartP) + 1) % 4160] * fracpartP;
// const auto sampD = (1.0f - fracpartS) * tableB[static_cast<int>(intpartS)] + tableB[(static_cast<int>(intpartS) + 1) % 4160] * fracpartS;
// const auto sampE = (1.0f - fracpartP) * tableC[static_cast<int>(intpartP)] + tableC[(static_cast<int>(intpartP) + 1) % 4160] * fracpartP;
// const auto sampF = (1.0f - fracpartS) * tableC[static_cast<int>(intpartS)] + tableC[(static_cast<int>(intpartS) + 1) % 4160] * fracpartS;
// const auto sampG = (1.0f - fracpartP) * tableD[static_cast<int>(intpartP)] + tableD[(static_cast<int>(intpartP) + 1) % 4160] * fracpartP;
// const auto sampH = (1.0f - fracpartS) * tableD[static_cast<int>(intpartS)] + tableD[(static_cast<int>(intpartS) + 1) % 4160] * fracpartS;
//
// const float outX1P = lerp<float>(sampA, sampC, mixX);
// const float outX2P = lerp<float>(sampE, sampG, mixX);
// const float outP = lerp<float>(outX1P, outX2P, mixY);
//
// const float outX1S = lerp<float>(sampB, sampD, mixX);
// const float outX2S = lerp<float>(sampF, sampH, mixX);
// const float outS = lerp<float>(outX1S, outX2S, mixY);
//
// idxP = fracpartP + intpartP + dIdxP;
// idxS = fracpartS + intpartS + dIdxS;
//
// const float preFilter = (outP + outS) / 2.0f;
// filter.process(preFilter);
// return lerp<float>(filter.lowpass(), filter.highpass(), filterType);
//
// }
//
//
//};
//struct STF {
//private:
// const float minCutoff = 20.0f;
// const float maxCutoff = 10560.0f;
// float sampleRate = 1.0;
//
// array<float, POLY> state1;
// array<float, POLY> state2;
// array<float, POLY> lp;
// array<float, POLY> bp;
// array<float, POLY> hp;
// array<float, POLY> o;
// array<float, POLY> h;
//
//public:
// float Q = 1.0f;
//
// //0-1
// array<float, POLY> cutOffParam;
//
// void zero(const float sampleRate_) {
// sampleRate = sampleRate_;
// state1.fill(0.0f);
// state2.fill(0.0f);
// lp.fill(0.0f);
// bp.fill(0.0f);
// hp.fill(0.0f);
// o.fill(0.0f);
// h.fill(0.0f);
// cutOffParam.fill(0.0f);
// }
//
//
// void process(array<float, POLY>& in_) {
//
// #pragma omp simd
// for (int i = 0; i < POLY; i++) {
// o[i] = tanf(((minCutoff * powf(maxCutoff / minCutoff, cutOffParam[i])) / sampleRate) * PI);
// h[i] = 1.0f / (1.0f + o[i] / Q + o[i] * o[i]);
// }
//
// #pragma omp simd
// for (int i = 0; i < POLY; i++) {
// hp[i] = h[i] * (in_[i] - (1.0f / Q + o[i]) * state1[i] - state2[i]);
// bp[i] = o[i] * hp[i] + state1[i];
// state1[i] = o[i] * hp[i] + bp[i];
// lp[i] = o[i] * bp[i] + state2[i];
// state2[i] = o[i] * bp[i] + lp[i];
// }
// }
//
// array<float, POLY>* lowpass() {
// return &lp;
// }
//
// array<float, POLY>* bandpass() {
// return &bp;
// }
//
// array<float, POLY>* highpass() {
// return &hp;
// }
//
// //Well that won't work
// //array<float, 8>* notch() {
// // return hp + lp;
// //}
//
//};
//struct BlockWTableOsc {
//private:
//
// const float shapeScale = 1 / 10.0;
// float tableRate = 1.0;
//
// array<float, POLY> dIdxP;
// array<float, POLY> dIdxS;
// array<float, POLY> idxP;
// array<float, POLY> idxS;
//
// array<float, POLY> intpartP;
// array<float, POLY> intpartS;
// array<float, POLY> fracpartP;
// array<float, POLY> fracpartS;
//
//
//
// array<array<float, POLY>, POLY> buffer; //The outside 8 is poly the inside is the samples for the lerps and just happens to be 8
//
//
// array<float, POLY> outA;
// array<float, POLY> outB;
// array<float, POLY> outC;
// array<float, POLY> outD;
//
// array<float, POLY> outP;
// array<float, POLY> outS;
//
// //y(n) = x(n - (M + 1)
//
//
//public:
// array<float, POLY> filterType;
// array<float, POLY> freqMultiplier;
// array<float, POLY> freq;
// array<float, POLY> mixX;
// array<float, POLY> mixY;
//
// STF filter;
//
// BlockWTableOsc(const float sampleRate_) {
// tableRate = sampleRate_ / static_cast<float>(4160);
// fill(filterType.begin(), filterType.end(), 0.0f);
// fill(dIdxP.begin(), dIdxP.end(), 0.0f);
// fill(dIdxS.begin(), dIdxS.end(), 0.0f);
// fill(idxP.begin(), idxP.end(), 0.0f);
// fill(idxS.begin(), idxS.end(), 0.0f);
// fill(intpartP.begin(), intpartP.end(), 0.0f);
// fill(intpartS.begin(), intpartS.end(), 0.0f);
// fill(fracpartP.begin(), fracpartP.end(), 0.0f);
// fill(fracpartS.begin(), fracpartS.end(), 0.0f);
// fill(freqMultiplier.begin(), freqMultiplier.end(), 0.0f);
// fill(freq.begin(), freq.end(), 0.0f);
// fill(mixX.begin(), mixX.end(), 0.0f);
// fill(mixY.begin(), mixY.end(), 0.0f);
// fill(outA.begin(), outA.end(), 0.0f);
// fill(outB.begin(), outB.end(), 0.0f);
// fill(outC.begin(), outC.end(), 0.0f);
// fill(outD.begin(), outD.end(), 0.0f);
// fill(outP.begin(), outP.end(), 0.0f);
// fill(outS.begin(), outS.end(), 0.0f);
//
// for (auto& b : buffer) {
// for (auto& v : b) {
// v = 0.0f;
// }
// }
//
// filter.zero(sampleRate_);
// }
//
//
// //0-1
// //void setDetune(const array<float, 8> octave_) {
// // for (int i = 0; i < 8; i++) {
// // freqMultiplier[i] = round(100000.0f * pow(2.0f, (floor(octave_[i] * 1200.0) / 100.0f / 12.0f))) / 100000.0f;
// // }
// //}
//
// void wav(array<float, POLY> &in_) {
//
// for (int i = 0; i < POLY; i++) {
// intpartP[i] = 0.0f;
// intpartS[i] = 0.0f;
// fracpartP[i] = modf(idxP[i], &intpartP[i]);
// fracpartS[i] = modf(idxS[i], &intpartS[i]);
//
// dIdxP[i] = (freq[i]) / tableRate;
// dIdxS[i] = (freq[i] * freqMultiplier[i]) / tableRate;
//
// if (intpartP[i] > 4160 - 1) {
// intpartP[i] = 0;
// }
//
// if (intpartS[i] > 4160 - 1) {
// intpartS[i] = 0;
// }
//
// }
//
//
// //This is bad since it depends on the size of POLY
// //If poly changes, see commented lines
// //Fix
// #pragma omp simd
// for (int i = 0; i < POLY; i++) {
// buffer[0][i] = (1.0f - fracpartP[i]) * tableA[static_cast<int>(intpartP[i])] + tableA[(static_cast<int>(intpartP[i]) + 1) % 4160] * fracpartP[i];
// buffer[1][i] = (1.0f - fracpartS[i]) * tableA[static_cast<int>(intpartS[i])] + tableA[(static_cast<int>(intpartS[i]) + 1) % 4160] * fracpartS[i];
// buffer[2][i] = (1.0f - fracpartP[i]) * tableB[static_cast<int>(intpartP[i])] + tableB[(static_cast<int>(intpartP[i]) + 1) % 4160] * fracpartP[i];
// buffer[3][i] = (1.0f - fracpartS[i]) * tableB[static_cast<int>(intpartS[i])] + tableB[(static_cast<int>(intpartS[i]) + 1) % 4160] * fracpartS[i];
// //buffer[4][i] = (1.0f - fracpartP[i]) * tableC[static_cast<int>(intpartP[i])] + tableC[(static_cast<int>(intpartP[i]) + 1) % 4160] * fracpartP[i];
// //buffer[5][i] = (1.0f - fracpartS[i]) * tableC[static_cast<int>(intpartS[i])] + tableC[(static_cast<int>(intpartS[i]) + 1) % 4160] * fracpartS[i];
// //buffer[6][i] = (1.0f - fracpartP[i]) * tableD[static_cast<int>(intpartP[i])] + tableD[(static_cast<int>(intpartP[i]) + 1) % 4160] * fracpartP[i];
// //buffer[7][i] = (1.0f - fracpartS[i]) * tableD[static_cast<int>(intpartS[i])] + tableD[(static_cast<int>(intpartS[i]) + 1) % 4160] * fracpartS[i];
// }
//
// //apparently broken allpass interp.... try real allpass someday
//
//
// plerp<float, POLY>(buffer[0], buffer[2], mixX, outA);
// plerp<float, POLY>(buffer[4], buffer[6], mixX, outB);
// plerp<float, POLY>(outA, outB, mixY, outP);
//
// plerp<float, POLY>(buffer[1], buffer[3], mixX, outC);
// plerp<float, POLY>(buffer[5], buffer[7], mixX, outD);
// plerp<float, POLY>(outC, outD, mixY, outS);
//
// #pragma omp simd
// for (int i = 0; i < POLY; i++) {
// idxP[i] = fracpartP[i] + intpartP[i] + dIdxP[i];
// idxS[i] = fracpartS[i] + intpartS[i] + dIdxS[i];
// in_[i] = (outP[i] + outS[i]) / 2.0f;
// }
//
// filter.process(in_);
// plerp<float, POLY>(*filter.lowpass(), *filter.highpass(), filterType, in_);
//
//
// }
//
//
//};
struct simplerWTableOsc {
private:
const float slewMin = 0.1;
const float slewMax = 10000.0;
const float shapeScale = 1 / 10.0;
const float minCutoff = 20.0f;
const float maxCutoff = 10560.0f;
StateVariableFilter filter;
float filterType = 0.0f;
float dt = 0.0f;
float sampleRate = 0.0f;
float dIdxP = 0.0f;
float dIdxS = 0.0f;
float idxP = 0.0f;
float idxS = 0.0f;
float freqMultiplier = 1.0f;
float freq = 0.0f;
float levelA = 0.0f;
float levelB = 0.0f;
public:
simplerWTableOsc(const float sampleRate_) {
sampleRate = sampleRate_;
dt = 1.0f / (sampleRate_ / static_cast<float>(4160));
filterType = 0.0f;
dIdxP = 0.0f;
dIdxS = 0.0f;
idxP = 0.0f;
idxS = 0.0f;
levelA = 0.0f;
levelB = 0.0f;
freqMultiplier = 1.0f;
freq = 0.0f;
filter.zero(sampleRate_);
filter.setFreq(minCutoff);
}
void setFreq(const float hz_) {
//cout << "Freq set to " << hz_ << " hz " << endl;
freq = hz_;
}
//0-1
void setFilterCutoff(const float c_) {
filter.setFreq(minCutoff * powf(maxCutoff / minCutoff, c_));
}
void setFilterType(const float c_) {
filterType = c_;
}
//0-1
void setDetune(const float octave_) {
const auto cents = octave_ * 1200.0f;
freqMultiplier = roundf(100000.0f * powf(2.0f, (floorf(cents) / 100.0f / 12.0f))) / 100000.0f;
}
void setMix(const float levelA_, const float levelB_) {
levelA = levelA_;
levelB = levelB_;
}
float wav() {
//apparently broken allpass interp.... try real allpass someday
const auto sampA = interpvec<float>(tableA, idxP);
const auto sampB = interpvec<float>(tableA, idxS);
const auto sampC = interpvec<float>(tableB, idxP);
const auto sampD = interpvec<float>(tableB, idxP);
const float outA = sampA + sampB * 0.5f;
const float outB = sampC + sampD * 0.5f;
dIdxP = (freq)* dt;
dIdxS = (freq * freqMultiplier) * dt;
idxP += dIdxP;
idxS += dIdxS;
if (idxP > 4160 - 1) {
idxP -= 4160 - 1;
}
if (idxS > 4160 - 1) {
idxS -= 4160 - 1;
}
const float sample = (outA * levelA + outB * levelB) / max<float>(1.0f, levelA+levelB);
filter.process(sample);
return lerp<float>(filter.lowpass(), filter.highpass(), filterType);
}
};
//struct qamOsc {
//private:
// const float minCutoff = 20.0f;
// const float maxCutoff = 10560.0f;
//
// StateVariableFilter filter;
// float filterType = 0.0f;
//
// const array<float, 7> ratios = {
// 1.0f / 10.0f, 1.0f / 5.0f, 1.0f / 2.0f, 1.0f, 2.0f, 3.0f, 4.0f
// };
//
//
// float q_phase = 0.0f;
// float m_phase = 0.0f;
//
// float sampleRate = 1.0f;
// float dt = 0.0f;
// float freq = 0.0f;
// float ratio = 0.428571f;
//
//
//public:
//
//
// qamOsc(const float sampleRate_) {
// sampleRate = sampleRate_;
// dt = 1.0f / sampleRate;
// filterType = 0.0f;
// freq = 0.0f;
// q_phase = 0.0f;
// m_phase = 0.0f;
// ratio = 0.428571f;
// filter.zero(sampleRate);
// filter.setFreq(minCutoff);
// }
//
// void setFreq(const float hz_) {
// freq = hz_;
// }
//
// //0-1
// void setFilterCutoff(const float c_) {
// filter.setFreq(minCutoff * powf(maxCutoff / minCutoff, c_));
// }
// //0-1
// void setFilterType(const float c_) {
// filterType = c_;
// }
//
// //0-1
// void setRatio(const float r_) {
// ratio = r_;
// }
//
//
//
// float wav() {
// const float dp_c = freq * dt;
// const float dp_m = freq * interparray(ratios, ratio) * dt;
//
//
//
// q_phase += dp_c;
// m_phase += dp_m;
//
// if (q_phase > 1.0f) {
// q_phase -= 1.0f;
// }
//
// if (m_phase > 1.0f) {
// m_phase -= 1.0f;
// }
//
// const float m = (sinf(2.0f * PI * m_phase) * 0.5f) + 1.0f;
// //const float m = sin(2 * PI * m_phase);
// const float qs = sinf(2.0f * PI * q_phase);
// const float qc = sinf(2.0f * PI * q_phase) * m;
// const float sample = (qs + qc) * ISQRR2;
// //const float sample = m;
// filter.process(sample);
//
// return lerp<float>(filter.lowpass(), filter.highpass(), filterType);
//
// }
//
//
//};
|
segment.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS EEEEE GGGG M M EEEEE N N TTTTT %
% SS E G MM MM E NN N T %
% SSS EEE G GGG M M M EEE N N N T %
% SS E G G M M E N NN T %
% SSSSS EEEEE GGGG M M EEEEE N N T %
% %
% %
% MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means %
% %
% Software Design %
% Cristy %
% April 1993 %
% %
% %
% Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Segment segments an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% c-means technique. The scale-space filter analyzes the histograms of
% the three color components of the image and identifies a set of
% classes. The extents of each class are used to coarsely segment the
% image with thresholding. The color associated with each class is
% determined by the mean color of all pixels within the extents of a
% particular class. Finally, any unclassified pixels are assigned to
% the closest class with the fuzzy c-means technique.
%
% The fuzzy c-Means algorithm can be summarized as follows:
%
% o Build a histogram, one for each color component of the image.
%
% o For each histogram, successively apply the scale-space filter and
% build an interval tree of zero crossings in the second derivative
% at each scale. Analyze this scale-space ''fingerprint'' to
% determine which peaks and valleys in the histogram are most
% predominant.
%
% o The fingerprint defines intervals on the axis of the histogram.
% Each interval contains either a minima or a maxima in the original
% signal. If each color component lies within the maxima interval,
% that pixel is considered ''classified'' and is assigned a unique
% class number.
%
% o Any pixel that fails to be classified in the above thresholding
% pass is classified using the fuzzy c-Means technique. It is
% assigned to one of the classes discovered in the histogram analysis
% phase.
%
% The fuzzy c-Means technique attempts to cluster a pixel by finding
% the local minima of the generalized within group sum of squared error
% objective function. A pixel is assigned to the closest class of
% which the fuzzy membership has a maximum value.
%
% Segment is strongly based on software written by Andy Gallo,
% University of Delaware.
%
% The following reference was used in creating this program:
%
% Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation
% Algorithm Based on the Thresholding and the Fuzzy c-Means
% Techniques", Pattern Recognition, Volume 23, Number 9, pages
% 935-952, 1990.
%
%
*/
#include "MagickCore/studio.h"
#include "MagickCore/cache.h"
#include "MagickCore/color.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/pixel-private.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
/*
Define declarations.
*/
#define MaxDimension 3
#define DeltaTau 0.5f
#if defined(FastClassify)
#define WeightingExponent 2.0
#define SegmentPower(ratio) (ratio)
#else
#define WeightingExponent 2.5
#define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0)));
#endif
#define Tau 5.2f
/*
Typedef declarations.
*/
typedef struct _ExtentPacket
{
double
center;
ssize_t
index,
left,
right;
} ExtentPacket;
typedef struct _Cluster
{
struct _Cluster
*next;
ExtentPacket
red,
green,
blue;
ssize_t
count,
id;
} Cluster;
typedef struct _IntervalTree
{
double
tau;
ssize_t
left,
right;
double
mean_stability,
stability;
struct _IntervalTree
*sibling,
*child;
} IntervalTree;
typedef struct _ZeroCrossing
{
double
tau,
histogram[256];
short
crossings[256];
} ZeroCrossing;
/*
Constant declarations.
*/
static const int
Blue = 2,
Green = 1,
Red = 0,
SafeMargin = 3,
TreeLength = 600;
/*
Method prototypes.
*/
static double
OptimalTau(const ssize_t *,const double,const double,const double,
const double,short *);
static ssize_t
DefineRegion(const short *,ExtentPacket *);
static void
FreeNodes(IntervalTree *),
InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *),
ScaleSpace(const ssize_t *,const double,double *),
ZeroCrossHistogram(double *,const double,short *);
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C l a s s i f y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Classify() defines one or more classes. Each pixel is thresholded to
% determine which class it belongs to. If the class is not identified it is
% assigned to the closest class based on the fuzzy c-Means technique.
%
% The format of the Classify method is:
%
% MagickBooleanType Classify(Image *image,short **extrema,
% const double cluster_threshold,
% const double weighting_exponent,
% const MagickBooleanType verbose,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o weighting_exponent: Specifies the membership weighting exponent.
%
% o verbose: A value greater than zero prints detailed information about
% the identified classes.
%
% o exception: return any errors or warnings in this structure.
%
*/
static MagickBooleanType Classify(Image *image,short **extrema,
const double cluster_threshold,
const double weighting_exponent,const MagickBooleanType verbose,
ExceptionInfo *exception)
{
#define SegmentImageTag "Segment/Image"
CacheView
*image_view;
Cluster
*cluster,
*head,
*last_cluster,
*next_cluster;
ExtentPacket
blue,
green,
red;
MagickOffsetType
progress;
double
*free_squares;
MagickStatusType
status;
register ssize_t
i;
register double
*squares;
size_t
number_clusters;
ssize_t
count,
y;
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) memset(&red,0,sizeof(red));
(void) memset(&green,0,sizeof(green));
(void) memset(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
status=MagickTrue;
count=0;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(double) ScaleQuantumToChar(
GetPixelRed(image,p));
cluster->green.center+=(double) ScaleQuantumToChar(
GetPixelGreen(image,p));
cluster->blue.center+=(double) ScaleQuantumToChar(
GetPixelBlue(image,p));
cluster->count++;
break;
}
p+=GetPixelChannels(image);
}
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,2*
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
number_clusters=(size_t) count;
if (verbose != MagickFalse)
{
/*
Print cluster statistics.
*/
(void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n");
(void) FormatLocaleFile(stdout,"===================\n\n");
(void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double)
cluster_threshold);
(void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double)
weighting_exponent);
(void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n",
(double) number_clusters);
/*
Print the total number of points per cluster.
*/
(void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n");
(void) FormatLocaleFile(stdout,"=============================\n\n");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
(void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double)
cluster->id,(double) cluster->count);
/*
Print the cluster extents.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,
"%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double)
cluster->red.left,(double) cluster->red.right,(double)
cluster->green.left,(double) cluster->green.right,(double)
cluster->blue.left,(double) cluster->blue.right);
}
/*
Print the cluster center values.
*/
(void) FormatLocaleFile(stdout,
"\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension);
(void) FormatLocaleFile(stdout,"=====================");
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
(void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double)
cluster->id);
(void) FormatLocaleFile(stdout,"%g %g %g\n",(double)
cluster->red.center,(double) cluster->green.center,(double)
cluster->blue.center);
}
(void) FormatLocaleFile(stdout,"\n");
}
if (number_clusters > 256)
ThrowBinaryException(ImageError,"TooManyClusters",image->filename);
/*
Speed up distance calculations.
*/
squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
if (squares == (double *) NULL)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
squares+=255;
for (i=(-255); i <= 255; i++)
squares[i]=(double) i*(double) i;
/*
Allocate image colormap.
*/
if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
image->filename);
i=0;
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
{
image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
(cluster->red.center+0.5));
image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
(cluster->green.center+0.5));
image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
(cluster->blue.center+0.5));
i++;
}
/*
Do course grain classes.
*/
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Cluster
*clust;
register const PixelInfo
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelIndex(image,0,q);
for (clust=head; clust != (Cluster *) NULL; clust=clust->next)
{
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
(clust->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
(clust->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
(clust->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
(clust->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
(clust->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
(clust->blue.right+SafeMargin)))
{
/*
Classify this pixel.
*/
SetPixelIndex(image,(Quantum) clust->id,q);
break;
}
}
if (clust == (Cluster *) NULL)
{
double
distance_squared,
local_minima,
numerator,
ratio,
sum;
register ssize_t
j,
k;
/*
Compute fuzzy membership.
*/
local_minima=0.0;
for (j=0; j < (ssize_t) image->colors; j++)
{
sum=0.0;
p=image->colormap+j;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
numerator=distance_squared;
for (k=0; k < (ssize_t) image->colors; k++)
{
p=image->colormap+k;
distance_squared=squares[(ssize_t) ScaleQuantumToChar(
GetPixelRed(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
ScaleQuantumToChar(ClampToQuantum(p->blue))];
ratio=numerator/distance_squared;
sum+=SegmentPower(ratio);
}
if ((sum != 0.0) && ((1.0/sum) > local_minima))
{
/*
Classify this pixel.
*/
local_minima=1.0/sum;
SetPixelIndex(image,(Quantum) j,q);
}
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_Classify)
#endif
proceed=SetImageProgress(image,SegmentImageTag,progress++,
2*image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
status&=SyncImage(image,exception);
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
squares-=255;
free_squares=squares;
free_squares=(double *) RelinquishMagickMemory(free_squares);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ C o n s o l i d a t e C r o s s i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ConsolidateCrossings() guarantees that an even number of zero crossings
% always lie between two crossings.
%
% The format of the ConsolidateCrossings method is:
%
% ConsolidateCrossings(ZeroCrossing *zero_crossing,
% const size_t number_crossings)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void ConsolidateCrossings(ZeroCrossing *zero_crossing,
const size_t number_crossings)
{
register ssize_t
i,
j,
k,
l;
ssize_t
center,
correct,
count,
left,
right;
/*
Consolidate zero crossings.
*/
for (i=(ssize_t) number_crossings-1; i >= 0; i--)
for (j=0; j <= 255; j++)
{
if (zero_crossing[i].crossings[j] == 0)
continue;
/*
Find the entry that is closest to j and still preserves the
property that there are an even number of crossings between
intervals.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i+1].crossings[k] != 0)
break;
left=MagickMax(k,0);
center=j;
for (k=j+1; k < 255; k++)
if (zero_crossing[i+1].crossings[k] != 0)
break;
right=MagickMin(k,255);
/*
K is the zero crossing just left of j.
*/
for (k=j-1; k > 0; k--)
if (zero_crossing[i].crossings[k] != 0)
break;
if (k < 0)
k=0;
/*
Check center for an even number of crossings between k and j.
*/
correct=(-1);
if (zero_crossing[i+1].crossings[j] != 0)
{
count=0;
for (l=k+1; l < center; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (center != k))
correct=center;
}
/*
Check left for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < left; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (left != k))
correct=left;
}
/*
Check right for an even number of crossings between k and j.
*/
if (correct == -1)
{
count=0;
for (l=k+1; l < right; l++)
if (zero_crossing[i+1].crossings[l] != 0)
count++;
if (((count % 2) == 0) && (right != k))
correct=right;
}
l=(ssize_t) zero_crossing[i].crossings[j];
zero_crossing[i].crossings[j]=0;
if (correct != -1)
zero_crossing[i].crossings[correct]=(short) l;
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e f i n e R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DefineRegion() defines the left and right boundaries of a peak region.
%
% The format of the DefineRegion method is:
%
% ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
% A description of each parameter follows.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
% o extents: This pointer to an ExtentPacket represent the extends
% of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
/*
Initialize to default values.
*/
extents->left=0;
extents->center=0.0;
extents->right=255;
/*
Find the left side (maxima).
*/
for ( ; extents->index <= 255; extents->index++)
if (extrema[extents->index] > 0)
break;
if (extents->index > 255)
return(MagickFalse); /* no left side - no region exists */
extents->left=extents->index;
/*
Find the right side (minima).
*/
for ( ; extents->index <= 255; extents->index++)
if (extrema[extents->index] < 0)
break;
extents->right=extents->index-1;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D e r i v a t i v e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DerivativeHistogram() determines the derivative of the histogram using
% central differencing.
%
% The format of the DerivativeHistogram method is:
%
% DerivativeHistogram(const double *histogram,
% double *derivative)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of doubles representing the number
% of pixels for each intensity of a particular color component.
%
% o derivative: This array of doubles is initialized by
% DerivativeHistogram to the derivative of the histogram using central
% differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
double *derivative)
{
register ssize_t
i,
n;
/*
Compute endpoints using second order polynomial interpolation.
*/
n=255;
derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
/*
Compute derivative using central differencing.
*/
for (i=1; i < n; i++)
derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e D y n a m i c T h r e s h o l d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
% The format of the GetImageDynamicThreshold method is:
%
% MagickBooleanType GetImageDynamicThreshold(const Image *image,
% const double cluster_threshold,const double smooth_threshold,
% PixelInfo *pixel,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o cluster_threshold: This double represents the minimum number of
% pixels contained in a hexahedra before it can be considered valid
% (expressed as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o pixel: return the dynamic threshold here.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
const double cluster_threshold,const double smooth_threshold,
PixelInfo *pixel,ExceptionInfo *exception)
{
Cluster
*background,
*cluster,
*object,
*head,
*last_cluster,
*next_cluster;
ExtentPacket
blue,
green,
red;
MagickBooleanType
proceed;
double
threshold;
register const Quantum
*p;
register ssize_t
i,
x;
short
*extrema[MaxDimension];
ssize_t
count,
*histogram[MaxDimension],
y;
/*
Allocate histogram and extrema.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
GetPixelInfo(image,pixel);
for (i=0; i < MaxDimension; i++)
{
histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
{
for (i-- ; i >= 0; i--)
{
extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
}
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
}
/*
Initialize histogram.
*/
InitializeHistogram(image,histogram,exception);
(void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
(smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
(void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
(smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
(void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
(smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
/*
Form clusters.
*/
cluster=(Cluster *) NULL;
head=(Cluster *) NULL;
(void) memset(&red,0,sizeof(red));
(void) memset(&green,0,sizeof(green));
(void) memset(&blue,0,sizeof(blue));
while (DefineRegion(extrema[Red],&red) != 0)
{
green.index=0;
while (DefineRegion(extrema[Green],&green) != 0)
{
blue.index=0;
while (DefineRegion(extrema[Blue],&blue) != 0)
{
/*
Allocate a new class.
*/
if (head != (Cluster *) NULL)
{
cluster->next=(Cluster *) AcquireMagickMemory(
sizeof(*cluster->next));
cluster=cluster->next;
}
else
{
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
head=cluster;
}
if (cluster == (Cluster *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",
image->filename);
return(MagickFalse);
}
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
}
}
}
if (head == (Cluster *) NULL)
{
/*
No classes were identified-- create one.
*/
cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
if (cluster == (Cluster *) NULL)
{
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
return(MagickFalse);
}
/*
Initialize a new class.
*/
cluster->count=0;
cluster->red=red;
cluster->green=green;
cluster->blue=blue;
cluster->next=(Cluster *) NULL;
head=cluster;
}
/*
Count the pixels for each cluster.
*/
count=0;
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >=
(cluster->red.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <=
(cluster->red.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >=
(cluster->green.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <=
(cluster->green.right+SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >=
(cluster->blue.left-SafeMargin)) &&
((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <=
(cluster->blue.right+SafeMargin)))
{
/*
Count this pixel.
*/
count++;
cluster->red.center+=(double) ScaleQuantumToChar(
GetPixelRed(image,p));
cluster->green.center+=(double) ScaleQuantumToChar(
GetPixelGreen(image,p));
cluster->blue.center+=(double) ScaleQuantumToChar(
GetPixelBlue(image,p));
cluster->count++;
break;
}
p+=GetPixelChannels(image);
}
proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
2*image->rows);
if (proceed == MagickFalse)
break;
}
/*
Remove clusters that do not meet minimum cluster threshold.
*/
count=0;
last_cluster=head;
next_cluster=head;
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
if ((cluster->count > 0) &&
(cluster->count >= (count*cluster_threshold/100.0)))
{
/*
Initialize cluster.
*/
cluster->id=count;
cluster->red.center/=cluster->count;
cluster->green.center/=cluster->count;
cluster->blue.center/=cluster->count;
count++;
last_cluster=cluster;
continue;
}
/*
Delete cluster.
*/
if (cluster == head)
head=next_cluster;
else
last_cluster->next=next_cluster;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
object=head;
background=head;
if (count > 1)
{
object=head->next;
for (cluster=object; cluster->next != (Cluster *) NULL; )
{
if (cluster->count < object->count)
object=cluster;
cluster=cluster->next;
}
background=head->next;
for (cluster=background; cluster->next != (Cluster *) NULL; )
{
if (cluster->count > background->count)
background=cluster;
cluster=cluster->next;
}
}
if (background != (Cluster *) NULL)
{
threshold=(background->red.center+object->red.center)/2.0;
pixel->red=(double) ScaleCharToQuantum((unsigned char)
(threshold+0.5));
threshold=(background->green.center+object->green.center)/2.0;
pixel->green=(double) ScaleCharToQuantum((unsigned char)
(threshold+0.5));
threshold=(background->blue.center+object->blue.center)/2.0;
pixel->blue=(double) ScaleCharToQuantum((unsigned char)
(threshold+0.5));
}
/*
Relinquish resources.
*/
for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
{
next_cluster=cluster->next;
cluster=(Cluster *) RelinquishMagickMemory(cluster);
}
for (i=0; i < MaxDimension; i++)
{
extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeHistogram() computes the histogram for an image.
%
% The format of the InitializeHistogram method is:
%
% InitializeHistogram(const Image *image,ssize_t **histogram)
%
% A description of each parameter follows.
%
% o image: Specifies a pointer to an Image structure; returned from
% ReadImage.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
ExceptionInfo *exception)
{
register const Quantum
*p;
register ssize_t
i,
x;
ssize_t
y;
/*
Initialize histogram.
*/
for (i=0; i <= 255; i++)
{
histogram[Red][i]=0;
histogram[Green][i]=0;
histogram[Blue][i]=0;
}
for (y=0; y < (ssize_t) image->rows; y++)
{
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++;
histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++;
histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++;
p+=GetPixelChannels(image);
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ I n i t i a l i z e I n t e r v a l T r e e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InitializeIntervalTree() initializes an interval tree from the lists of
% zero crossings.
%
% The format of the InitializeIntervalTree method is:
%
% InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
% IntervalTree *node)
%
% A description of each parameter follows.
%
% o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
% o number_crossings: This size_t specifies the number of elements
% in the zero_crossing array.
%
*/
static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
IntervalTree *node)
{
if (node == (IntervalTree *) NULL)
return;
if (node->child == (IntervalTree *) NULL)
list[(*number_nodes)++]=node;
InitializeList(list,number_nodes,node->sibling);
InitializeList(list,number_nodes,node->child);
}
/*
  MeanStability() sets each node's mean_stability to the arithmetic mean of
  the stability values of its direct children (0.0 for leaves), recursing
  over the whole sibling/child structure.
*/
static void MeanStability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  if (node->child != (IntervalTree *) NULL)
    {
      IntervalTree
        *p;

      double
        total;

      ssize_t
        n;

      total=0.0;
      n=0;
      for (p=node->child; p != (IntervalTree *) NULL; p=p->sibling)
      {
        total+=p->stability;
        n++;
      }
      /*
        n >= 1 here because node->child is non-NULL.
      */
      node->mean_stability=total/(double) n;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}
/*
  Stability() defines a node's stability as the tau gap between the node and
  its first child; leaves get stability 0.0.  Recurses over siblings and
  children.
*/
static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  node->stability=(node->child == (IntervalTree *) NULL) ? 0.0 :
    node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}
/*
  Build the interval tree from the per-tau zero-crossing lists.  The root
  covers the whole histogram [0,255]; for each scale (finest to coarsest),
  every current leaf interval is split at the non-zero crossings of the next
  zero_crossing entry.  Returns NULL on allocation failure (freeing any
  partially built tree); otherwise returns the root with stability and
  mean_stability filled in.
*/
static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
  */
  root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root));
  root->child=(IntervalTree *) NULL;
  root->sibling=(IntervalTree *) NULL;
  root->tau=0.0;
  root->left=0;
  root->right=255;
  root->mean_stability=0.0;
  root->stability=0.0;
  (void) memset(list,0,TreeLength*sizeof(*list));
  /*
    i starts at -1 so each pass reads zero_crossing[i+1], covering entries
    0 .. number_crossings inclusive.
  */
  for (i=(-1); i < (ssize_t) number_crossings; i++)
  {
    /*
      Initialize list with all nodes with no children.
    */
    number_nodes=0;
    InitializeList(list,&number_nodes,root);
    /*
      Split list: each leaf interval is subdivided at every non-zero
      crossing strictly inside it; the first piece becomes the head's child,
      subsequent pieces are chained as siblings.
    */
    for (j=0; j < number_nodes; j++)
    {
      head=list[j];
      left=head->left;
      node=head;
      for (k=head->left+1; k < head->right; k++)
      {
        if (zero_crossing[i+1].crossings[k] != 0)
          {
            if (node == head)
              {
                node->child=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->child));
                node=node->child;
              }
            else
              {
                node->sibling=(IntervalTree *) AcquireMagickMemory(
                  sizeof(*node->sibling));
                node=node->sibling;
              }
            if (node == (IntervalTree *) NULL)
              {
                /*
                  Allocation failed: release the list and the tree built so
                  far (FreeNodes walks siblings and children).
                */
                list=(IntervalTree **) RelinquishMagickMemory(list);
                FreeNodes(root);
                return((IntervalTree *) NULL);
              }
            node->tau=zero_crossing[i+1].tau;
            node->child=(IntervalTree *) NULL;
            node->sibling=(IntervalTree *) NULL;
            node->left=left;
            node->right=k;
            left=k;
          }
      }
      /*
        If at least one split happened, append the trailing sub-interval
        [left, head->right] as a final sibling.
      */
      if (left != head->left)
        {
          node->sibling=(IntervalTree *) AcquireMagickMemory(
            sizeof(*node->sibling));
          node=node->sibling;
          if (node == (IntervalTree *) NULL)
            {
              list=(IntervalTree **) RelinquishMagickMemory(list);
              FreeNodes(root);
              return((IntervalTree *) NULL);
            }
          node->tau=zero_crossing[i+1].tau;
          node->child=(IntervalTree *) NULL;
          node->sibling=(IntervalTree *) NULL;
          node->left=left;
          node->right=head->right;
        }
    }
  }
  /*
    Determine the stability: difference between a node's tau and its child's.
  */
  Stability(root->child);
  MeanStability(root->child);
  list=(IntervalTree **) RelinquishMagickMemory(list);
  return(root);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ O p t i m a l T a u %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% OptimalTau() finds the optimal tau for each band of the histogram.
%
% The format of the OptimalTau method is:
%
% double OptimalTau(const ssize_t *histogram,const double max_tau,
% const double min_tau,const double delta_tau,
% const double smooth_threshold,short *extrema)
%
% A description of each parameter follows.
%
% o histogram: Specifies an array of integers representing the number
% of pixels for each intensity of a particular color component.
%
% o extrema: Specifies a pointer to an array of integers. They
% represent the peaks and valleys of the histogram for each color
% component.
%
*/
/*
  ActiveNodes() collects the "active" nodes of the interval tree: a node is
  active when its stability is at least the mean stability of its children.
  An active node is appended to `list' and its subtree is pruned (only its
  siblings are scanned further); an inactive node is skipped and both its
  siblings and children are examined instead.
*/
static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->stability < node->mean_stability)
    {
      ActiveNodes(list,number_nodes,node->sibling);
      ActiveNodes(list,number_nodes,node->child);
      return;
    }
  list[(*number_nodes)++]=node;
  ActiveNodes(list,number_nodes,node->sibling);
}
/*
  FreeNodes() releases an interval (sub)tree: post-order over the sibling
  chain and the child subtree, then the node itself.
*/
static void FreeNodes(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  FreeNodes(node->sibling);
  FreeNodes(node->child);
  (void) RelinquishMagickMemory(node);
}
static double OptimalTau(const ssize_t *histogram,const double max_tau,
const double min_tau,const double delta_tau,const double smooth_threshold,
short *extrema)
{
IntervalTree
**list,
*node,
*root;
MagickBooleanType
peak;
double
average_tau,
*derivative,
*second_derivative,
tau,
value;
register ssize_t
i,
x;
size_t
count,
number_crossings;
ssize_t
index,
j,
k,
number_nodes;
ZeroCrossing
*zero_crossing;
/*
Allocate interval tree.
*/
list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
sizeof(*list));
if (list == (IntervalTree **) NULL)
return(0.0);
/*
Allocate zero crossing list.
*/
count=(size_t) ((max_tau-min_tau)/delta_tau)+2;
zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count,
sizeof(*zero_crossing));
if (zero_crossing == (ZeroCrossing *) NULL)
{
list=(IntervalTree **) RelinquishMagickMemory(list);
return(0.0);
}
for (i=0; i < (ssize_t) count; i++)
zero_crossing[i].tau=(-1.0);
/*
Initialize zero crossing list.
*/
derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative));
second_derivative=(double *) AcquireCriticalMemory(256*
sizeof(*second_derivative));
i=0;
for (tau=max_tau; tau >= min_tau; tau-=delta_tau)
{
zero_crossing[i].tau=tau;
ScaleSpace(histogram,tau,zero_crossing[i].histogram);
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
i++;
}
/*
Add an entry for the original histogram.
*/
zero_crossing[i].tau=0.0;
for (j=0; j <= 255; j++)
zero_crossing[i].histogram[j]=(double) histogram[j];
DerivativeHistogram(zero_crossing[i].histogram,derivative);
DerivativeHistogram(derivative,second_derivative);
ZeroCrossHistogram(second_derivative,smooth_threshold,
zero_crossing[i].crossings);
number_crossings=(size_t) i;
derivative=(double *) RelinquishMagickMemory(derivative);
second_derivative=(double *) RelinquishMagickMemory(second_derivative);
/*
Ensure the scale-space fingerprints form lines in scale-space, not loops.
*/
ConsolidateCrossings(zero_crossing,number_crossings);
/*
Force endpoints to be included in the interval.
*/
for (i=0; i <= (ssize_t) number_crossings; i++)
{
for (j=0; j < 255; j++)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]);
for (j=255; j > 0; j--)
if (zero_crossing[i].crossings[j] != 0)
break;
zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]);
}
/*
Initialize interval tree.
*/
root=InitializeIntervalTree(zero_crossing,number_crossings);
if (root == (IntervalTree *) NULL)
{
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(0.0);
}
/*
Find active nodes: stability is greater (or equal) to the mean stability of
its children.
*/
number_nodes=0;
ActiveNodes(list,&number_nodes,root->child);
/*
Initialize extrema.
*/
for (i=0; i <= 255; i++)
extrema[i]=0;
for (i=0; i < number_nodes; i++)
{
/*
Find this tau in zero crossings list.
*/
k=0;
node=list[i];
for (j=0; j <= (ssize_t) number_crossings; j++)
if (zero_crossing[j].tau == node->tau)
k=j;
/*
Find the value of the peak.
*/
peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue :
MagickFalse;
index=node->left;
value=zero_crossing[k].histogram[index];
for (x=node->left; x <= node->right; x++)
{
if (peak != MagickFalse)
{
if (zero_crossing[k].histogram[x] > value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
else
if (zero_crossing[k].histogram[x] < value)
{
value=zero_crossing[k].histogram[x];
index=x;
}
}
for (x=node->left; x <= node->right; x++)
{
if (index == 0)
index=256;
if (peak != MagickFalse)
extrema[x]=(short) index;
else
extrema[x]=(short) (-index);
}
}
/*
Determine the average tau.
*/
average_tau=0.0;
for (i=0; i < number_nodes; i++)
average_tau+=list[i]->tau;
average_tau/=(double) number_nodes;
/*
Relinquish resources.
*/
FreeNodes(root);
zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing);
list=(IntervalTree **) RelinquishMagickMemory(list);
return(average_tau);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S c a l e S p a c e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleSpace() performs a scale-space filter on the 1D histogram.
%
% The format of the ScaleSpace method is:
%
% ScaleSpace(const ssize_t *histogram,const double tau,
% double *scale_histogram)
%
% A description of each parameter follows.
%
%    o histogram: Specifies an array of ssize_t values representing the number
%      of pixels for each intensity of a particular color component.
%
*/
/*
  ScaleSpace() convolves the 256-bin histogram with a Gaussian of standard
  deviation `tau', writing the smoothed (and normalized) result into
  scale_histogram.
*/
static void ScaleSpace(const ssize_t *histogram,const double tau,
  double *scale_histogram)
{
  double
    alpha,
    beta,
    *kernel,
    weighted_sum;

  ssize_t
    delta,
    x;

  kernel=(double *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (double *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI));
  beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau));
  /*
    Tabulate the Gaussian by bin distance; once a weight drops below
    MagickEpsilon the remaining entries stay at the pre-cleared 0.0.
  */
  for (delta=0; delta <= 255; delta++)
    kernel[delta]=0.0;
  for (delta=0; delta <= 255; delta++)
  {
    kernel[delta]=exp((double) beta*delta*delta);
    if (kernel[delta] < MagickEpsilon)
      break;
  }
  /*
    Convolve: each output bin is the alpha-scaled weighted sum over all bins.
  */
  for (x=0; x <= 255; x++)
  {
    weighted_sum=0.0;
    for (delta=0; delta <= 255; delta++)
      weighted_sum+=(double) histogram[delta]*
        kernel[MagickAbsoluteValue(x-delta)];
    scale_histogram[x]=alpha*weighted_sum;
  }
  kernel=(double *) RelinquishMagickMemory(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e g m e n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SegmentImage() segment an image by analyzing the histograms of the color
% components and identifying units that are homogeneous with the fuzzy
% C-means technique.
%
% The format of the SegmentImage method is:
%
% MagickBooleanType SegmentImage(Image *image,
% const ColorspaceType colorspace,const MagickBooleanType verbose,
% const double cluster_threshold,const double smooth_threshold,
% ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image: the image.
%
% o colorspace: Indicate the colorspace.
%
% o verbose: Set to MagickTrue to print detailed information about the
% identified classes.
%
% o cluster_threshold: This represents the minimum number of pixels
% contained in a hexahedra before it can be considered valid (expressed
% as a percentage).
%
% o smooth_threshold: the smoothing threshold eliminates noise in the second
% derivative of the histogram. As the value is increased, you can expect a
% smoother second derivative.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold,
  ExceptionInfo *exception)
{
  ColorspaceType
    previous_colorspace;

  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /*
          Release every allocation made so far, including the possibly
          half-allocated current dimension (the previous code decremented i
          before cleaning up, leaking whichever of histogram[i]/extrema[i]
          had succeeded).  RelinquishMagickMemory() is a no-op on NULL.
        */
        for ( ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  /*
    Initialize histogram and find the per-channel extrema at the optimal tau;
    a zero smooth_threshold falls back to 1.0.
  */
  previous_colorspace=image->colorspace;
  (void) TransformImageColorspace(image,colorspace,exception);
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique, then restore the original
    colorspace.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose,
    exception);
  (void) TransformImageColorspace(image,previous_colorspace,exception);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ Z e r o C r o s s H i s t o g r a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroCrossHistogram() find the zero crossings in a histogram and marks
% directions as: 1 is negative to positive; 0 is zero crossing; and -1
% is positive to negative.
%
% The format of the ZeroCrossHistogram method is:
%
% ZeroCrossHistogram(double *second_derivative,
% const double smooth_threshold,short *crossings)
%
% A description of each parameter follows.
%
% o second_derivative: Specifies an array of doubles representing the
% second derivative of the histogram of a particular color component.
%
% o crossings: This array of integers is initialized with
% -1, 0, or 1 representing the slope of the first derivative of the
% of a particular color component.
%
*/
/*
  ZeroCrossHistogram() first clamps samples whose magnitude is below
  smooth_threshold to zero, then scans the 256 samples tracking the sign of
  the most recent nonzero sample, writing the slope markers into `crossings'.
*/
static void ZeroCrossHistogram(double *second_derivative,
  const double smooth_threshold,short *crossings)
{
  ssize_t
    i,
    parity;

  /*
    Merge low-magnitude samples to zero to help suppress noise.
  */
  for (i=0; i < 256; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings; `parity' remembers the sign state left by the
    previous nonzero sample.
  */
  parity=0;
  for (i=0; i < 256; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] > 0.0)
      {
        if (parity < 0)
          crossings[i]=1;
        parity=(-1);
      }
    else
      if (second_derivative[i] < 0.0)
        {
          if (parity > 0)
            crossings[i]=(-1);
          parity=1;
        }
  }
}
|
SerializerIO_WaveletCompression_MPI_Simple.h | /*
* SerializerIO_WaveletCompression_MPI_Simple.h
* CubismZ
*
* Copyright 2018 ETH Zurich. All rights reserved.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#ifndef SERIALIZERIO_WAVELETCOMPRESSION_MPI_SIMPLE_H_
#define SERIALIZERIO_WAVELETCOMPRESSION_MPI_SIMPLE_H_
#endif
#include <typeinfo>
#include <sstream>
#include <numeric>
#ifdef _OPENMP
#include <omp.h>
#else
// Serial fallbacks when the translation unit is built without OpenMP:
// report a single-thread "pool" so callers need no further #ifdefs.
static int omp_get_max_threads(void) { return 1;}
static int omp_get_thread_num(void) { return 0; }
#endif
#include <Timer.h>
#include <map>
using namespace std;
#include "WaveletSerializationTypes.h"
#include "FullWaveletTransform.h"
#include "WaveletCompressor.h"
#include "CompressionEncoders.h"
//#define _WRITE_AT_ALL_ 1 // peh:
#if defined(_USE_ZEROBITS_)
// Clear the lowest `n` bits (n in {4, 8, 12, 16}) of the 32-bit word at
// *ul, which callers use to coarsen float mantissas before compression.
// n == 0 — or any width not listed — leaves the word unchanged.
void float_zero_bits(unsigned int *ul, int n)
{
	switch (n)
	{
	case 4:
		*ul &= 0xfffffff0;
		break;
	case 8:
		*ul &= 0xffffff00;
		break;
	case 12:
		*ul &= 0xfffff000;
		break;
	case 16:
		*ul &= 0xffff0000;
		break;
	default:
		break;
	}
}
#endif
template<typename GridType, typename IterativeStreamer>
class SerializerIO_WaveletCompression_MPI_SimpleBlocking
{
protected:
enum
{
NCHANNELS = IterativeStreamer::channels,
NPTS = _BLOCKSIZE_ * _BLOCKSIZE_ * _BLOCKSIZE_
};
enum //some considerations about the per-thread working set
{
DESIREDMEM = (4 * 1024) * 1024,
ENTRYSIZE = sizeof(WaveletCompressor) + sizeof(int),
ENTRIES_CANDIDATE = DESIREDMEM / ENTRYSIZE,
ENTRIES = ENTRIES_CANDIDATE ? ENTRIES_CANDIDATE : 1,
BUFFERSIZE = ENTRIES * ENTRYSIZE,
ALERT = (ENTRIES - 1) * ENTRYSIZE
};
struct CompressionBuffer
{
BlockMetadata hotblocks[ENTRIES];
unsigned char compressedbuffer[2*BUFFERSIZE];
};
struct TimingInfo { float total, fwt, encoding; };
string binaryocean_title, binarylut_title, header;
vector< BlockMetadata > myblockindices; //tells in which compressed chunk is any block, nblocks
HeaderLUT lutheader;
vector< size_t > lut_compression; //tells the number of compressed chunk, and where do they start, nchunks + 2
vector< unsigned char > allmydata; //buffer with the compressed data
size_t written_bytes, pending_writes, completed_writes;
Real threshold;
bool halffloat, verbosity;
int wtype_read, wtype_write; // peh
vector< float > workload_total, workload_fwt, workload_encode; //per-thread cpu time for imbalance insight for fwt and encoding
vector<CompressionBuffer> workbuffer; //per-thread compression buffer
// Deflate the thread-local `inputbuffer' in place with zlib, reserve a
// region of the shared `allmydata' vector under an OpenMP critical section,
// copy the compressed chunk into it, and repoint the metadata of the
// `nblocks' blocks packed in this chunk at their new location.  Resets
// `bufsize' and `nblocks' to zero and returns the elapsed seconds.
// Called concurrently from the worker threads of _compress().
float _encode_and_flush(unsigned char inputbuffer[], long& bufsize, const long maxsize, BlockMetadata metablocks[], int& nblocks)
{
	//0. setup
	//1. compress the data with zlib, obtain zptr, zbytes
	//2. obtain an offset from allmydata -> dstoffset
	//3. obtain a new entry in lut_compression -> idcompression
	//4. copy the [zptr,zptr+zbytes] into in allmydata, starting from dstoffset
	//5. for all blocks, set the myblockindices[blockids[i]] to a valid state
	//6. set nblocks to zero
	Timer timer; timer.start();
	const unsigned char * const zptr = inputbuffer;
	size_t zbytes = bufsize;
	size_t dstoffset = -1;
	int idcompression = -1;
	//1.
	/* ZLIB/LZ4 (LOSSLESS COMPRESSION) */
	{
		/* TODO: replace the following code with: zbytes = zcompress(inputbuffer, bufsize, maxsize); */
		z_stream myzstream = {0};
		deflateInit(&myzstream, Z_DEFAULT_COMPRESSION); // Z_BEST_COMPRESSION
		//deflateInit(&myzstream, Z_BEST_COMPRESSION);
		unsigned mah = maxsize;
		int err = deflate_inplace(&myzstream, inputbuffer, bufsize, &mah);
		assert(err == Z_OK);
		zbytes = myzstream.total_out;
		// fixed memory leak, Fabian (08 Oct 2014)
		err = deflateEnd(&myzstream);
		assert(err == Z_OK);
	}
	//2-3. serialized: reserve [dstoffset, dstoffset+zbytes) and a LUT slot;
	// grow allmydata only after all in-flight memcpy's have completed.
#pragma omp critical
	{
		dstoffset = written_bytes;
		written_bytes += zbytes;
		//exception: we have to resize allmydata
		if (written_bytes > allmydata.size())
		{
			//yield-based wait until writes complete
			while (pending_writes != completed_writes) sched_yield();
			//safely resize
			printf("resizing allmydata to %ld bytes\n", written_bytes);
			allmydata.resize(written_bytes);
		}
		idcompression = lut_compression.size();
		lut_compression.push_back(dstoffset);
		++pending_writes;
	}
	//4. copy outside the critical section; the reserved region is private.
	assert(allmydata.size() >= written_bytes);
	memcpy(&allmydata.front() + dstoffset, zptr, zbytes);
#pragma omp atomic
	++completed_writes;
	//5.
	for(int i = 0; i < nblocks; ++i)
	{
		const int entry = metablocks[i].idcompression;
		assert(entry >= 0 && entry < myblockindices.size());
		myblockindices[entry] = metablocks[i];
		myblockindices[entry].idcompression = idcompression;
		myblockindices[entry].subid = i;
	}
	//6.
	bufsize = 0;
	nblocks = 0;
	return timer.stop();
}
// Compress one scalar channel of all resident blocks in parallel.  Each
// OpenMP thread streams block data into an SoA buffer, runs the configured
// codec (wavelets / fpzip / zfp / sz / none, chosen at build time), packs
// [nbytes|payload] records into its private CompressionBuffer, and flushes
// it through _encode_and_flush() whenever it approaches capacity.
template<int channel>
void _compress(const vector<BlockInfo>& vInfo, const int NBLOCKS, IterativeStreamer streamer)
{
	// NOTE(review): compress_threads is computed but never used below.
	int compress_threads = omp_get_max_threads();
	if (compress_threads < 1) compress_threads = 1;
#pragma omp parallel
	{
		const int tid = omp_get_thread_num();
		CompressionBuffer & mybuf = workbuffer[tid];
		long mybytes = 0; int myhotblocks = 0;
		float tfwt = 0, tencode = 0;
		Timer timer;
		timer.start();
#pragma omp for
		for(int i = 0; i < NBLOCKS; ++i)
		{
			Timer tw; tw.start();
			//wavelet compression
			{
				FluidBlock& b = *(FluidBlock*)vInfo[i].ptrBlock;
				WaveletCompressor compressor;
				// Gather the block's channel values into the compressor's
				// uncompressed SoA buffer, via whichever streamer API applies.
				Real * const mysoabuffer = &compressor.uncompressed_data()[0][0][0];
				if(streamer.name() == "StreamerGridPointIterative")
				{
					for(int iz=0; iz<FluidBlock::sizeZ; iz++)
						for(int iy=0; iy<FluidBlock::sizeY; iy++)
							for(int ix=0; ix<FluidBlock::sizeX; ix++)
								mysoabuffer[ix + _BLOCKSIZE_ * (iy + _BLOCKSIZE_ * iz)] = streamer.template operate<channel>(b(ix, iy, iz));
				}
				else
				{
					IterativeStreamer mystreamer(b);
					for(int iz=0; iz<FluidBlock::sizeZ; iz++)
						for(int iy=0; iy<FluidBlock::sizeY; iy++)
							for(int ix=0; ix<FluidBlock::sizeX; ix++)
								mysoabuffer[ix + _BLOCKSIZE_ * (iy + _BLOCKSIZE_ * iz)] = mystreamer.operate(ix, iy, iz);
				}
				//wavelet digestion -- each branch appends [nbytes|payload]
				// to the thread's compressedbuffer.
#if defined(_USE_WAVZ_)
				const int nbytes = (int)compressor.compress(this->threshold, this->halffloat, this->wtype_write);
				memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
				mybytes += sizeof(nbytes);
				memcpy(mybuf.compressedbuffer + mybytes, compressor.compressed_data(), sizeof(unsigned char) * nbytes);
#elif defined(_USE_FPZIP_)
				const int inbytes = FluidBlock::sizeX * FluidBlock::sizeY * FluidBlock::sizeZ * sizeof(Real);
				int nbytes;
				int is_float = (sizeof(Real)==4)? 1 : 0;
				int layout[4] = {_BLOCKSIZE_, _BLOCKSIZE_, _BLOCKSIZE_, 1};
				// threshold doubles as the fpzip precision parameter here.
				int fpzip_prec = (int)this->threshold;
				fpz_compress3D((void *)mysoabuffer, inbytes, layout, (void *) compressor.compressed_data(), (unsigned int *)&nbytes, is_float, fpzip_prec);
				memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
				mybytes += sizeof(nbytes);
				memcpy(mybuf.compressedbuffer + mybytes, compressor.compressed_data(), sizeof(unsigned char) * nbytes);
#elif defined(_USE_ZFP_)
				const int inbytes = FluidBlock::sizeX * FluidBlock::sizeY * FluidBlock::sizeZ * sizeof(Real);
				int nbytes;
				// threshold doubles as the zfp accuracy parameter here.
				double zfp_acc = this->threshold;
				int is_float = (sizeof(Real)==4)? 1 : 0;
				int layout[4] = {_BLOCKSIZE_, _BLOCKSIZE_, _BLOCKSIZE_, 1};
				size_t nbytes_zfp;
				int status = zfp_compress_buffer(mysoabuffer, layout[0], layout[1], layout[2], zfp_acc, is_float, (unsigned char *)compressor.compressed_data(), &nbytes_zfp);
				nbytes = nbytes_zfp;
#if VERBOSE
				printf("zfp_compress status = %d, from %d to %d bytes = %d\n", status, inbytes, nbytes);
#endif
				memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
				mybytes += sizeof(nbytes);
				memcpy(mybuf.compressedbuffer + mybytes, compressor.compressed_data(), sizeof(unsigned char) * nbytes);
#elif defined(_USE_SZ_)
				const int inbytes = FluidBlock::sizeX * FluidBlock::sizeY * FluidBlock::sizeZ * sizeof(Real);
				int nbytes;
				int is_float = (sizeof(Real)==4)? 1 : 0;
				double sz_abs_acc = 0.0;
				double sz_rel_acc = 0.0;
				double sz_pwr_acc = 0.0;
				int sz_pwr_type = SZ_PWR_MAX_TYPE;
				// NOTE(review): the SZ_ABS_ACC env lookup is immediately
				// overwritten by threshold on the next line -- confirm intent.
				if(getenv("SZ_ABS_ACC")) sz_abs_acc = atof(getenv("SZ_ABS_ACC"));
				sz_abs_acc = (double) this->threshold;
				int layout[4] = {_BLOCKSIZE_, _BLOCKSIZE_, _BLOCKSIZE_, 1};
				size_t *bytes_sz = (size_t *)malloc(sizeof(size_t));
				unsigned char *compressed_sz = SZ_compress_args(is_float? SZ_FLOAT:SZ_DOUBLE, (unsigned char *)mysoabuffer, bytes_sz, ABS, sz_abs_acc, sz_rel_acc, sz_pwr_acc, sz_pwr_type, 0, 0, layout[2], layout[1], layout[0]);
				nbytes = *bytes_sz;
				memcpy(compressor.compressed_data(), compressed_sz, nbytes);
				free(bytes_sz);
				free(compressed_sz);
#if VERBOSE
				printf("SZ_compress_args: from %d to %d bytes\n", inbytes, nbytes);
#endif
				memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
				mybytes += sizeof(nbytes);
				memcpy(mybuf.compressedbuffer + mybytes, compressor.compressed_data(), sizeof(unsigned char) * nbytes);
#else /* NO COMPRESSION */
				const int inbytes = FluidBlock::sizeX * FluidBlock::sizeY * FluidBlock::sizeZ * sizeof(Real);
				int nbytes = inbytes;
				memcpy(mybuf.compressedbuffer + mybytes, &nbytes, sizeof(nbytes));
				mybytes += sizeof(nbytes);
#if defined(_USE_ZEROBITS_)
				// set some bits to zero
				for(int iz=0; iz<FluidBlock::sizeZ; iz++)
					for(int iy=0; iy<FluidBlock::sizeY; iy++)
						for(int ix=0; ix<FluidBlock::sizeX; ix++)
							float_zero_bits((unsigned int *)&mysoabuffer[ix + _BLOCKSIZE_ * (iy + _BLOCKSIZE_ * iz)], _ZEROBITS_);
#endif
				memcpy(mybuf.compressedbuffer + mybytes, mysoabuffer, sizeof(unsigned char) * nbytes);
#endif
				mybytes += nbytes;
			}
			tfwt += tw.stop();
			//building the meta data
			{
				BlockMetadata curr = { i, myhotblocks, vInfo[i].index[0], vInfo[i].index[1], vInfo[i].index[2]};
				mybuf.hotblocks[myhotblocks] = curr;
				myhotblocks++;
			}
			// Flush the private buffer before it can overflow.
			if (mybytes >= ALERT || myhotblocks >= ENTRIES)
				tencode = _encode_and_flush(mybuf.compressedbuffer, mybytes, (long)BUFFERSIZE, mybuf.hotblocks, myhotblocks);
		}
		// Final flush of whatever remains in this thread's buffer.
		if (mybytes > 0)
			tencode = _encode_and_flush(mybuf.compressedbuffer, mybytes, (long)BUFFERSIZE, mybuf.hotblocks, myhotblocks);
		workload_total[tid] = timer.stop();
		workload_fwt[tid] = tfwt;
		workload_encode[tid] = tencode;
	}
}
virtual void _to_file(const MPI_Comm mycomm, const string fileName)
{
int mygid;
int nranks;
MPI_Comm_rank(mycomm, &mygid);
MPI_Comm_size(mycomm, &nranks);
MPI_Info myfileinfo;
MPI_Info_create(&myfileinfo);
string key("access_style");
string val("write_once");
MPI_Info_set(myfileinfo, (char*)key.c_str(), (char*)val.c_str());
MPI_File myfile;
#if defined(_WRITE_AT_ALL_)
MPI_File_open(mycomm, (char*)fileName.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, myfileinfo, &myfile);
#else
MPI_File_open(MPI_COMM_SELF, (char*)fileName.c_str(), MPI_MODE_WRONLY | MPI_MODE_CREATE, myfileinfo, &myfile);
#endif
MPI_Info_free(&myfileinfo);
size_t current_displacement = 0;
//write the mini-header
{
size_t blank_address = -1;
const int miniheader_bytes = sizeof(blank_address) + binaryocean_title.size();
current_displacement += miniheader_bytes;
}
//write the buffer - alias the binary ocean
{
// apparently this suck:
// myfile.Seek_shared(current_displacement, MPI_SEEK_SET);
// myfile.Write_ordered(&allmydata.front(), written_bytes, MPI_CHAR);
// current_displacement = myfile.Get_position_shared();
// so here we do it manually. so nice!
size_t myfileoffset = 0;
MPI_Exscan(&written_bytes, &myfileoffset, 1, MPI_UINT64_T, MPI_SUM, mycomm);
if (mygid == 0)
myfileoffset = 0;
MPI_Status status;
#if defined(_WRITE_AT_ALL_)
MPI_File_write_at_all(myfile, current_displacement + myfileoffset, &allmydata.front(), written_bytes, MPI_CHAR, &status);
#else
MPI_File_write_at(myfile, current_displacement + myfileoffset, &allmydata.front(), written_bytes, MPI_CHAR, &status);
#endif
//here we update current_displacement by broadcasting the total written bytes from rankid = nranks -1
size_t total_written_bytes = myfileoffset + written_bytes;
MPI_Bcast(&total_written_bytes, 1, MPI_UINT64_T, nranks - 1, mycomm);
current_displacement += total_written_bytes;
}
//go back at the blank address and fill it with the displacement
if (mygid == 0)
{
MPI_Status status;
MPI_File_write(myfile, ¤t_displacement, sizeof(current_displacement), MPI_CHAR, &status);
MPI_File_write(myfile, (void*)binaryocean_title.c_str(), binaryocean_title.size(), MPI_CHAR, &status);
}
//write the header
{
const size_t header_bytes = header.size();
MPI_Status status;
if (mygid == 0)
MPI_File_write_at(myfile, current_displacement, (void*)header.c_str(), header_bytes, MPI_CHAR, &status);
current_displacement += header_bytes;
}
//write block metadata
{
const int metadata_bytes = myblockindices.size() * sizeof(BlockMetadata);
MPI_Status status;
#if defined(_WRITE_AT_ALL_)
MPI_File_write_at_all(myfile, current_displacement + mygid * metadata_bytes, &myblockindices.front(), metadata_bytes, MPI_CHAR, &status);
#else
MPI_File_write_at(myfile, current_displacement + mygid * metadata_bytes, &myblockindices.front(), metadata_bytes, MPI_CHAR, &status);
#endif
current_displacement += metadata_bytes * nranks;
}
//write the lut title
{
const int title_bytes = binarylut_title.size();
MPI_Status status;
if (mygid == 0)
MPI_File_write_at(myfile, current_displacement, (void*)binarylut_title.c_str(), title_bytes, MPI_CHAR, &status);
current_displacement += title_bytes;
}
//write the local buffer entries
{
assert(lut_compression.size() == 0);
const int lutheader_bytes = sizeof(lutheader);
MPI_Status status;
#if defined(_WRITE_AT_ALL_)
MPI_File_write_at_all(myfile, current_displacement + mygid * lutheader_bytes, &lutheader, lutheader_bytes, MPI_CHAR, &status);
#else
MPI_File_write_at(myfile, current_displacement + mygid * lutheader_bytes, &lutheader, lutheader_bytes, MPI_CHAR, &status);
#endif
}
MPI_File_close(&myfile); //bon voila tu vois ou quoi
}
// Reduce the per-thread timings in `workload' across all ranks of `mycomm'
// and, on the root, print min/avg/max plus an imbalance percentage.
// Returns the reduced average (meaningful on the root rank).
float _profile_report(const char * const workload_name, vector<float>& workload, const MPI_Comm mycomm, bool isroot)
{
	float t_min = *std::min_element(workload.begin(), workload.end());
	float t_max = *std::max_element(workload.begin(), workload.end());
	float t_avg = std::accumulate(workload.begin(), workload.end(), 0.f);

	// Reduction order (MIN, SUM, MAX) is identical on every rank; the root
	// reduces in place, the other ranks only contribute local values.
	MPI_Reduce(isroot ? MPI_IN_PLACE : &t_min, &t_min, 1, MPI_FLOAT, MPI_MIN, 0, mycomm);
	MPI_Reduce(isroot ? MPI_IN_PLACE : &t_avg, &t_avg, 1, MPI_FLOAT, MPI_SUM, 0, mycomm);
	MPI_Reduce(isroot ? MPI_IN_PLACE : &t_max, &t_max, 1, MPI_FLOAT, MPI_MAX, 0, mycomm);

	int comm_size;
	MPI_Comm_size(mycomm, &comm_size);
	// Assumes every rank contributed workload.size() samples.
	t_avg /= comm_size * workload.size();

	if (isroot)
		printf("TLP %-10s min:%.3e s avg:%.3e s max:%.3e s imb:%.0f%%\n",
			workload_name, t_min, t_avg, t_max, (t_max - t_min) / t_avg * 100);

	return t_avg;
}
// Top-level entry: serialize one channel of `inputGrid' to `fileName'.
// Builds the ASCII header, compresses all blocks (_compress), appends the
// chunk-offset LUT to the payload, writes everything with _to_file(), and
// optionally prints compression/timing statistics.
template<int channel>
void _write(GridType & inputGrid, string fileName, IterativeStreamer streamer)
{
	const vector<BlockInfo> infos = inputGrid.getBlocksInfo();
	const int NBLOCKS = infos.size();
	//prepare the headers
	{
		this->binaryocean_title = "\n==============START-BINARY-OCEAN==============\n";
		this->binarylut_title = "\n==============START-BINARY-LUT==============\n";
		{
			const int xtotalbpd = inputGrid.getBlocksPerDimension(0);
			const int ytotalbpd = inputGrid.getBlocksPerDimension(1);
			const int ztotalbpd = inputGrid.getBlocksPerDimension(2);
			const int xbpd = inputGrid.getResidentBlocksPerDimension(0);
			const int ybpd = inputGrid.getResidentBlocksPerDimension(1);
			const int zbpd = inputGrid.getResidentBlocksPerDimension(2);
			const double xExtent = inputGrid.getH()*xtotalbpd*_BLOCKSIZE_;
			const double yExtent = inputGrid.getH()*ytotalbpd*_BLOCKSIZE_;
			const double zExtent = inputGrid.getH()*ztotalbpd*_BLOCKSIZE_;
			std::stringstream ss;
			ss << "\n==============START-ASCI-HEADER==============\n";
			{
				// Runtime endianness probe: inspect the first byte of an int.
				int one = 1;
				bool isone = *(char *)(&one);
				ss << "Endianess: " << (isone ? "little" : "big") << "\n";
			}
			ss << "sizeofReal: " << sizeof(Real) << "\n";
			ss << "sizeofsize_t: " << sizeof(size_t) << "\n";
			ss << "sizeofBlockMetadata: " << sizeof(BlockMetadata) << "\n";
			ss << "sizeofHeaderLUT: " << sizeof(HeaderLUT) << "\n";
			ss << "sizeofCompressedBlock: " << sizeof(CompressedBlock) << "\n";
			ss << "Blocksize: " << _BLOCKSIZE_ << "\n";
			ss << "Blocks: " << xtotalbpd << " x " << ytotalbpd << " x " << ztotalbpd << "\n";
			ss << "Extent: " << xExtent << " " << yExtent << " " << zExtent << "\n";
			ss << "SubdomainBlocks: " << xbpd << " x " << ybpd << " x " << zbpd << "\n";
			ss << "HalfFloat: " << (this->halffloat ? "yes" : "no") << "\n";
			// The "Wavelets:"/"Encoder:" fields record the codec selected at
			// build time so the reader can decode accordingly.
#if defined(_USE_WAVZ_)
			ss << "Wavelets: " << WaveletsOnInterval::ChosenWavelets_GetName(this->wtype_write) << "\n";
#elif defined(_USE_FPZIP_)
			ss << "Wavelets: " << "fpzip" << "\n";
#elif defined(_USE_ZFP_)
			ss << "Wavelets: " << "zfp" << "\n";
#elif defined(_USE_SZ_)
			ss << "Wavelets: " << "sz" << "\n";
#else
			ss << "Wavelets: " << "none" << "\n";
#endif
			ss << "WaveletThreshold: " << threshold << "\n";
#if defined(_USE_ZLIB_)
			ss << "Encoder: " << "zlib" << "\n";
#elif defined(_USE_LZ4_)
			ss << "Encoder: " << "lz4" << "\n";
#else
			ss << "Encoder: " << "none" << "\n";
#endif
			ss << "==============START-BINARY-METABLOCKS==============\n";
			this->header = ss.str();
		}
	}
	//compress my data, prepare for serialization
	{
		double t0 = MPI_Wtime();
		written_bytes = pending_writes = completed_writes = 0;
		if (allmydata.size() == 0)
		{
			// Upper-bound preallocation: raw payload size plus 4 MB slack.
			//const size_t speculated_compression_rate = 10;
			allmydata.resize(NBLOCKS * sizeof(Real) * NPTS + 4*1024*1024);
		}
		myblockindices.clear();
		myblockindices.resize(NBLOCKS);
		lut_compression.clear();
		_compress<channel>(infos, infos.size(), streamer);
		//manipulate the file data (allmydata, lut_compression, myblockindices)
		//so that they are file-friendly: append the chunk-offset LUT to the
		//payload and summarize it in lutheader.
		{
			const int nchunks = lut_compression.size();
			const size_t extrabytes = lut_compression.size() * sizeof(size_t);
			const char * const lut_ptr = (char *)&lut_compression.front();
			allmydata.insert(allmydata.begin() + written_bytes, lut_ptr, lut_ptr + extrabytes);
			lut_compression.clear();
			HeaderLUT newvalue = { written_bytes + extrabytes, nchunks };
			lutheader = newvalue;
			written_bytes += extrabytes;
		}
		double t1 = MPI_Wtime();
		printf("SerializerIO_WaveletCompression_MPI_Simple.h: compress+serialization %f seconds\n", t1-t0);
	}
	double io_t0 = MPI_Wtime();
	const MPI_Comm mycomm = inputGrid.getCartComm();
	int mygid;
	int comm_size;
	MPI_Comm_rank(mycomm, &mygid);
	MPI_Comm_size(mycomm, &comm_size);
	//write into the file (can be disabled via CUBISMZ_NOIO for benchmarking)
	Timer timer; timer.start();
	if (getenv("CUBISMZ_NOIO") == NULL)
		_to_file(mycomm, fileName);
	vector<float> workload_file(1, timer.stop());
	double io_t1 = MPI_Wtime();
#if VERBOSE
	printf("SerializerIO_WaveletCompression_MPI_Simple.h: _to_file %f seconds\n", io_t1-io_t0);
#endif
	//just a report now
	if (verbosity)
	{
		size_t aggregate_written_bytes = -1;
		MPI_Reduce(&written_bytes, &aggregate_written_bytes, 1, MPI_UINT64_T, MPI_SUM, 0, mycomm);
		const bool isroot = mygid == 0;
		if (mygid == 0)
			printf("Channel %d: %.2f kB, wavelet-threshold: %.1e, compr. rate: %.2f\n",
				channel, aggregate_written_bytes/1024.,
				threshold, NPTS * sizeof(Real) * NBLOCKS * comm_size / (float) aggregate_written_bytes);
#if VERBOSE
		const float tavgcompr = _profile_report("Compr", workload_total, mycomm, isroot);
		const float tavgfwt =_profile_report("FWT+decim", workload_fwt, mycomm, isroot);
		const float tavgenc =_profile_report("Encoding", workload_encode, mycomm, isroot);
		const float tavgio =_profile_report("FileIO", workload_file, mycomm, isroot);
		const float toverall = tavgio + tavgcompr;
		if (isroot)
		{
			printf("Time distribution: %5s:%.0f%% %5s:%.0f%% %5s:%.0f%% %5s:%.0f%%\n",
				"FWT ", tavgfwt / toverall * 100,
				"ENC ", tavgenc / toverall * 100,
				"IO ", tavgio / toverall * 100,
				"Other", (tavgcompr - tavgfwt - tavgenc)/ toverall * 100);
		}
#endif
	}
}
/* Read back one channel file produced by _write():
   pass 1 (sequential): miniheader, ASCII header, per-block metadata and
   the per-subdomain compression LUTs;
   pass 2 (random access): decode a single hard-coded block as a check.
   NOTE(review): the return values of fread/fscanf/fgets are ignored and
   all validation is done with assert(), which vanishes under NDEBUG —
   a truncated or corrupt file would then be read silently. */
void _read(string path)
{
//THE FIRST PART IS SEQUENTIAL
//THE SECOND ONE IS RANDOM ACCESS
/* File offset of the ASCII header == end of the compressed payload. */
size_t global_header_displacement = -1;
int NBLOCKS = -1;
int totalbpd[3] = {-1, -1, -1}; /* blocks per dimension, whole domain */
int bpd[3] = { -1, -1, -1}; /* blocks per dimension, one subdomain */
string binaryocean_title = "\n==============START-BINARY-OCEAN==============\n";
/* Bytes preceding the compressed payload: the stored header
   displacement plus the binary-ocean banner line. */
const int miniheader_bytes = sizeof(size_t) + binaryocean_title.size();
vector<BlockMetadata> metablocks;
//random access data structures: meta2subchunk, lutchunks;
/* (iz, iy, ix) -> location of that block's compressed chunk in the file. */
map<int, map<int, map<int, CompressedBlock > > > meta2subchunk;
/* Global, sorted chunk start offsets; entry i+1 bounds chunk i. */
vector<size_t> lutchunks;
{
FILE * file = fopen(path.c_str(), "rb");
assert(file);
//reading the header and mini header
{
size_t header_displacement = -1;
fread(&header_displacement, sizeof(size_t), 1, file);
fseek(file, header_displacement, SEEK_SET);
global_header_displacement = header_displacement;
char buf[1024];
/* First fgets consumes the leading newline of the header banner. */
fgets(buf, sizeof(buf), file);
fgets(buf, sizeof(buf), file);
assert(string("==============START-ASCI-HEADER==============\n") == string(buf));
fscanf(file, "Endianess: %s\n", buf);
{
/* Detect host endianness and require it to match the file. */
int one = 1;
bool isone = *(char *)(&one);
if (isone)
assert(string(buf) == "little");
else
assert(string(buf) == "big");
}
int sizeofreal = -1;
fscanf(file, "sizeofReal: %d\n", &sizeofreal);
assert(sizeof(Real) == sizeofreal);
int bsize = -1;
fscanf(file, "Blocksize: %d\n", &bsize);
assert(bsize == _BLOCKSIZE_);
fscanf(file, "Blocks: %d x %d x %d\n", totalbpd, totalbpd + 1, totalbpd + 2);
fscanf(file, "SubdomainBlocks: %d x %d x %d\n", bpd, bpd + 1, bpd + 2);
fscanf(file, "HalfFloat: %s\n", buf);
this->halffloat = (string(buf) == "yes");
fscanf(file, "Wavelets: %s\n", buf);
assert(buf == string(WaveletsOnInterval::ChosenWavelets_GetName(this->wtype_read)));
fscanf(file, "Encoder: %s\n", buf);
/* NOTE(review): _write emits "none" when neither encoder is compiled
   in, but this reader only accepts "zlib" or "lz4" — confirm the
   build configurations always match between writer and reader. */
#if defined(_USE_ZLIB_)
assert(buf == string("zlib"));
#else /* _USE_LZ4_ */
assert(buf == string("lz4"));
#endif
fgets(buf, sizeof(buf), file);
assert(string("==============START-BINARY-METABLOCKS==============\n") == string(buf));
NBLOCKS = totalbpd[0] * totalbpd[1] * totalbpd[2];
}
//reading the binary lut
{
/* One BlockMetadata record per block, written right after the
   ASCII header. idcompression is still subdomain-local here. */
metablocks.resize(NBLOCKS);
for(int i = 0; i < NBLOCKS; ++i)
{
BlockMetadata entry;
fread(&entry, sizeof(entry), 1, file);
assert(entry.idcompression >= 0 && entry.idcompression < bpd[0] * bpd[1] * bpd[2]);
metablocks[i] = entry;
}
}
//reading the compression lut
{
char buf[1024];
/* Consume the newline before the LUT banner, then the banner itself. */
fgetc(file);
fgets(buf, sizeof(buf), file);
assert(string("==============START-BINARY-LUT==============\n") == string(buf));
//bool done = false;
size_t base = miniheader_bytes;
const int BPS = bpd[0] * bpd[1] * bpd[2];
assert(NBLOCKS % BPS == 0);
const int SUBDOMAINS = NBLOCKS / BPS;
vector<HeaderLUT> headerluts(SUBDOMAINS); //oh mamma mia
fread(&headerluts.front(), sizeof(HeaderLUT), SUBDOMAINS, file);
/* Each subdomain stored its chunk LUT at the tail of its own byte
   range; convert all of them into one global, file-absolute LUT. */
for(int s = 0, currblock = 0; s < SUBDOMAINS; ++s)
{
const int nglobalchunks = lutchunks.size();
assert(!feof(file));
const int nchunks = headerluts[s].nchunks;
const size_t myamount = headerluts[s].aggregate_bytes;
const size_t lutstart = base + myamount - sizeof(size_t) * nchunks;
//read the lut
fseek(file, lutstart, SEEK_SET);
vector<size_t> mylut(nchunks);
fread(&mylut.front(), sizeof(size_t), nchunks, file);
for(int i=0; i< nchunks; ++i)
assert(mylut[i] < myamount);
for(int i=1; i< nchunks; ++i)
assert(mylut[i-1] < mylut[i]);
//compute the global positioning of the compressed chunks within the file
for(int i = 0; i < mylut.size(); ++i)
{
assert(mylut[i] < myamount);
mylut[i] += base;
}
assert(myamount > 0);
base += myamount;
assert(base <= global_header_displacement);
//compute the base for this blocks
/* Shift subdomain-local chunk ids into the global chunk index space. */
for(int i = 0; i < BPS; ++i, ++currblock)
metablocks[currblock].idcompression += nglobalchunks;
lutchunks.insert(lutchunks.end(), mylut.begin(), mylut.end());
}
assert(base == global_header_displacement);
/* Sentinel end offset so lutchunks[id+1] bounds the last chunk too. */
lutchunks.push_back(base);
{
/* Drain any remaining bytes; nothing should be left before EOF. */
int c = fgetc(file);
do
{
//printf("shouldnt be here! 0x%x\n", c);
c = fgetc(file);
}
while (! feof(file) );
}
}
fclose(file);
}
/* Build the (iz, iy, ix) -> compressed-chunk map from the global LUT. */
for(int i = 0; i < NBLOCKS ; ++i)
{
BlockMetadata entry = metablocks[i];
assert(entry.idcompression >= 0);
assert(entry.idcompression < lutchunks.size()-1);
size_t start_address = lutchunks[entry.idcompression];
size_t end_address = lutchunks[entry.idcompression + 1];
assert(start_address < end_address);
assert(end_address <= global_header_displacement);
assert( start_address < global_header_displacement );
CompressedBlock compressedblock = { start_address, end_address - start_address, entry.subid };
meta2subchunk[entry.iz][entry.iy][entry.ix] = compressedblock;
}
//at this point we can access every block
{
//lets try with block 7, 3, 5
/* NOTE(review): despite the comment above, the block actually decoded
   is hard-coded to (ix, iy, iz) = (0, 2, 0), and its contents are
   printed to stdout — this looks like leftover debug/demo code. */
FILE * f = fopen(path.c_str(), "rb");
assert(f);
const int ix = 0;
const int iy = 2;
const int iz = 0;
assert(ix >= 0 && ix < totalbpd[0]);
assert(iy >= 0 && iy < totalbpd[1]);
assert(iz >= 0 && iz < totalbpd[2]);
assert(meta2subchunk.find(iz) != meta2subchunk.end());
assert(meta2subchunk[iz].find(iy) != meta2subchunk[iz].end());
assert(meta2subchunk[iz][iy].find(ix) != meta2subchunk[iz][iy].end());
CompressedBlock compressedchunk = meta2subchunk[iz][iy][ix];
size_t start = compressedchunk.start;
assert(start >= miniheader_bytes);
assert(start < global_header_displacement);
assert(start + compressedchunk.extent < global_header_displacement);
vector<unsigned char> compressedbuf(compressedchunk.extent);
fseek(f, compressedchunk.start, SEEK_SET);
fread(&compressedbuf.front(), compressedchunk.extent, 1, f);
assert(!feof(f));
/* 4 MiB scratch buffer for the decompressed wavelet stream. */
vector<unsigned char> waveletbuf(4 << 20);
const size_t decompressedbytes = zdecompress(&compressedbuf.front(), compressedbuf.size(), &waveletbuf.front(), waveletbuf.size());
int readbytes = 0;
/* Skip the sub-blocks that precede ours inside the shared chunk;
   each is a 4-byte length prefix followed by its payload. */
for(int i = 0; i<compressedchunk.subid; ++i)
{
int nbytes = *(int *)&waveletbuf[readbytes];
readbytes += sizeof(int);
assert(readbytes <= decompressedbytes);
//printf("scanning nbytes...%d\n", nbytes);
readbytes += nbytes;
assert(readbytes <= decompressedbytes);
}
Real MYBLOCK[_BLOCKSIZE_][_BLOCKSIZE_][_BLOCKSIZE_];
{
/* Inverse wavelet transform of our sub-block into MYBLOCK. */
int nbytes = *(int *)&waveletbuf[readbytes];
readbytes += sizeof(int);
assert(readbytes <= decompressedbytes);
WaveletCompressor compressor;
memcpy(compressor.compressed_data(), &waveletbuf[readbytes], nbytes);
readbytes += nbytes;
compressor.decompress(halffloat, nbytes, wtype_read, MYBLOCK);
}
for(int iz = 0; iz< _BLOCKSIZE_; ++iz)
for(int iy = 0; iy< _BLOCKSIZE_; ++iy)
for(int ix = 0; ix< _BLOCKSIZE_; ++ix)
printf("%d %d %d: %e\n", ix, iy, iz, MYBLOCK[iz][iy][ix]);
fclose(f);
}
}
public:
/* Wavelet-coefficient truncation threshold used when compressing. */
void set_threshold(const Real threshold)
{
this->threshold = threshold;
}
/* Wavelet type id used on the write path. */
void set_wtype_write(const int wtype)
{
this->wtype_write = wtype;
}
/* Wavelet type id expected on the read path. */
void set_wtype_read(const int wtype)
{
this->wtype_read = wtype;
}
/* Store wavelet coefficients as half-precision floats. */
void float16()
{
halffloat = true;
}
/* Enable the post-write compression-rate report. */
void verbose()
{
verbosity = true;
}
/* Default-construct the serializer: zeroed write counters, zero threshold,
   full-precision floats, quiet mode. The per-thread workload/profiling
   vectors and work buffers are sized to the maximum OpenMP team size. */
SerializerIO_WaveletCompression_MPI_SimpleBlocking():
written_bytes(0), pending_writes(0),
threshold(0), halffloat(false), verbosity(false),
workload_total(omp_get_max_threads()), workload_fwt(omp_get_max_threads()), workload_encode(omp_get_max_threads()),
workbuffer(omp_get_max_threads())
{
wtype_write = 1; // peh: default wavelet type id — meaning of "1" defined by WaveletsOnInterval; confirm
wtype_read = 1; // peh: must match the type used when the file was written
}
template< int channel >
void Write(GridType & inputGrid, string fileName, IterativeStreamer streamer = IterativeStreamer())
{
/* Channel 0 writes to the plain file name; every other channel gets a
   ".<streamer>.ch<channel>" suffix appended before delegating to _write. */
std::string suffix;
if (channel > 0)
{
std::stringstream tag;
tag << "." << streamer.name() << ".ch" << channel;
suffix = tag.str();
}
_write<channel>(inputGrid, fileName + suffix, streamer);
}
void Read(string fileName, IterativeStreamer streamer = IterativeStreamer())
{
/* Read every channel in turn; channel 0 uses the plain file name, the
   others the same ".<streamer>.ch<channel>" suffix produced by Write(). */
for(int channel = 0; channel < NCHANNELS; ++channel)
{
std::string name = fileName;
if (channel > 0)
{
std::stringstream tag;
tag << "." << streamer.name() << ".ch" << channel;
name += tag.str();
}
_read(name);
}
}
};
|
kmp_atomic_cas.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <stdbool.h>
#include <omp.h>
// Used to detect architecture
#include "../../src/kmp_platform.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef void* ident_t;
extern bool
__kmpc_atomic_bool_1_cas(ident_t *loc, int gtid, char *x, char e, char d);
extern bool
__kmpc_atomic_bool_2_cas(ident_t *loc, int gtid, short *x, short e, short d);
extern bool
__kmpc_atomic_bool_4_cas(ident_t *loc, int gtid, int *x, int e, int d);
extern bool
__kmpc_atomic_bool_8_cas(ident_t *loc, int gtid, long long *x, long long e,
long long d);
extern char
__kmpc_atomic_val_1_cas(ident_t *loc, int gtid, char *x, char e, char d);
extern short
__kmpc_atomic_val_2_cas(ident_t *loc, int gtid, short *x, short e, short d);
extern int
__kmpc_atomic_val_4_cas(ident_t *loc, int gtid, int *x, int e, int d);
extern long long
__kmpc_atomic_val_8_cas(ident_t *loc, int gtid, long long *x, long long e,
long long d);
#ifdef __cplusplus
}
#endif
/* Exercises the libomp __kmpc_atomic_{bool,val}_{1,2,4,8}_cas entry points:
   bool_* return whether the swap happened; val_* return the value observed
   before the (attempted) swap. Each width is driven through one failing
   ("no-op") and one succeeding CAS, then the 4-byte variants are stressed
   with a two-thread ping-pong. Returns the number of failed checks. */
int main() {
int ret = 0;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
bool r;
/* Per width: *0 = wrong expected value, *1 = initial/expected value,
   *2 = desired value, *o = the atomically updated object, *c = captured. */
char c0 = 1;
char c1 = 2;
char c2 = 3;
char co = 2;
char cc = 0;
short s0 = 11;
short s1 = 12;
short s2 = 13;
short so = 12;
short sc = 0;
int i0 = 211;
int i1 = 212;
int i2 = 213;
int io = 212;
int ic = 0;
long long l0 = 3111;
long long l1 = 3112;
long long l2 = 3113;
long long lo = 3112;
long long lc = 0;
// initialize OpenMP runtime library
omp_set_dynamic(0);
// #pragma omp atomic compare update capture
// { r = x == e; if(r) { x = d; } }
// char, co == c1 initially, co == c2 finally
r = __kmpc_atomic_bool_1_cas(NULL, 0, &co, c0, c2); // no-op
if (co != c1) {
ret++; printf("Error bool_1_cas no-op: %d != %d\n", co, c1); }
if (r) { ret++; printf("Error bool_1_cas no-op ret: %d\n", r); }
r = __kmpc_atomic_bool_1_cas(NULL, 0, &co, c1, c2);
if (co != c2) {
ret++; printf("Error bool_1_cas: %d != %d\n", co, c2); }
if (!r) { ret++; printf("Error bool_1_cas ret: %d\n", r); }
// short
r = __kmpc_atomic_bool_2_cas(NULL, 0, &so, s0, s2); // no-op
if (so != s1) {
ret++; printf("Error bool_2_cas no-op: %d != %d\n", so, s1); }
if (r) { ret++; printf("Error bool_2_cas no-op ret: %d\n", r); }
r = __kmpc_atomic_bool_2_cas(NULL, 0, &so, s1, s2);
if (so != s2) {
ret++; printf("Error bool_2_cas: %d != %d\n", so, s2); }
if (!r) { ret++; printf("Error bool_2_cas ret: %d\n", r); }
// int
r = __kmpc_atomic_bool_4_cas(NULL, 0, &io, i0, i2); // no-op
if (io != i1) {
ret++; printf("Error bool_4_cas no-op: %d != %d\n", io, i1); }
if (r) { ret++; printf("Error bool_4_cas no-op ret: %d\n", r); }
r = __kmpc_atomic_bool_4_cas(NULL, 0, &io, i1, i2);
if (io != i2) {
ret++; printf("Error bool_4_cas: %d != %d\n", io, i2); }
if (!r) { ret++; printf("Error bool_4_cas ret: %d\n", r); }
// long long
r = __kmpc_atomic_bool_8_cas(NULL, 0, &lo, l0, l2); // no-op
if (lo != l1) {
ret++; printf("Error bool_8_cas no-op: %lld != %lld\n", lo, l1); }
if (r) { ret++; printf("Error bool_8_cas no-op ret: %d\n", r); }
r = __kmpc_atomic_bool_8_cas(NULL, 0, &lo, l1, l2);
if (lo != l2) {
ret++; printf("Error bool_8_cas: %lld != %lld\n", lo, l2); }
if (!r) { ret++; printf("Error bool_8_cas ret: %d\n", r); }
// #pragma omp atomic compare update capture
// { v = x; if (x == e) { x = d; } }
// char, co == c2 initially, co == c1 finally
/* val_* variants: the captured value must be the pre-CAS contents
   whether or not the swap happened. */
cc = __kmpc_atomic_val_1_cas(NULL, 0, &co, c0, c1); // no-op
if (co != c2) {
ret++; printf("Error val_1_cas no-op: %d != %d\n", co, c2); }
if (cc != c2) {
ret++; printf("Error val_1_cas no-op ret: %d != %d\n", cc, c2); }
cc = __kmpc_atomic_val_1_cas(NULL, 0, &co, c2, c1);
if (co != c1) {
ret++; printf("Error val_1_cas: %d != %d\n", co, c1); }
if (cc != c2) { ret++; printf("Error val_1_cas ret: %d != %d\n", cc, c2); }
// short
sc = __kmpc_atomic_val_2_cas(NULL, 0, &so, s0, s1); // no-op
if (so != s2) {
ret++; printf("Error val_2_cas no-op: %d != %d\n", so, s2); }
if (sc != s2) {
ret++; printf("Error val_2_cas no-op ret: %d != %d\n", sc, s2); }
sc = __kmpc_atomic_val_2_cas(NULL, 0, &so, s2, s1);
if (so != s1) {
ret++; printf("Error val_2_cas: %d != %d\n", so, s1); }
if (sc != s2) {
ret++; printf("Error val_2_cas ret: %d != %d\n", sc, s2); }
// int
ic = __kmpc_atomic_val_4_cas(NULL, 0, &io, i0, i1); // no-op
if (io != i2) {
ret++; printf("Error val_4_cas no-op: %d != %d\n", io, i2); }
if (ic != i2) {
ret++; printf("Error val_4_cas no-op ret: %d != %d\n", ic, i2); }
ic = __kmpc_atomic_val_4_cas(NULL, 0, &io, i2, i1);
if (io != i1) {
ret++; printf("Error val_4_cas: %d != %d\n", io, i1); }
if (ic != i2) {
ret++; printf("Error val_4_cas ret: %d != %d\n", ic, i2); }
// long long
lc = __kmpc_atomic_val_8_cas(NULL, 0, &lo, l0, l1); // no-op
if (lo != l2) {
ret++; printf("Error val_8_cas no-op: %lld != %lld\n", lo, l2); }
if (lc != l2) {
ret++; printf("Error val_8_cas no-op ret: %lld != %lld\n", lc, l2); }
lc = __kmpc_atomic_val_8_cas(NULL, 0, &lo, l2, l1);
if (lo != l1) {
ret++; printf("Error val_8_cas: %lld != %lld\n", lo, l1); }
if (lc != l2) {
ret++; printf("Error val_8_cas ret: %lld != %lld\n", lc, l2); }
// check in parallel
/* Lock-step ping-pong: each of the 5 iterations advances i0 and i1 by
   one, alternating via CAS, so the final state must be i0==6, i1==5. */
i0 = 1;
i1 = 0;
for (io = 0; io < 5; ++io) {
#pragma omp parallel num_threads(2) private(i2, ic, r)
{
if (omp_get_thread_num() == 0) {
// th0 waits for th1 to increment i1, then th0 increments i0
#pragma omp atomic read
i2 = i1;
ic = __kmpc_atomic_val_4_cas(NULL, 0, &i0, i2, i2 + 1);
while(ic != i2) {
#pragma omp atomic read
i2 = i1;
ic = __kmpc_atomic_val_4_cas(NULL, 0, &i0, i2, i2 + 1);
}
} else {
// th1 increments i1 if it is equal to i0 - 1, letting th0 to proceed
r = 0;
while(!r) {
#pragma omp atomic read
i2 = i0;
r = __kmpc_atomic_bool_4_cas(NULL, 0, &i1, i2 - 1, i2);
}
}
}
}
if (i0 != 6 || i1 != 5) {
ret++;
printf("Error in parallel, %d != %d or %d != %d\n", i0, 6, i1, 5);
}
if (ret == 0)
printf("passed\n");
#else
printf("Unsupported architecture, skipping test...\n");
#endif // KMP_ARCH_X86 || KMP_ARCH_X86_64
return ret;
}
|
fx.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% FFFFF X X %
% F X X %
% FFF X %
% F X X %
% F X X %
% %
% %
% MagickCore Image Special Effects Methods %
% %
% Software Design %
% snibgo (Alan Gibson) %
% January 2022 %
% %
% %
% %
% Copyright 1999-2022 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/annotate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/decorate.h"
#include "MagickCore/distort.h"
#include "MagickCore/draw.h"
#include "MagickCore/effect.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/fx.h"
#include "MagickCore/fx-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/layer.h"
#include "MagickCore/list.h"
#include "MagickCore/log.h"
#include "MagickCore/image.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/pixel.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/property.h"
#include "MagickCore/quantum.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resample.h"
#include "MagickCore/resample-private.h"
#include "MagickCore/resize.h"
#include "MagickCore/resource_.h"
#include "MagickCore/splay-tree.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/token.h"
#include "MagickCore/transform.h"
#include "MagickCore/transform-private.h"
#include "MagickCore/utility.h"
#define MaxTokenLen 100
#define RpnInit 100
#define TableExtend 0.1
#define InitNumOprStack 50
#define MinValStackSize 100
#define InitNumUserSymbols 50
typedef long double fxFltType;
typedef enum {
oAddEq,
oSubtractEq,
oMultiplyEq,
oDivideEq,
oPlusPlus,
oSubSub,
oAdd,
oSubtract,
oMultiply,
oDivide,
oModulus,
oUnaryPlus,
oUnaryMinus,
oLshift,
oRshift,
oEq,
oNotEq,
oLtEq,
oGtEq,
oLt,
oGt,
oLogAnd,
oLogOr,
oLogNot,
oBitAnd,
oBitOr,
oBitNot,
oPow,
oQuery,
oColon,
oOpenParen,
oCloseParen,
oOpenBracket,
oCloseBracket,
oOpenBrace,
oCloseBrace,
oAssign,
oNull
} OperatorE;
typedef struct {
OperatorE op;
const char * str;
int precedence; /* Higher number is higher precedence */
int nArgs;
} OperatorT;
static const OperatorT Operators[] = {
{oAddEq, "+=", 12, 1},
{oSubtractEq, "-=", 12, 1},
{oMultiplyEq, "*=", 13, 1},
{oDivideEq, "/=", 13, 1},
{oPlusPlus, "++", 12, 0},
{oSubSub, "--", 12, 0},
{oAdd, "+", 12, 2},
{oSubtract, "-", 12, 2},
{oMultiply, "*", 13, 2},
{oDivide, "/", 13, 2},
{oModulus, "%", 13, 2},
{oUnaryPlus, "+", 14, 1},
{oUnaryMinus, "-", 14, 1},
{oLshift, "<<", 11, 2},
{oRshift, ">>", 11, 2},
{oEq, "==", 9, 2},
{oNotEq, "!=", 9, 2},
{oLtEq, "<=", 10, 2},
{oGtEq, ">=", 10, 2},
{oLt, "<", 10, 2},
{oGt, ">", 10, 2},
{oLogAnd, "&&", 6, 2},
{oLogOr, "||", 5, 2},
{oLogNot, "!", 16, 1},
{oBitAnd, "&", 8, 2},
{oBitOr, "|", 7, 2},
{oBitNot, "~", 16, 1},
{oPow, "^", 15, 2},
{oQuery, "?", 4, 1},
{oColon, ":", 4, 1},
{oOpenParen, "(", 0, 0},
{oCloseParen, ")", 0, 0},
{oOpenBracket, "[", 0, 0},
{oCloseBracket,"]", 0, 0},
{oOpenBrace, "{", 0, 0},
{oCloseBrace, "}", 0, 0},
{oAssign, "=", 3, 1},
{oNull, "onull", 17, 0}
};
typedef enum {
cEpsilon,
cE,
cOpaque,
cPhi,
cPi,
cQuantumRange,
cQuantumScale,
cTransparent,
cMaxRgb,
cNull
} ConstantE;
typedef struct {
ConstantE cons;
fxFltType val;
const char * str;
} ConstantT;
static const ConstantT Constants[] = {
{cEpsilon, MagickEpsilon, "epsilon"},
{cE, 2.7182818284590452354, "e"},
{cOpaque, 1.0, "opaque"},
{cPhi, MagickPHI, "phi"},
{cPi, MagickPI, "pi"},
{cQuantumRange, QuantumRange, "quantumrange"},
{cQuantumScale, QuantumScale, "quantumscale"},
{cTransparent, 0.0, "transparent"},
{cMaxRgb, QuantumRange, "MaxRGB"},
{cNull, 0.0, "cnull"}
};
#define FirstFunc ((FunctionE) (oNull+1))
typedef enum {
fAbs = oNull+1,
#if defined(MAGICKCORE_HAVE_ACOSH)
fAcosh,
#endif
fAcos,
fAiry,
fAlt,
#if defined(MAGICKCORE_HAVE_ASINH)
fAsinh,
#endif
fAsin,
#if defined(MAGICKCORE_HAVE_ATANH)
fAtanh,
#endif
fAtan2,
fAtan,
fCeil,
fChannel,
fClamp,
fCosh,
fCos,
fDebug,
fDrc,
#if defined(MAGICKCORE_HAVE_ERF)
fErf,
#endif
fExp,
fFloor,
fGauss,
fGcd,
fHypot,
fInt,
fIsnan,
#if defined(MAGICKCORE_HAVE_J0)
fJ0,
#endif
#if defined(MAGICKCORE_HAVE_J1)
fJ1,
#endif
#if defined(MAGICKCORE_HAVE_J1)
fJinc,
#endif
fLn,
fLogtwo,
fLog,
fMax,
fMin,
fMod,
fNot,
fPow,
fRand,
fRound,
fSign,
fSinc,
fSinh,
fSin,
fSqrt,
fSquish,
fTanh,
fTan,
fTrunc,
fDo,
fFor,
fIf,
fWhile,
fU,
fU0,
fUP,
fS,
fV,
fP,
fSP,
fVP,
fNull
} FunctionE;
typedef struct {
FunctionE func;
const char * str;
int nArgs;
} FunctionT;
static const FunctionT Functions[] = {
{fAbs, "abs" , 1},
#if defined(MAGICKCORE_HAVE_ACOSH)
{fAcosh, "acosh" , 1},
#endif
{fAcos, "acos" , 1},
{fAiry, "airy" , 1},
{fAlt, "alt" , 1},
#if defined(MAGICKCORE_HAVE_ASINH)
{fAsinh, "asinh" , 1},
#endif
{fAsin, "asin" , 1},
#if defined(MAGICKCORE_HAVE_ATANH)
{fAtanh, "atanh" , 1},
#endif
{fAtan2, "atan2" , 2},
{fAtan, "atan" , 1},
{fCeil, "ceil" , 1},
{fChannel, "channel" , 5},
{fClamp, "clamp" , 1},
{fCosh, "cosh" , 1},
{fCos, "cos" , 1},
{fDebug, "debug" , 1},
{fDrc, "drc" , 2},
#if defined(MAGICKCORE_HAVE_ERF)
{fErf, "erf" , 1},
#endif
{fExp, "exp" , 1},
{fFloor, "floor" , 1},
{fGauss, "gauss" , 2},
{fGcd, "gcd" , 2},
{fHypot, "hypot" , 2},
{fInt, "int" , 1},
{fIsnan, "isnan" , 1},
#if defined(MAGICKCORE_HAVE_J0)
{fJ0, "j0" , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
{fJ1, "j1" , 1},
#endif
#if defined(MAGICKCORE_HAVE_J1)
{fJinc, "jinc" , 1},
#endif
{fLn, "ln" , 1},
{fLogtwo, "logtwo", 1},
{fLog, "log" , 1},
{fMax, "max" , 2},
{fMin, "min" , 2},
{fMod, "mod" , 2},
{fNot, "not" , 1},
{fPow, "pow" , 2},
{fRand, "rand" , 0},
{fRound, "round" , 1},
{fSign, "sign" , 1},
{fSinc, "sinc" , 1},
{fSinh, "sinh" , 1},
{fSin, "sin" , 1},
{fSqrt, "sqrt" , 1},
{fSquish, "squish", 1},
{fTanh, "tanh" , 1},
{fTan, "tan" , 1},
{fTrunc, "trunc" , 1},
{fDo, "do", 2},
{fFor, "for", 3},
{fIf, "if", 3},
{fWhile, "while", 2},
{fU, "u", 1},
{fU0, "u0", 0},
{fUP, "up", 3},
{fS, "s", 0},
{fV, "v", 0},
{fP, "p", 2},
{fSP, "sp", 2},
{fVP, "vp", 2},
{fNull, "fnull" , 0}
};
#define FirstImgAttr ((ImgAttrE) (fNull+1))
typedef enum {
aDepth = fNull+1,
aExtent,
aKurtosis,
aMaxima,
aMean,
aMedian,
aMinima,
aPage,
aPageWid,
aPageHt,
aPageX,
aPageY,
aPrintsize,
aPrintsizeX,
aPrintsizeY,
aQuality,
aRes,
aResX,
aResY,
aSkewness,
aStdDev,
aH,
aN,
aT,
aW,
aZ,
aNull
} ImgAttrE;
typedef struct {
ImgAttrE attr;
const char * str;
int NeedStats;
} ImgAttrT;
static const ImgAttrT ImgAttrs[] = {
{aDepth, "depth", 1},
{aExtent, "extent", 0},
{aKurtosis, "kurtosis", 1},
{aMaxima, "maxima", 1},
{aMean, "mean", 1},
{aMedian, "median", 1},
{aMinima, "minima", 1},
{aPage, "page", 0},
{aPageX, "page.x", 0},
{aPageY, "page.y", 0},
{aPageWid, "page.width", 0},
{aPageHt, "page.height", 0},
{aPrintsize, "printsize", 0},
{aPrintsizeX, "printsize.x", 0},
{aPrintsizeY, "printsize.y", 0},
{aQuality, "quality", 0},
{aRes, "resolution", 0},
{aResX, "resolution.x", 0},
{aResY, "resolution.y", 0},
{aSkewness, "skewness", 1},
{aStdDev, "standard_deviation", 1},
{aH, "h", 0},
{aN, "n", 0},
{aT, "t", 0},
{aW, "w", 0},
{aZ, "z", 0},
{aNull, "anull", 0}
};
#define FirstSym ((SymbolE) (aNull+1))
typedef enum {
sHue = aNull+1,
sIntensity,
sLightness,
sLuma,
sLuminance,
sSaturation,
sA,
sB,
sC,
sG,
sI,
sJ,
sK,
sM,
sO,
sR,
sY,
sNull
} SymbolE;
typedef struct {
SymbolE sym;
const char * str;
} SymbolT;
static const SymbolT Symbols[] = {
{sHue, "hue"},
{sIntensity, "intensity"},
{sLightness, "lightness"},
{sLuma, "luma"},
{sLuminance, "luminance"},
{sSaturation, "saturation"},
{sA, "a"},
{sB, "b"},
{sC, "c"},
{sG, "g"},
{sI, "i"},
{sJ, "j"},
{sK, "k"},
{sM, "m"},
{sO, "o"},
{sR, "r"},
{sY, "y"},
{sNull, "snull"}
};
/*
There is no way to access new value of pixels. This might be a future enhancement, eg "q".
fP, oU and oV can have channel qualifier such as "u.r".
For meta channels, we might also allow numbered channels eg "u.2" or "u.16".
... or have extra argument to p[].
*/
#define FirstCont (sNull+1)
/* Run-time controls are in the RPN, not explicitly in the input string. */
typedef enum {
rGoto = FirstCont,
rIfZeroGoto,
rIfNotZeroGoto,
rCopyFrom,
rCopyTo,
rZerStk,
rNull
} ControlE;
typedef struct {
ControlE cont;
const char * str;
int nArgs;
} ControlT;
static const ControlT Controls[] = {
{rGoto, "goto", 0},
{rIfZeroGoto, "ifzerogoto", 1},
{rIfNotZeroGoto, "ifnotzerogoto", 1},
{rCopyFrom, "copyfrom", 0},
{rCopyTo, "copyto", 1},
{rZerStk, "zerstk", 0},
{rNull, "rnull", 0}
};
#define NULL_ADDRESS -2
typedef struct {
int addrQuery;
int addrColon;
} TernaryT;
typedef struct {
const char * str;
PixelChannel pixChan;
} ChannelT;
#define NO_CHAN_QUAL ((PixelChannel) (-1))
#define THIS_CHANNEL ((PixelChannel) (-2))
#define HUE_CHANNEL ((PixelChannel) (-3))
#define SAT_CHANNEL ((PixelChannel) (-4))
#define LIGHT_CHANNEL ((PixelChannel) (-5))
#define INTENSITY_CHANNEL ((PixelChannel) (-6))
static const ChannelT Channels[] = {
{"r", RedPixelChannel},
{"g", GreenPixelChannel},
{"b", BluePixelChannel},
{"c", CyanPixelChannel},
{"m", MagentaPixelChannel},
{"y", YellowPixelChannel},
{"k", BlackPixelChannel},
{"a", AlphaPixelChannel},
{"o", AlphaPixelChannel},
{"hue", HUE_CHANNEL},
{"saturation", SAT_CHANNEL},
{"lightness", LIGHT_CHANNEL},
{"intensity", INTENSITY_CHANNEL},
{"all", CompositePixelChannel},
{"this", THIS_CHANNEL},
{"", NO_CHAN_QUAL}
};
/* The index into UserSymbols is also the index into run-time UserSymVals.
*/
typedef struct {
char * pex;
size_t len;
} UserSymbolT;
typedef enum {
etOperator,
etConstant,
etFunction,
etImgAttr,
etSymbol,
etColourConstant,
etControl
} ElementTypeE;
static const char * sElementTypes[] = {
"Operator",
"Constant",
"Function",
"ImgAttr",
"Symbol",
"ColConst",
"Control"
};
typedef struct {
ElementTypeE type;
fxFltType
val, val1, val2;
int oprNum;
int nArgs;
MagickBooleanType IsRelative;
MagickBooleanType DoPush;
int EleNdx;
int nDest; /* Number of Elements that "goto" this element */
PixelChannel ChannelQual;
ImgAttrE ImgAttrQual;
char * pExpStart;
int lenExp;
} ElementT;
typedef struct {
RandomInfo * magick_restrict random_info;
int numValStack;
int usedValStack;
fxFltType * ValStack;
fxFltType * UserSymVals;
Quantum * thisPixel;
} fxRtT;
struct _FxInfo {
Image * image;
size_t ImgListLen;
ssize_t ImgNum;
MagickBooleanType NeedStats;
MagickBooleanType NeedHsl;
MagickBooleanType DebugOpt; /* Whether "-debug" option is in effect */
MagickBooleanType ContainsDebug; /* Whether expression contains "debug ()" function */
char * expression;
char * pex;
char ShortExp[MagickPathExtent]; /* for reporting */
int teDepth;
char token[MagickPathExtent];
size_t lenToken;
int numElements;
int usedElements;
ElementT * Elements; /* Elements is read-only at runtime. */
int numUserSymbols;
int usedUserSymbols;
UserSymbolT * UserSymbols;
int numOprStack;
int usedOprStack;
int maxUsedOprStack;
OperatorE * OperatorStack;
ChannelStatistics ** statistics;
int precision;
RandomInfo
**magick_restrict random_infos;
CacheView ** Views;
Image ** Images;
ExceptionInfo * exception;
fxRtT * fxrts;
};
/* Forward declarations for recursion.
*/
static MagickBooleanType TranslateStatementList
(FxInfo * pfx, const char * strLimit, char * chLimit);
static MagickBooleanType TranslateExpression
(FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll);
static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe);
/* Initialize the run-independent parts of an FxInfo for image list "img":
   resets flags, acquires per-thread RNG state, one virtual cache view per
   image in the list, and the image array. Returns MagickFalse (after
   recording a ResourceLimitFatalError on "exception") if any allocation
   fails; partially acquired views are left for the caller to release —
   presumably via DeInitFx, confirm against the call site. */
static MagickBooleanType InitFx (FxInfo * pfx, const Image * img, ExceptionInfo *exception)
{
int i=0;
const Image * next;
pfx->ImgListLen = GetImageListLength (img);
pfx->ImgNum = GetImageIndexInList (img);
pfx->image = (Image *)img;
pfx->NeedStats = MagickFalse;
pfx->NeedHsl = MagickFalse;
pfx->DebugOpt = IsStringTrue (GetImageArtifact (img, "fx:debug"));
pfx->statistics = NULL;
pfx->Views = NULL;
pfx->Images = NULL;
pfx->exception = exception;
pfx->precision = GetMagickPrecision ();
pfx->random_infos = AcquireRandomInfoThreadSet ();
pfx->ContainsDebug = MagickFalse;
/* NOTE(review): sizeof (pfx->Views) is the size of a CacheView** — the
   element type is also a pointer so the byte count is right, but
   sizeof (*pfx->Views) would state the intent. */
pfx->Views =
(CacheView **) AcquireQuantumMemory (pfx->ImgListLen, sizeof (pfx->Views));
if (!pfx->Views) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"Views", "%lu",
pfx->ImgListLen);
return MagickFalse;
}
/* One virtual cache view per image in the list, in list order. */
next = GetFirstImageInList (img);
for ( ; next != (Image *) NULL; next=next->next)
{
pfx->Views[i] = AcquireVirtualCacheView (next, pfx->exception);
if (!pfx->Views[i]) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"Views", "[%i]",
i);
return MagickFalse;
}
i++;
}
pfx->Images = ImageListToArray (img, pfx->exception);
return MagickTrue;
}
/* Release the resources acquired by InitFx: the image array, every cache
   view, the per-thread RNG set and any per-image statistics. Safe to call
   when Views/statistics were never allocated. Always returns MagickTrue. */
static MagickBooleanType DeInitFx (FxInfo * pfx)
{
size_t ndx;
const size_t nImages = GetImageListLength (pfx->image);

if (pfx->Images)
pfx->Images = (Image**) RelinquishMagickMemory (pfx->Images);

if (pfx->Views) {
for (ndx = 0; ndx < nImages; ndx++)
pfx->Views[ndx] = DestroyCacheView (pfx->Views[ndx]);
pfx->Views = (CacheView **) RelinquishMagickMemory (pfx->Views);
}

pfx->random_infos = DestroyRandomInfoThreadSet (pfx->random_infos);

if (pfx->statistics) {
for (ndx = 0; ndx < nImages; ndx++)
pfx->statistics[ndx] = (ChannelStatistics *) RelinquishMagickMemory (pfx->statistics[ndx]);
pfx->statistics = (ChannelStatistics**) RelinquishMagickMemory (pfx->statistics);
}

return MagickTrue;
}
/* Classify an RPN element number into its broad element type. The enums
   share one contiguous numbering: operators end at oNull (oNull itself
   tags numeric constants), then functions up to fNull, image attributes
   up to aNull, symbols up to sNull, run-time controls up to rNull. */
static ElementTypeE TypeOfOpr (int op)
{
if (op > rNull) return (ElementTypeE) 0;
if (op > sNull) return etControl;
if (op > aNull) return etSymbol;
if (op > fNull) return etImgAttr;
if (op > oNull) return etFunction;
if (op == oNull) return etConstant;
return etOperator;
}
/* Copy at most len-1 characters of pExp into pfx->ShortExp for use in
   diagnostics, truncating with "..." after MaxLen characters and at the
   first newline or carriage return. Returns pfx->ShortExp (empty string
   when pExp is NULL or len is 0). */
static char * SetPtrShortExp (FxInfo * pfx, char * pExp, size_t len)
{
#define MaxLen 20
size_t slen;
char * p;
*pfx->ShortExp = '\0';
if (pExp && len) {
/* CopyMagickString returns the length of the source string; when it
   exceeds MaxLen, overwrite the tail with an ellipsis marker. */
slen = CopyMagickString (pfx->ShortExp, pExp, len);
if (slen > MaxLen) {
(void) CopyMagickString (pfx->ShortExp+MaxLen, "...", 4);
}
/* Keep the snippet single-line for readable error messages. */
p = strchr (pfx->ShortExp, '\n');
if (p) (void) CopyMagickString (p, "...", 4);
p = strchr (pfx->ShortExp, '\r');
if (p) (void) CopyMagickString (p, "...", 4);
}
return pfx->ShortExp;
}
/* Convenience wrapper: snapshot the expression at the current parse
   position (pfx->pex) into the diagnostic buffer. */
static char * SetShortExp (FxInfo * pfx)
{
const size_t snipLen = MaxTokenLen-1;
return SetPtrShortExp (pfx, pfx->pex, snipLen);
}
static int FindUserSymbol (FxInfo * pfx, char * name)
/* returns index into pfx->UserSymbols, and thus into pfxrt->UserSymVals,
   or NULL_ADDRESS if not found.
*/
{
int ndx;
const size_t lenName = strlen (name);
for (ndx = 0; ndx < pfx->usedUserSymbols; ndx++) {
const UserSymbolT * const pus = &pfx->UserSymbols[ndx];
/* Symbols are stored as (pointer, length) slices of the expression
   text; compare length first, then the characters (case-insensitive). */
if (pus->len == lenName && LocaleNCompare (name, pus->pex, lenName) == 0)
return ndx;
}
return NULL_ADDRESS;
}
/* Grow the UserSymbols table by the TableExtend factor. On allocation
   failure, records a ResourceLimitFatalError and returns MagickFalse.
   NOTE(review): the ResizeMagickMemory result overwrites pfx->UserSymbols
   directly, so on failure the previous block's pointer is lost — assumed
   acceptable because the error is fatal; confirm against callers. */
static MagickBooleanType ExtendUserSymbols (FxInfo * pfx)
{
pfx->numUserSymbols = (int) ceil (pfx->numUserSymbols * (1 + TableExtend));
pfx->UserSymbols = (UserSymbolT*) ResizeMagickMemory (pfx->UserSymbols, pfx->numUserSymbols * sizeof(UserSymbolT));
if (!pfx->UserSymbols) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"UserSymbols", "%i",
pfx->numUserSymbols);
return MagickFalse;
}
return MagickTrue;
}
/* Append a user symbol recorded as a (pointer, length) slice of the
   expression text, growing the table on demand. Returns the new symbol's
   index, or -1 when the table could not be extended. */
static int AddUserSymbol (FxInfo * pfx, char * pex, size_t len)
{
int ndx;
if (++pfx->usedUserSymbols >= pfx->numUserSymbols) {
if (!ExtendUserSymbols (pfx)) return -1;
}
ndx = pfx->usedUserSymbols - 1;
pfx->UserSymbols[ndx].pex = pex;
pfx->UserSymbols[ndx].len = len;
return ndx;
}
/* Debugging aid: write the names of every operator, function, image
   attribute, symbol and run-time control to "fh", one labelled section
   per element class.
   Fix: the section headers were printed to stderr while the entries went
   to "fh", so the dump was split across two streams whenever the caller
   passed anything other than stderr; all output now goes to "fh". */
static void DumpTables (FILE * fh)
{
int i;
for (i=0; i <= rNull; i++) {
const char * str = "";
if ( i < oNull) str = Operators[i].str;
if (i >= FirstFunc && i < fNull) str = Functions[i-FirstFunc].str;
if (i >= FirstImgAttr && i < aNull) str = ImgAttrs[i-FirstImgAttr].str;
if (i >= FirstSym && i < sNull) str = Symbols[i-FirstSym].str;
if (i >= FirstCont && i < rNull) str = Controls[i-FirstCont].str;
if (i==0 ) fprintf (fh, "Operators:\n ");
else if (i==oNull) fprintf (fh, "\nFunctions:\n ");
else if (i==fNull) fprintf (fh, "\nImage attributes:\n ");
else if (i==aNull) fprintf (fh, "\nSymbols:\n ");
else if (i==sNull) fprintf (fh, "\nControls:\n ");
fprintf (fh, " %s", str);
}
fprintf (fh, "\n");
}
static char * NameOfUserSym (FxInfo * pfx, int ndx, char * buf)
/* Copies the name of user symbol number "ndx" into buf; returns buf.
   buf must be large enough for the name (callers use MagickPathExtent).
*/
{
  assert (ndx >= 0 && ndx < pfx->usedUserSymbols);
  (void) CopyMagickString (buf, pfx->UserSymbols[ndx].pex,
    pfx->UserSymbols[ndx].len+1);
  return buf;
}
static void DumpUserSymbols (FxInfo * pfx, FILE * fh)
/* Debug helper: lists every user symbol, with its index, on fh. */
{
  char name[MagickPathExtent];
  int ndx;

  fprintf (fh, "UserSymbols (%i)\n", pfx->usedUserSymbols);
  for (ndx = 0; ndx < pfx->usedUserSymbols; ndx++)
    fprintf (fh, " %i: '%s'\n", ndx, NameOfUserSym (pfx, ndx, name));
}
/* Allocates the three tables used while compiling an FX expression:
   the user-symbol table, the RPN element list, and the operator stack.
   On any allocation failure a fatal exception is recorded and MagickFalse
   is returned; whatever was allocated is released later by DestroyRPN.
*/
static MagickBooleanType BuildRPN (FxInfo * pfx)
{
pfx->numUserSymbols = InitNumUserSymbols;
pfx->usedUserSymbols = 0;
pfx->UserSymbols = (UserSymbolT*) AcquireMagickMemory (pfx->numUserSymbols * sizeof(UserSymbolT));
if (!pfx->UserSymbols) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"UserSymbols", "%i",
pfx->numUserSymbols);
return MagickFalse;
}
pfx->numElements = RpnInit;
pfx->usedElements = 0;
pfx->Elements = NULL;
pfx->Elements = (ElementT*) AcquireMagickMemory (pfx->numElements * sizeof(ElementT));
if (!pfx->Elements) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"Elements", "%i",
pfx->numElements);
return MagickFalse;
}
pfx->usedOprStack = 0;
pfx->maxUsedOprStack = 0;
pfx->numOprStack = InitNumOprStack;
pfx->OperatorStack = (OperatorE*) AcquireMagickMemory (pfx->numOprStack * sizeof(OperatorE));
if (!pfx->OperatorStack) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"OperatorStack", "%i",
pfx->numOprStack);
return MagickFalse;
}
return MagickTrue;
}
static MagickBooleanType AllocFxRt (FxInfo * pfx, fxRtT * pfxrt)
/* Allocates the per-thread runtime state: random generator, value stack
   and (if any user symbols exist) their value array.
   Records a fatal exception and returns MagickFalse on allocation failure.
*/
{
  int nRnd;
  int i;

  pfxrt->random_info = AcquireRandomInfo ();
  pfxrt->thisPixel = NULL;

  /* Warm up the generator by discarding 20 to 29 values.
     Fix: the cast was previously applied to the random value itself
     ("10 * (int) GetPseudoRandomValue(...)"), which truncated the [0,1)
     value to zero so exactly 20 values were always discarded.
     (Also removed a stray empty statement ";;".) */
  nRnd = 20 + (int) (10 * GetPseudoRandomValue (pfxrt->random_info));
  for (i=0; i < nRnd; i++) (void) GetPseudoRandomValue (pfxrt->random_info);

  pfxrt->usedValStack = 0;
  pfxrt->numValStack = 2 * pfx->maxUsedOprStack;
  if (pfxrt->numValStack < MinValStackSize) pfxrt->numValStack = MinValStackSize;
  pfxrt->ValStack = (fxFltType*) AcquireMagickMemory (pfxrt->numValStack * sizeof(fxFltType));
  if (!pfxrt->ValStack) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), ResourceLimitFatalError,
      "ValStack", "%i",
      pfxrt->numValStack);
    return MagickFalse;
  }
  pfxrt->UserSymVals = NULL;
  if (pfx->usedUserSymbols) {
    /* One value slot per user symbol, zero-initialised. */
    pfxrt->UserSymVals = (fxFltType*) AcquireMagickMemory (pfx->usedUserSymbols * sizeof(fxFltType));
    if (!pfxrt->UserSymVals) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), ResourceLimitFatalError,
        "UserSymVals", "%i",
        pfx->usedUserSymbols);
      return MagickFalse;
    }
    for (i = 0; i < pfx->usedUserSymbols; i++) pfxrt->UserSymVals[i] = (fxFltType) 0;
  }
  return MagickTrue;
}
/* Grows the RPN element list by the factor TableExtend.
   On allocation failure records a fatal exception and returns MagickFalse.
   NOTE(review): assumes ResizeMagickMemory releases the old block on
   failure (as MagickCore documents); otherwise the old list leaks here.
*/
static MagickBooleanType ExtendRPN (FxInfo * pfx)
{
pfx->numElements = (int) ceil (pfx->numElements * (1 + TableExtend));
pfx->Elements = (ElementT*) ResizeMagickMemory (pfx->Elements, pfx->numElements * sizeof(ElementT));
if (!pfx->Elements) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"Elements", "%i",
pfx->numElements);
return MagickFalse;
}
return MagickTrue;
}
static MagickBooleanType inline OprInPlace (int op)
/* True for assignment-style operators (oAddEq through oSubSub) that
   modify a user symbol in place. */
{
  if (op >= oAddEq && op <= oSubSub)
    return MagickTrue;
  return MagickFalse;
}
static const char * OprStr (int oprNum)
/* Returns the printable name of any operator, function, image attribute,
   symbol or control number, or "bad OprStr" when out of range. */
{
  if (oprNum < 0) return "bad OprStr";
  if (oprNum <= oNull) return Operators[oprNum].str;
  if (oprNum <= fNull) return Functions[oprNum-FirstFunc].str;
  if (oprNum <= aNull) return ImgAttrs[oprNum-FirstImgAttr].str;
  if (oprNum <= sNull) return Symbols[oprNum-FirstSym].str;
  if (oprNum <= rNull) return Controls[oprNum-FirstCont].str;
  return "bad OprStr";
}
static MagickBooleanType DumpRPN (FxInfo * pfx, FILE * fh)
/* Debug helper: writes a human-readable listing of the compiled RPN
   program to fh, including goto destination counts and any channel or
   image-attribute qualifiers.  Always returns MagickTrue.
   Fix: the channel-qualifier fields were written to stderr instead of fh,
   splitting output across streams whenever fh != stderr.
*/
{
  int i;

  fprintf (fh, "DumpRPN:");
  fprintf (fh, " numElements=%i", pfx->numElements);
  fprintf (fh, " usedElements=%i", pfx->usedElements);
  fprintf (fh, " maxUsedOprStack=%i", pfx->maxUsedOprStack);
  fprintf (fh, " ImgListLen=%g", (double) pfx->ImgListLen);
  fprintf (fh, " NeedStats=%s", pfx->NeedStats ? "yes" : "no");
  fprintf (fh, " NeedHsl=%s\n", pfx->NeedHsl ? "yes" : "no");

  /* First pass: clear destination counts. */
  for (i=0; i < pfx->usedElements; i++) {
    ElementT * pel = &pfx->Elements[i];
    pel->nDest = 0;
  }
  /* Second pass: count how many branch elements target each element. */
  for (i=0; i < pfx->usedElements; i++) {
    ElementT * pel = &pfx->Elements[i];
    if (pel->oprNum == rGoto || pel->oprNum == rIfZeroGoto || pel->oprNum == rIfNotZeroGoto) {
      if (pel->EleNdx >= 0 && pel->EleNdx < pfx->numElements) {
        ElementT * pelDest = &pfx->Elements[pel->EleNdx];
        pelDest->nDest++;
      }
    }
  }
  /* Third pass: print each element. */
  for (i=0; i < pfx->usedElements; i++) {
    char UserSym[MagickPathExtent];
    ElementT * pel = &pfx->Elements[i];
    const char * str = OprStr (pel->oprNum);
    const char *sRelAbs = "";

    if (pel->oprNum == fP || pel->oprNum == fUP || pel->oprNum == fVP || pel->oprNum == fSP)
      sRelAbs = pel->IsRelative ? "[]" : "{}";

    if (pel->type == etColourConstant)
      fprintf (fh, " %i: %s vals=%.*Lg,%.*Lg,%.*Lg '%s%s' nArgs=%i ndx=%i %s",
        i, sElementTypes[pel->type],
        pfx->precision, pel->val, pfx->precision, pel->val1, pfx->precision, pel->val2,
        str, sRelAbs, pel->nArgs, pel->EleNdx,
        pel->DoPush ? "push" : "NO push");
    else
      fprintf (fh, " %i: %s val=%.*Lg '%s%s' nArgs=%i ndx=%i %s",
        i, sElementTypes[pel->type], pfx->precision, pel->val, str, sRelAbs,
        pel->nArgs, pel->EleNdx,
        pel->DoPush ? "push" : "NO push");

    if (pel->ImgAttrQual != aNull)
      fprintf (fh, " ia=%s", OprStr(pel->ImgAttrQual));

    if (pel->ChannelQual != NO_CHAN_QUAL) {
      /* Was stderr; now written to fh like the rest of the line. */
      if (pel->ChannelQual == THIS_CHANNEL) fprintf (fh, " ch=this");
      else fprintf (fh, " ch=%i", pel->ChannelQual);
    }

    if (pel->oprNum == rCopyTo) {
      fprintf (fh, " CopyTo ==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    } else if (pel->oprNum == rCopyFrom) {
      fprintf (fh, " CopyFrom <== %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    } else if (OprInPlace (pel->oprNum)) {
      fprintf (fh, " <==> %s", NameOfUserSym (pfx, pel->EleNdx, UserSym));
    }
    if (pel->nDest > 0) fprintf (fh, " <==dest(%i)", pel->nDest);
    fprintf (fh, "\n");
  }
  return MagickTrue;
}
static void DestroyRPN (FxInfo * pfx)
/* Releases the operator stack, the RPN element list and the user-symbol
   table, resetting the associated counters and pointers. */
{
  pfx->numOprStack = 0;
  pfx->usedOprStack = 0;
  if (pfx->OperatorStack != NULL)
    pfx->OperatorStack = (OperatorE*) RelinquishMagickMemory (pfx->OperatorStack);

  pfx->numElements = 0;
  pfx->usedElements = 0;
  if (pfx->Elements != NULL)
    pfx->Elements = (ElementT*) RelinquishMagickMemory (pfx->Elements);

  pfx->usedUserSymbols = 0;
  if (pfx->UserSymbols != NULL)
    pfx->UserSymbols = (UserSymbolT*) RelinquishMagickMemory (pfx->UserSymbols);
}
static void DestroyFxRt (fxRtT * pfxrt)
/* Releases one thread's runtime state: value stack, user-symbol values
   and random generator. */
{
  pfxrt->usedValStack = 0;
  if (pfxrt->ValStack != NULL)
    pfxrt->ValStack = (fxFltType*) RelinquishMagickMemory (pfxrt->ValStack);
  if (pfxrt->UserSymVals != NULL)
    pfxrt->UserSymVals = (fxFltType*) RelinquishMagickMemory (pfxrt->UserSymVals);
  pfxrt->random_info = DestroyRandomInfo (pfxrt->random_info);
}
static size_t GetToken (FxInfo * pfx)
/* Returns length of token that starts with an alpha,
   or 0 if it isn't a token that starts with an alpha.
   j0 and j1 have trailing digit.
   Also colours like "gray47" have more trailing digits.
   After initial alpha(s) also allow single "_", eg "standard_deviation".
   Does not advance pfx->pex.
   This splits "mean.r" etc.
   The token (possibly truncated to MaxTokenLen-1) is left in pfx->token
   and its length in pfx->lenToken.
   Fix: ctype calls now cast through unsigned char -- passing a negative
   plain char (e.g. a UTF-8 byte) to isalpha()/isdigit() is undefined
   behaviour.
*/
{
  char * p = pfx->pex;
  size_t len = 0;

  *pfx->token = '\0';
  pfx->lenToken = 0;
  if (!isalpha ((int) ((unsigned char) *p))) return 0;

  /* Regard strings that start "icc-" or "device-",
     followed by any number of alphas,
     as a token.
  */
  if (LocaleNCompare (p, "icc-", 4) == 0) {
    len = 4;
    p += 4;
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
  } else if (LocaleNCompare (p, "device-", 7) == 0) {
    len = 7;
    p += 7;
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
  } else {
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
    if (*p == '_') { len++; p++; }
    while (isalpha ((int) ((unsigned char) *p))) { len++; p++; }
    while (isdigit ((int) ((unsigned char) *p))) { len++; p++; }
  }
  if (len >= MaxTokenLen) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetToken: too long", "%g at '%s'",
      (double) len, SetShortExp(pfx));
    len = MaxTokenLen;
  }
  if (len) {
    (void) CopyMagickString (pfx->token, pfx->pex, (len+1<MaxTokenLen)?len+1:MaxTokenLen);
  }
  pfx->lenToken = strlen (pfx->token);
  return len;
}
static MagickBooleanType TokenMaybeUserSymbol (FxInfo * pfx)
/* True when the current token is at least two characters long and wholly
   alphabetic, so it may name a user symbol.
   Fix: cast through unsigned char -- a negative plain char passed to
   isalpha() is undefined behaviour.
*/
{
  char * p = pfx->token;
  int i = 0;
  while (*p) {
    if (!isalpha ((int) ((unsigned char) *p++))) return MagickFalse;
    i++;
  }
  if (i < 2) return MagickFalse;
  return MagickTrue;
}
/* Appends one element to the RPN program with value "val" and operator
   number "oprNum".  All other fields get defaults; nArgs comes from the
   table that covers oprNum's range (operators, functions, attributes,
   symbols or controls).  Returns MagickFalse if the list cannot grow.
*/
static MagickBooleanType AddElement (FxInfo * pfx, fxFltType val, int oprNum)
{
ElementT * pel;
assert (oprNum <= rNull);
if (++pfx->usedElements >= pfx->numElements) {
if (!ExtendRPN (pfx)) return MagickFalse;
}
pel = &pfx->Elements[pfx->usedElements-1];
pel->type = TypeOfOpr (oprNum);
pel->val = val;
pel->val1 = (fxFltType) 0;
pel->val2 = (fxFltType) 0;
pel->oprNum = oprNum;
pel->DoPush = MagickTrue;
pel->EleNdx = 0;
pel->ChannelQual = NO_CHAN_QUAL;
pel->ImgAttrQual = aNull;
pel->nDest = 0;
pel->pExpStart = NULL;
pel->lenExp = 0;
/* Argument count depends on which table the operator number falls in;
   attributes and symbols never take arguments. */
if (oprNum <= oNull) pel->nArgs = Operators[oprNum].nArgs;
else if (oprNum <= fNull) pel->nArgs = Functions[oprNum-FirstFunc].nArgs;
else if (oprNum <= aNull) pel->nArgs = 0;
else if (oprNum <= sNull) pel->nArgs = 0;
else pel->nArgs = Controls[oprNum-FirstCont].nArgs;
return MagickTrue;
}
/* Appends a control element (goto / conditional goto / zero-stack) whose
   EleNdx is the branch target (possibly NULL_ADDRESS, patched later).
   Branch-type elements do not push a value.
*/
static MagickBooleanType AddAddressingElement (FxInfo * pfx, int oprNum, int EleNdx)
{
ElementT * pel;
if (!AddElement (pfx, (fxFltType) 0, oprNum)) return MagickFalse;
pel = &pfx->Elements[pfx->usedElements-1];
pel->EleNdx = EleNdx;
if (oprNum == rGoto || oprNum == rIfZeroGoto || oprNum == rIfNotZeroGoto
|| oprNum == rZerStk)
{
pel->DoPush = MagickFalse;
}
/* Note: for() may or may not need pushing,
   depending on whether the value is needed, eg "for(...)+2" or debug(for(...)).
*/
return MagickTrue;
}
static MagickBooleanType AddColourElement (FxInfo * pfx, fxFltType val0, fxFltType val1, fxFltType val2)
/* Appends a colour constant carrying three component values to the RPN
   program.  Returns MagickFalse if the element list cannot grow. */
{
  ElementT * pNew;

  if (!AddElement (pfx, val0, oNull)) return MagickFalse;
  pNew = &pfx->Elements[pfx->usedElements-1];
  pNew->type = etColourConstant;
  pNew->val1 = val1;
  pNew->val2 = val2;
  return MagickTrue;
}
static void inline SkipSpaces (FxInfo * pfx)
/* Advances the parse pointer past any white-space.
   Fix: cast through unsigned char -- a negative plain char passed to
   isspace() is undefined behaviour.
*/
{
  while (isspace ((int) ((unsigned char) *pfx->pex))) pfx->pex++;
}
/* Skips white-space, then returns (without consuming) the next character. */
static char inline PeekChar (FxInfo * pfx)
{
SkipSpaces (pfx);
return *pfx->pex;
}
/* Skips white-space, then tests (case-insensitively, without consuming)
   whether the input continues with "str". */
static MagickBooleanType inline PeekStr (FxInfo * pfx, const char * str)
{
SkipSpaces (pfx);
return (LocaleNCompare (pfx->pex, str, strlen(str))==0 ? MagickTrue : MagickFalse);
}
static MagickBooleanType ExpectChar (FxInfo * pfx, char c)
/* Consumes the next non-space character if it equals "c"; otherwise
   records an OptionError and returns MagickFalse. */
{
  if (PeekChar (pfx) == c) {
    pfx->pex++;
    return MagickTrue;
  }
  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "Expected char", "'%c' at '%s'", c, SetShortExp (pfx));
  return MagickFalse;
}
static int MaybeXYWH (FxInfo * pfx, ImgAttrE * pop)
/* If ".x" or ".y" or ".width" or ".height" increments *pop and returns 1 to 4 .
   Otherwise returns 0.
   Only aPage, aPrintsize and aRes take these qualifiers, and aPrintsize
   and aRes accept only ".x"/".y".
*/
{
int ret=0;
if (*pop != aPage && *pop != aPrintsize && *pop != aRes) return 0;
if (PeekChar (pfx) != '.') return 0;
if (!ExpectChar (pfx, '.')) return 0;
(void) GetToken (pfx);
if (LocaleCompare ("x", pfx->token)==0) ret=1;
else if (LocaleCompare ("y", pfx->token)==0) ret=2;
else if (LocaleCompare ("width", pfx->token)==0) ret=3;
else if (LocaleCompare ("height", pfx->token)==0) ret=4;
if (!ret)
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Invalid 'x' or 'y' or 'width' or 'height' token=", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
/* aPage accepts all four qualifiers; the others only x and y. */
if (*pop == aPage) (*pop) = (ImgAttrE) (*pop + ret);
else {
if (ret > 2) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Invalid 'width' or 'height' token=", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
} else {
(*pop) = (ImgAttrE) (*pop + ret);
}
}
/* The token is swallowed even on the error paths above. */
pfx->pex+=pfx->lenToken;
return ret;
}
/* Grows the operator stack by the factor TableExtend.
   On allocation failure records a fatal exception and returns MagickFalse.
   NOTE(review): assumes ResizeMagickMemory releases the old block on
   failure (as MagickCore documents); otherwise the old stack leaks here.
*/
static MagickBooleanType ExtendOperatorStack (FxInfo * pfx)
{
pfx->numOprStack = (int) ceil (pfx->numOprStack * (1 + TableExtend));
pfx->OperatorStack = (OperatorE*) ResizeMagickMemory (pfx->OperatorStack, pfx->numOprStack * sizeof(OperatorE));
if (!pfx->OperatorStack) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"OprStack", "%i",
pfx->numOprStack);
return MagickFalse;
}
return MagickTrue;
}
static MagickBooleanType PushOperatorStack (FxInfo * pfx, int op)
/* Pushes "op" onto the operator stack, growing it as needed, and tracks
   the high-water mark (used later to size the value stack). */
{
  pfx->usedOprStack++;
  if (pfx->usedOprStack >= pfx->numOprStack && !ExtendOperatorStack (pfx))
    return MagickFalse;
  pfx->OperatorStack[pfx->usedOprStack-1] = (OperatorE) op;
  if (pfx->maxUsedOprStack < pfx->usedOprStack)
    pfx->maxUsedOprStack = pfx->usedOprStack;
  return MagickTrue;
}
static OperatorE GetLeadingOp (FxInfo * pfx)
/* Classifies the character at the parse position as a unary prefix
   operator or '('; returns oNull for anything else. */
{
  switch (*pfx->pex) {
    case '-': return oUnaryMinus;
    case '+': return oUnaryPlus;
    case '~': return oBitNot;
    case '!': return oLogNot;
    case '(': return oOpenParen;
    default:  return oNull;
  }
}
static MagickBooleanType inline OprIsUnaryPrefix (OperatorE op)
/* True for the unary prefix operators: -, +, ~ and !. */
{
  switch (op) {
    case oUnaryMinus:
    case oUnaryPlus:
    case oBitNot:
    case oLogNot:
      return MagickTrue;
    default:
      return MagickFalse;
  }
}
static MagickBooleanType TopOprIsUnaryPrefix (FxInfo * pfx)
/* True when the operator stack is non-empty and its top entry is a
   unary prefix operator. */
{
  if (pfx->usedOprStack == 0) return MagickFalse;
  return OprIsUnaryPrefix (pfx->OperatorStack[pfx->usedOprStack-1]);
}
static MagickBooleanType PopOprOpenParen (FxInfo * pfx, OperatorE op)
/* Pops the top of the operator stack if it equals "op" (an open paren,
   bracket or brace).  Returns MagickFalse when the stack is empty or the
   top entry does not match. */
{
  if (pfx->usedOprStack > 0 &&
      pfx->OperatorStack[pfx->usedOprStack-1] == op)
  {
    pfx->usedOprStack--;
    return MagickTrue;
  }
  return MagickFalse;
}
static int GetCoordQualifier (FxInfo * pfx, int op)
/* Returns -1 if invalid CoordQualifier, +1 if valid and appropriate.
   A coordinate qualifier is a "p"/"P" after u, v or s; on success the
   whole p[...]/p{...} expression is parsed onto the RPN program.
*/
{
/* Only u, v and s may take a coordinate qualifier. */
if (op != fU && op != fV && op != fS) return -1;
(void) GetToken (pfx);
if (pfx->lenToken != 1) {
return -1;
}
if (*pfx->token != 'p' && *pfx->token != 'P') return -1;
if (!GetFunction (pfx, fP)) return -1;
return 1;
}
/* Parses a channel qualifier (eg ".r", ".lightness") following one of
   the pixel functions or an image attribute.  On success the token is
   swallowed and the channel is returned; otherwise NO_CHAN_QUAL.
   HLS channels are rejected for image attributes.
*/
static PixelChannel GetChannelQualifier (FxInfo * pfx, int op)
{
if (op == fU || op == fV || op == fP ||
op == fUP || op == fVP ||
op == fS || (op >= FirstImgAttr && op <= aNull)
)
{
const ChannelT * pch = &Channels[0];
(void) GetToken (pfx);
/* Linear scan of the channel-name table (terminated by an empty str). */
while (*pch->str) {
if (LocaleCompare (pch->str, pfx->token)==0) {
if (op >= FirstImgAttr && op <= (OperatorE)aNull &&
(pch->pixChan == HUE_CHANNEL ||
pch->pixChan == SAT_CHANNEL ||
pch->pixChan == LIGHT_CHANNEL)
)
{
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Can't have image attribute with HLS qualifier at", "'%s'",
SetShortExp(pfx));
return NO_CHAN_QUAL;
}
pfx->pex += pfx->lenToken;
return pch->pixChan;
}
pch++;
}
}
return NO_CHAN_QUAL;
}
/* Matches the current token against the image-attribute table.
   On a match the token is swallowed, NeedStats is flagged where required,
   and any ".x"/".y"/".width"/".height" sub-qualifier is folded into the
   returned value.  Returns aNull when no attribute matches.
   aPage/aPrintsize/aRes require a sub-qualifier, so if one of those is
   still the result an error is recorded.
*/
static ImgAttrE GetImgAttrToken (FxInfo * pfx)
{
ImgAttrE ia = aNull;
const char * iaStr;
for (ia = FirstImgAttr; ia < aNull; ia=(ImgAttrE) (ia+1)) {
iaStr = ImgAttrs[ia-FirstImgAttr].str;
if (LocaleCompare (iaStr, pfx->token)==0) {
pfx->pex += strlen(pfx->token);
if (ImgAttrs[ia-FirstImgAttr].NeedStats == 1) pfx->NeedStats = MagickTrue;
MaybeXYWH (pfx, &ia);
break;
}
}
if (ia == aPage || ia == aPrintsize || ia == aRes) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Attribute", "'%s' needs qualifier at '%s'",
iaStr, SetShortExp(pfx));
}
return ia;
}
/* Parses an image-attribute qualifier (eg ".mean") after u, v, p or s.
   Returns the attribute, or aNull when none is present or op does not
   accept one.
*/
static ImgAttrE GetImgAttrQualifier (FxInfo * pfx, int op)
{
ImgAttrE ia = aNull;
if (op == (OperatorE)fU || op == (OperatorE)fV || op == (OperatorE)fP || op == (OperatorE)fS) {
(void) GetToken (pfx);
if (pfx->lenToken == 0) {
return aNull;
}
ia = GetImgAttrToken (pfx);
}
return ia;
}
static MagickBooleanType IsQualifier (FxInfo * pfx)
/* Consumes a '.' (the qualifier introducer) if one follows; returns
   whether it was found. */
{
  if (PeekChar (pfx) != '.') return MagickFalse;
  pfx->pex++;
  return MagickTrue;
}
static ssize_t GetProperty (FxInfo * pfx, fxFltType *val)
/* Parses a "%[...]" image-property reference at the parse position and
   stores its numeric value in *val.
   Returns number of characters to swallow.
   "-1" means invalid input
   "0" means no relevant input (don't swallow, but not an error)
   Fix: the string returned by InterpretImageProperties was leaked on the
   "not a number" error path; it is now destroyed before returning.
*/
{
  if (PeekStr (pfx, "%[")) {
    int level = 0;
    size_t len;
    char sProperty [MagickPathExtent];
    char * p = pfx->pex + 2;

    /* Find the matching ']', allowing nested '[' ... ']' pairs. */
    while (*p) {
      if (*p == '[') level++;
      else if (*p == ']') {
        if (level == 0) break;
        level--;
      }
      p++;
    }
    if (!*p || level != 0) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "After '%[' expected ']' at", "'%s'",
        SetShortExp(pfx));
      return -1;
    }
    len = (size_t) (p - pfx->pex + 1);
    if (len > MaxTokenLen) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "Too much text between '%[' and ']' at", "'%s'",
        SetShortExp(pfx));
      return -1;
    }
    (void) CopyMagickString (sProperty, pfx->pex, len+1);
    sProperty[len] = '\0';
    {
      char * tailptr;
      char * text;
      text = InterpretImageProperties (pfx->image->image_info, pfx->image,
        sProperty, pfx->exception);
      if (!text) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Unknown property", "'%s' at '%s'",
          sProperty, SetShortExp(pfx));
        return -1;
      }
      *val = strtold (text, &tailptr);
      if (text == tailptr) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Property", "'%s' is not a number at '%s'",
          text, SetShortExp(pfx));
        /* Fix: previously leaked on this path. */
        text = DestroyString(text);
        return -1;
      }
      text = DestroyString(text);
    }
    return ((ssize_t) len);
  }
  return 0;
}
static size_t inline GetConstantColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2)
/* Finds named colour such as "blue" and colorspace function such as "lab(10,20,30)".
   Returns number of characters to swallow.
   The three colour components (normalised to [0,1]) are stored in
   *v0, *v1, *v2.  Returns 0 when the token is not a colour.
*/
{
PixelInfo
colour;
ExceptionInfo
*dummy_exception = AcquireExceptionInfo ();
char
*p;
MagickBooleanType
IsGray,
IsIcc,
IsDev;
char ColSp[MagickPathExtent];
(void) CopyMagickString (ColSp, pfx->token, MaxTokenLen);
/* Strip a trailing 'a'/'A' so eg "rgba" matches the base colorspace name. */
p = ColSp + pfx->lenToken - 1;
if (*p == 'a' || *p == 'A') *p = '\0';
(void) GetPixelInfo (pfx->image, &colour);
/* "gray" is both a colorspace and a named colour. */
IsGray = (LocaleCompare (ColSp, "gray") == 0) ? MagickTrue : MagickFalse;
IsIcc = (LocaleCompare (ColSp, "icc-color") == 0) ? MagickTrue : MagickFalse;
IsDev = (LocaleNCompare (ColSp, "device-", 7) == 0) ? MagickTrue : MagickFalse;
/* QueryColorCompliance will raise a warning if it isn't a colour, so we discard any exceptions.
*/
if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, dummy_exception) || IsGray) {
/* Not a plain named colour: try a colorspace function "name(...)". */
ssize_t type = ParseCommandOption (MagickColorspaceOptions, MagickFalse, ColSp);
if (type >= 0 || IsIcc || IsDev) {
char * q = pfx->pex + pfx->lenToken;
while (isspace((int) ((unsigned char) *q))) q++;
if (*q == '(') {
size_t lenfun;
char sFunc[MagickPathExtent];
while (*q && *q != ')') q++;
lenfun = (size_t) (q - pfx->pex + 1);
if (lenfun > MaxTokenLen) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"lenfun too long", "'%g' at '%s'",
(double) lenfun, SetShortExp(pfx));
dummy_exception = DestroyExceptionInfo (dummy_exception);
return 0;
}
(void) CopyMagickString (sFunc, pfx->pex, lenfun+1);
if (QueryColorCompliance (sFunc, AllCompliance, &colour, dummy_exception)) {
*v0 = colour.red / QuantumRange;
*v1 = colour.green / QuantumRange;
*v2 = colour.blue / QuantumRange;
dummy_exception = DestroyExceptionInfo (dummy_exception);
return lenfun;
}
}
}
if (!IsGray) {
dummy_exception = DestroyExceptionInfo (dummy_exception);
return 0;
}
}
/* Plain named colour (or "gray"): return the token's own length. */
*v0 = colour.red / QuantumRange;
*v1 = colour.green / QuantumRange;
*v2 = colour.blue / QuantumRange;
dummy_exception = DestroyExceptionInfo (dummy_exception);
return strlen (pfx->token);
}
static ssize_t inline GetHexColour (FxInfo * pfx, fxFltType *v0, fxFltType *v1, fxFltType *v2)
/* Parses a "#rrggbb"-style colour at the parse position, storing the
   normalised components in *v0, *v1, *v2.
   Returns number of characters to swallow.
   Negative return means it starts with '#', but invalid hex number.
   Fix: ctype calls now cast through unsigned char -- passing a negative
   plain char to isxdigit()/isalpha() is undefined behaviour.
*/
{
  char * p;
  size_t len;
  PixelInfo colour;

  if (*pfx->pex != '#') return 0;
  /* find end of hex digits. */
  p = pfx->pex + 1;
  while (isxdigit ((int) ((unsigned char) *p))) p++;
  if (isalpha ((int) ((unsigned char) *p))) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Bad hex number at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }
  len = (size_t) (p - pfx->pex);
  if (len < 1) return 0;
  if (len >= MaxTokenLen) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Hex colour too long at", "'%s'",
      SetShortExp(pfx));
    return -1;
  }
  (void) CopyMagickString (pfx->token, pfx->pex, len+1);
  (void) GetPixelInfo (pfx->image, &colour);
  if (!QueryColorCompliance (pfx->token, AllCompliance, &colour, pfx->exception)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "QueryColorCompliance rejected", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));
    return -1;
  }
  *v0 = colour.red / QuantumRange;
  *v1 = colour.green / QuantumRange;
  *v2 = colour.blue / QuantumRange;
  return (ssize_t) len;
}
static MagickBooleanType GetFunction (FxInfo * pfx, FunctionE fe)
/* Parses a function call "fe": the opening delimiter (parenthesis, or
   bracket/brace for p/u/v/s), the argument expressions, the closing
   delimiter, then any channel / coordinate / image-attribute qualifiers.
   Emits the RPN elements for the call, including the addressing elements
   that implement while/do/for/if control flow.
   Fix: corrected the "bad compsite qualifier" error message typo
   ("compsite" -> "composite").
*/
{
  /* A function, so get open-parens, n args, close-parens
  */
  const char * funStr = Functions[fe-FirstFunc].str;
  int nArgs = Functions[fe-FirstFunc].nArgs;
  char chLimit = ')';
  char expChLimit = ')';
  const char *strLimit = ",)";
  OperatorE pushOp = oOpenParen;
  char * pExpStart;
  int lenExp = 0;
  int FndArgs = 0;
  int ndx0 = NULL_ADDRESS, ndx1 = NULL_ADDRESS, ndx2 = NULL_ADDRESS, ndx3 = NULL_ADDRESS;
  MagickBooleanType coordQual = MagickFalse;
  PixelChannel chQual = NO_CHAN_QUAL;
  ImgAttrE iaQual = aNull;

  pfx->pex += pfx->lenToken;

  /* p may use "{...}" (absolute) or "[...]" (relative); u may use "[...]";
     with no bracket at all these take zero arguments. */
  if (fe == fP) {
    char p = PeekChar (pfx);
    if (p=='{') {
      (void) ExpectChar (pfx, '{');
      pushOp = oOpenBrace;
      strLimit = ",}";
      chLimit = '}';
      expChLimit = '}';
    } else if (p=='[') {
      (void) ExpectChar (pfx, '[');
      pushOp = oOpenBracket;
      strLimit = ",]";
      chLimit = ']';
      expChLimit = ']';
    } else {
      nArgs = 0;
      chLimit = ']';
      expChLimit = ']';
    }
  } else if (fe == fU) {
    char p = PeekChar (pfx);
    if (p=='[') {
      (void) ExpectChar (pfx, '[');
      pushOp = oOpenBracket;
      strLimit = ",]";
      chLimit = ']';
      expChLimit = ']';
    } else {
      nArgs = 0;
      chLimit = ']';
      expChLimit = ']';
    }
  } else if (fe == fV || fe == fS) {
    nArgs = 0;
    pushOp = oOpenBracket;
    chLimit = ']';
    expChLimit = ']';
  } else {
    if (!ExpectChar (pfx, '(')) return MagickFalse;
  }
  if (!PushOperatorStack (pfx, pushOp)) return MagickFalse;

  pExpStart = pfx->pex;
  ndx0 = pfx->usedElements;
  if (fe==fDo) {
    (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx1+1 */
  }
  while (nArgs > 0) {
    int FndOne = 0;
    if (TranslateStatementList (pfx, strLimit, &chLimit)) {
      FndOne = 1;
    } else {
      /* Maybe don't break because other expressions may be not empty. */
      if (!chLimit) break;
      if (fe == fP || fe == fS || fe == fIf) {
        (void) AddElement (pfx, (fxFltType) 0, oNull);
        FndOne = 1;
      }
    }
    if (strchr (strLimit, chLimit)==NULL) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s' expected one of '%s' after expression but found '%c' at '%s'",
        funStr, strLimit, chLimit ? chLimit : ' ', SetShortExp(pfx));
      return MagickFalse;
    }
    if (FndOne) {
      FndArgs++;
      nArgs--;
    }
    /* Emit the control-flow skeleton as each argument of the iteration
       and conditional functions is completed; the forward branch targets
       (still NULL_ADDRESS) are patched after the loop. */
    switch (FndArgs) {
      case 1:
        ndx1 = pfx->usedElements;
        if (fe==fWhile) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */
        } else if (fe==fDo) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2+1 */
        } else if (fe==fFor) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
        } else if (fe==fIf) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx2 + 1 */
          pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from if() */
        }
        break;
      case 2:
        ndx2 = pfx->usedElements;
        if (fe==fWhile) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
          (void) AddAddressingElement (pfx, rGoto, ndx0);
        } else if (fe==fDo) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
          (void) AddAddressingElement (pfx, rGoto, ndx0 + 1);
        } else if (fe==fFor) {
          (void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS); /* address will be ndx3 */
          pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue; /* we may need return from for() */
          (void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS);
        } else if (fe==fIf) {
          (void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS); /* address will be ndx3 */
        }
        break;
      case 3:
        if (fe==fFor) {
          pfx->Elements[pfx->usedElements-1].DoPush = MagickFalse;
          (void) AddAddressingElement (pfx, rGoto, ndx1);
        }
        ndx3 = pfx->usedElements;
        break;
      default:
        break;
    }
    if (chLimit == expChLimit) {
      lenExp = pfx->pex - pExpStart - 1;
      break;
    }
  } /* end while args of a function */
  if (chLimit && chLimit != expChLimit && chLimit != ',' ) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s' expected '%c', found '%c' at '%s'",
      funStr, expChLimit, chLimit ? chLimit : ' ', SetShortExp(pfx));
    return MagickFalse;
  }

  /* p, s and u default any missing arguments to zero. */
  if (fe == fP || fe == fS || fe == fU) {
    while (FndArgs < Functions[fe-FirstFunc].nArgs) {
      (void) AddElement (pfx, (fxFltType) 0, oNull);
      FndArgs++;
    }
  }

  if (FndArgs > Functions[fe-FirstFunc].nArgs) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s' expected %i arguments, found '%i' at '%s'",
      funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx));
    return MagickFalse;
  }
  if (FndArgs < Functions[fe-FirstFunc].nArgs) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s' expected %i arguments, found too few (%i) at '%s'",
      funStr, Functions[fe-FirstFunc].nArgs, FndArgs, SetShortExp(pfx));
    return MagickFalse;
  }
  if (fe != fS && fe != fV && FndArgs == 0 && Functions[fe-FirstFunc].nArgs == 0) {
    /* This is for "rand()" and similar. */
    chLimit = expChLimit;
    if (!ExpectChar (pfx, ')')) return MagickFalse;
  }
  if (chLimit != expChLimit) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "For function", "'%s', arguments don't end with '%c' at '%s'",
      funStr, expChLimit, SetShortExp(pfx));
    return MagickFalse;
  }
  if (!PopOprOpenParen (pfx, pushOp)) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Bug: For function", "'%s' tos not '%s' at '%s'",
      funStr, Operators[pushOp].str, SetShortExp(pfx));
    return MagickFalse;
  }

  /* Optional qualifiers: ".p" coordinate, ".channel", ".attribute". */
  if (IsQualifier (pfx)) {

    if (fe == fU || fe == fV || fe == fS) {

      coordQual = (GetCoordQualifier (pfx, fe) == 1) ? MagickTrue : MagickFalse;

      if (coordQual) {

        /* Remove last element, which should be fP */
        ElementT * pel = &pfx->Elements[pfx->usedElements-1];
        if (pel->oprNum != fP) {
          (void) ThrowMagickException (
            pfx->exception, GetMagickModule(), OptionError,
            "Bug: For function", "'%s' last element not 'p' at '%s'",
            funStr, SetShortExp(pfx));
          return MagickFalse;
        }
        chQual = pel->ChannelQual;
        expChLimit = (pel->IsRelative) ? ']' : '}';
        pfx->usedElements--;
        if (fe == fU) fe = fUP;
        else if (fe == fV) fe = fVP;
        else if (fe == fS) fe = fSP;
        funStr = Functions[fe-FirstFunc].str;
      }
    }

    if ( chQual == NO_CHAN_QUAL &&
         (fe == fP || fe == fS || fe == fSP || fe == fU || fe == fUP || fe == fV || fe == fVP)
       )
    {
      chQual = GetChannelQualifier (pfx, fe);
    }

    if (chQual == NO_CHAN_QUAL && (fe == fU || fe == fV || fe == fS)) {
      /* Note: we don't allow "p.mean" etc. */
      iaQual = GetImgAttrQualifier (pfx, fe);
    }
    if (IsQualifier (pfx) && chQual == NO_CHAN_QUAL && iaQual != aNull) {
      chQual = GetChannelQualifier (pfx, fe);
    }
    if (coordQual && iaQual != aNull) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s', can't have qualifiers 'p' and image attribute '%s' at '%s'",
        funStr, pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!coordQual && chQual == NO_CHAN_QUAL && iaQual == aNull) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s', bad qualifier '%s' at '%s'",
        funStr, pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    if (!coordQual && chQual == CompositePixelChannel && iaQual == aNull) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "For function", "'%s', bad composite qualifier '%s' at '%s'",
        funStr, pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }

    if (chQual == HUE_CHANNEL || chQual == SAT_CHANNEL || chQual == LIGHT_CHANNEL) {
      pfx->NeedHsl = MagickTrue;

      if (iaQual >= FirstImgAttr && iaQual < aNull) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), OptionError,
          "Can't have image attribute with HLS qualifier at", "'%s'",
          SetShortExp(pfx));
        return MagickFalse;
      }
    }
  }

  /* Patch the forward branch targets recorded above. */
  if (fe==fWhile) {
    pfx->Elements[ndx1].EleNdx = ndx2+1;
  } else if (fe==fDo) {
    pfx->Elements[ndx0].EleNdx = ndx1+1;
    pfx->Elements[ndx1].EleNdx = ndx2+1;
  } else if (fe==fFor) {
    pfx->Elements[ndx2].EleNdx = ndx3;
  } else if (fe==fIf) {
    pfx->Elements[ndx1].EleNdx = ndx2 + 1;
    pfx->Elements[ndx2].EleNdx = ndx3;
  } else {
    if (fe == fU && iaQual == aNull) {
      /* "u[0]" is the common case: use the dedicated fU0 opcode. */
      ElementT * pel = &pfx->Elements[pfx->usedElements-1];
      if (pel->type == etConstant && pel->val == 0.0) {
        pfx->usedElements--;
        fe = fU0;
      }
    }
    (void) AddElement (pfx, (fxFltType) 0, fe);
    if (fe == fP || fe == fU || fe == fU0 || fe == fUP ||
        fe == fV || fe == fVP || fe == fS || fe == fSP)
    {
      ElementT * pel = &pfx->Elements[pfx->usedElements-1];
      pel->IsRelative = (expChLimit == ']' ? MagickTrue : MagickFalse);
      if (chQual >= 0) pel->ChannelQual = chQual;
      if (iaQual != aNull && (fe == fU || fe == fV || fe == fS)) {
        /* Note: we don't allow "p[2,3].mean" or "p.mean" etc. */
        pel->ImgAttrQual = iaQual;
      }
    }
  }

  if (pExpStart && lenExp) {
    ElementT * pel = &pfx->Elements[pfx->usedElements-1];
    pel->pExpStart = pExpStart;
    pel->lenExp = lenExp;
  }

  if (fe == fDebug)
    pfx->ContainsDebug = MagickTrue;

  return MagickTrue;
}
static MagickBooleanType IsStealth (int op)
/* True for internal-only operators that users may not write directly:
   the u0/uP/sP/vP variants and every control operator. */
{
  if (op == fU0 || op == fUP || op == fSP || op == fVP) return MagickTrue;
  if (op >= FirstCont && op <= rNull) return MagickTrue;
  return MagickFalse;
}
static MagickBooleanType GetOperand (
FxInfo * pfx, MagickBooleanType * UserSymbol, MagickBooleanType * NewUserSymbol, int * UserSymNdx,
MagickBooleanType * needPopAll)
{
*NewUserSymbol = *UserSymbol = MagickFalse;
*UserSymNdx = NULL_ADDRESS;
SkipSpaces (pfx);
if (!*pfx->pex) return MagickFalse;
(void) GetToken (pfx);
if (pfx->lenToken==0) {
/* Try '(' or unary prefix
*/
OperatorE op = GetLeadingOp (pfx);
if (op==oOpenParen) {
char chLimit = '\0';
if (!PushOperatorStack (pfx, op)) return MagickFalse;
pfx->pex++;
if (!TranslateExpression (pfx, ")", &chLimit, needPopAll)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Empty expression in parentheses at", "'%s'",
SetShortExp(pfx));
}
if (chLimit != ')') {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"'(' but no ')' at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
/* Top of opr stack should be '('. */
if (!PopOprOpenParen (pfx, oOpenParen)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bug: tos not '(' at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
return MagickTrue;
} else if (OprIsUnaryPrefix (op)) {
if (!PushOperatorStack (pfx, op)) return MagickFalse;
pfx->pex++;
SkipSpaces (pfx);
if (!*pfx->pex) return MagickFalse;
if (!GetOperand (pfx, UserSymbol, NewUserSymbol, UserSymNdx, needPopAll)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"After unary, bad operand at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (*NewUserSymbol) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"After unary, NewUserSymbol at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (*UserSymbol) {
(void) AddAddressingElement (pfx, rCopyFrom, *UserSymNdx);
*UserSymNdx = NULL_ADDRESS;
*UserSymbol = MagickFalse;
*NewUserSymbol = MagickFalse;
}
(void) GetToken (pfx);
return MagickTrue;
} else if (*pfx->pex == '#') {
fxFltType v0=0, v1=0, v2=0;
ssize_t lenToken = GetHexColour (pfx, &v0, &v1, &v2);
if (lenToken < 0) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bad hex number at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
} else if (lenToken > 0) {
(void) AddColourElement (pfx, v0, v1, v2);
pfx->pex+=lenToken;
}
return MagickTrue;
}
/* Try a constant number.
*/
{
char * tailptr;
ssize_t lenOptArt;
fxFltType val = strtold (pfx->pex, &tailptr);
if (pfx->pex != tailptr) {
pfx->pex = tailptr;
if (*tailptr) {
/* Could have "prefix" K, Ki, M etc.
See https://en.wikipedia.org/wiki/Metric_prefix
and https://en.wikipedia.org/wiki/Binary_prefix
*/
double Pow = 0.0;
const char Prefices[] = "yzafpnum.kMGTPEZY";
const char * pSi = strchr (Prefices, *tailptr);
if (pSi && *pSi != '.') Pow = (pSi - Prefices) * 3 - 24;
else if (*tailptr == 'c') Pow = -2;
else if (*tailptr == 'h') Pow = 2;
else if (*tailptr == 'k') Pow = 3;
if (Pow != 0.0) {
if (*(++pfx->pex) == 'i') {
val *= pow (2.0, Pow/0.3);
pfx->pex++;
} else {
val *= pow (10.0, Pow);
}
}
}
(void) AddElement (pfx, val, oNull);
return MagickTrue;
}
val = (fxFltType) 0;
lenOptArt = GetProperty (pfx, &val);
if (lenOptArt < 0) return MagickFalse;
if (lenOptArt > 0) {
(void) AddElement (pfx, val, oNull);
pfx->pex += lenOptArt;
return MagickTrue;
}
}
} /* end of lenToken==0 */
if (pfx->lenToken > 0) {
/* Try a constant
*/
{
ConstantE ce;
for (ce = (ConstantE)0; ce < cNull; ce=(ConstantE) (ce+1)) {
const char * ceStr = Constants[ce].str;
if (LocaleCompare (ceStr, pfx->token)==0) {
break;
}
}
if (ce != cNull) {
(void) AddElement (pfx, Constants[ce].val, oNull);
pfx->pex += pfx->lenToken;
return MagickTrue;
}
}
/* Try a function
*/
{
FunctionE fe;
for (fe = FirstFunc; fe < fNull; fe=(FunctionE) (fe+1)) {
const char * feStr = Functions[fe-FirstFunc].str;
if (LocaleCompare (feStr, pfx->token)==0) {
break;
}
}
if (fe == fV && pfx->ImgListLen < 2) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Symbol 'v' but fewer than two images at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (IsStealth (fe)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Function", "'%s' not permitted at '%s'",
pfx->token, SetShortExp(pfx));
}
if (fe == fDo || fe == fFor || fe == fIf || fe == fWhile) {
*needPopAll = MagickTrue;
}
if (fe != fNull) return (GetFunction (pfx, fe));
}
/* Try image attribute
*/
{
ImgAttrE ia = GetImgAttrToken (pfx);
if (ia != aNull) {
fxFltType val = 0;
(void) AddElement (pfx, val, ia);
if (ImgAttrs[ia-FirstImgAttr].NeedStats==1) {
if (IsQualifier (pfx)) {
PixelChannel chQual = GetChannelQualifier (pfx, ia);
ElementT * pel;
if (chQual == NO_CHAN_QUAL) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bad channel qualifier at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
/* Adjust the element */
pel = &pfx->Elements[pfx->usedElements-1];
pel->ChannelQual = chQual;
}
}
return MagickTrue;
}
}
/* Try symbol
*/
{
SymbolE se;
for (se = FirstSym; se < sNull; se=(SymbolE) (se+1)) {
const char * seStr = Symbols[se-FirstSym].str;
if (LocaleCompare (seStr, pfx->token)==0) {
break;
}
}
if (se != sNull) {
fxFltType val = 0;
(void) AddElement (pfx, val, se);
pfx->pex += pfx->lenToken;
if (se==sHue || se==sSaturation || se==sLightness) pfx->NeedHsl = MagickTrue;
return MagickTrue;
}
}
/* Try constant colour.
*/
{
fxFltType v0, v1, v2;
size_t ColLen = GetConstantColour (pfx, &v0, &v1, &v2);
if (ColLen > 0) {
(void) AddColourElement (pfx, v0, v1, v2);
pfx->pex+=ColLen;
return MagickTrue;
}
}
/* Try image artifact.
*/
{
const char *artifact;
artifact = GetImageArtifact (pfx->image, pfx->token);
if (artifact != (const char *) NULL) {
char * tailptr;
fxFltType val = strtold (artifact, &tailptr);
if (pfx->token == tailptr) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Artifact", "'%s' has value '%s', not a number, at '%s'",
pfx->token, artifact, SetShortExp(pfx));
return MagickFalse;
}
(void) AddElement (pfx, val, oNull);
pfx->pex+=pfx->lenToken;
return MagickTrue;
}
}
/* Try user symbols. If it is, don't AddElement yet.
*/
if (TokenMaybeUserSymbol (pfx)) {
*UserSymbol = MagickTrue;
*UserSymNdx = FindUserSymbol (pfx, pfx->token);
if (*UserSymNdx == NULL_ADDRESS) {
*UserSymNdx = AddUserSymbol (pfx, pfx->pex, pfx->lenToken);
*NewUserSymbol = MagickTrue;
} else {
}
pfx->pex += pfx->lenToken;
return MagickTrue;
}
}
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected operand at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
static MagickBooleanType inline IsRealOperator (OperatorE op)
/* A "real" operator is anything outside the bracketing pseudo-operator
   range oOpenParen..oCloseBrace (parens, brackets, braces).
*/
{
  if ((op >= oOpenParen) && (op <= oCloseBrace))
    return MagickFalse;
  return MagickTrue;
}
static MagickBooleanType inline ProcessTernaryOpr (FxInfo * pfx, TernaryT * ptern)
/* Ternary operator "... ? ... : ..."
returns false iff we have exception
*/
{
/* Inspects the top of the operator stack.  If it is '?', pop it and emit a
   conditional forward jump (rIfZeroGoto) whose destination is patched later
   by ResolveTernaryAddresses; record its element index in ptern->addrQuery.
   If it is ':', pop it and emit an unconditional jump (rGoto), recording its
   index in ptern->addrColon.  A duplicate '?' or ':' (or ':' without a prior
   '?') within the same sub-expression raises an exception.
*/
if (pfx->OperatorStack[pfx->usedOprStack-1] == oQuery) {
if (ptern->addrQuery != NULL_ADDRESS) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Already have '?' in sub-expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (ptern->addrColon != NULL_ADDRESS) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Already have ':' in sub-expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
/* Pop the '?' and plant the conditional jump; its target is unknown
   until the ':' arm has been translated. */
pfx->usedOprStack--;
ptern->addrQuery = pfx->usedElements;
(void) AddAddressingElement (pfx, rIfZeroGoto, NULL_ADDRESS);
/* address will be one after the Colon address. */
}
else if (pfx->OperatorStack[pfx->usedOprStack-1] == oColon) {
if (ptern->addrQuery == NULL_ADDRESS) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Need '?' in sub-expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (ptern->addrColon != NULL_ADDRESS) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Already have ':' in sub-expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
/* Pop the ':'.  Mark the preceding element (the end of the "true" arm)
   to push its value, then plant the jump over the "false" arm. */
pfx->usedOprStack--;
ptern->addrColon = pfx->usedElements;
pfx->Elements[pfx->usedElements-1].DoPush = MagickTrue;
(void) AddAddressingElement (pfx, rGoto, NULL_ADDRESS);
/* address will be after the subexpression */
}
return MagickTrue;
}
static MagickBooleanType GetOperator (
FxInfo * pfx,
MagickBooleanType * Assign, MagickBooleanType * Update, MagickBooleanType * IncrDecr)
/* Parses the next operator from the expression text (shunting-yard step).
   Sets *Assign for '=', *Update for in-place operators (+=, -=, ...), and
   *IncrDecr for '++'/'--'.  Pops higher-precedence operators off the
   operator stack into the element list, handles ')' by unwinding to the
   matching '(', and otherwise pushes the new operator.  Advances pfx->pex
   past the operator.  Returns MagickFalse (with an exception) on error.
*/
{
OperatorE op;
size_t len = 0;
MagickBooleanType DoneIt = MagickFalse;
SkipSpaces (pfx);
/* Linear scan of the operator table; longest-first ordering of Operators[]
   is assumed so that e.g. "+=" is found before "+" — TODO confirm table order. */
for (op = (OperatorE)0; op != oNull; op=(OperatorE) (op+1)) {
const char * opStr = Operators[op].str;
len = strlen(opStr);
if (LocaleNCompare (opStr, pfx->pex, len)==0) {
break;
}
}
if (!IsRealOperator (op)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Not a real operator at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (op==oNull) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected operator at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
*Assign = (op==oAssign) ? MagickTrue : MagickFalse;
*Update = OprInPlace (op);
*IncrDecr = (op == oPlusPlus || op == oSubSub) ? MagickTrue : MagickFalse;
/* while top of OperatorStack is not empty and is not open-parens or assign,
and top of OperatorStack is higher precedence than new op,
then move top of OperatorStack to Element list.
*/
while (pfx->usedOprStack > 0) {
OperatorE top = pfx->OperatorStack[pfx->usedOprStack-1];
int precTop, precNew;
if (top == oOpenParen || top == oAssign || OprInPlace (top)) break;
precTop = Operators[top].precedence;
precNew = Operators[op].precedence;
/* Assume left associativity.
If right assoc, this would be "<=".
*/
if (precTop < precNew) break;
(void) AddElement (pfx, (fxFltType) 0, top);
pfx->usedOprStack--;
}
/* If new op is close paren, and stack top is open paren,
remove stack top.
*/
if (op==oCloseParen) {
if (pfx->usedOprStack == 0) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Found ')' but nothing on stack at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (pfx->OperatorStack[pfx->usedOprStack-1] != oOpenParen) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Found ')' but no '(' on stack at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
pfx->usedOprStack--;
DoneIt = MagickTrue;
}
if (!DoneIt) {
if (!PushOperatorStack (pfx, op)) return MagickFalse;
}
/* len still holds the length of the matched operator string. */
pfx->pex += len;
return MagickTrue;
}
static MagickBooleanType ResolveTernaryAddresses (FxInfo * pfx, TernaryT * ptern)
/* Patches the forward-jump targets recorded for a "?:" pair, then clears
   the pair.  A '?' or ':' recorded without its partner is an error.
   No-op when neither was seen.
*/
{
  if (ptern->addrQuery != NULL_ADDRESS) {
    if (ptern->addrColon == NULL_ADDRESS) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), OptionError,
        "'?' with no corresponding ':'", "'%s' at '%s'",
        pfx->token, SetShortExp(pfx));
      return MagickFalse;
    }
    /* The conditional jump lands just past the ':' jump; the ':' jump
       lands after the whole sub-expression. */
    pfx->Elements[ptern->addrQuery].EleNdx = ptern->addrColon + 1;
    pfx->Elements[ptern->addrColon].EleNdx = pfx->usedElements;
    ptern->addrQuery = NULL_ADDRESS;
    ptern->addrColon = NULL_ADDRESS;
    return MagickTrue;
  }
  if (ptern->addrColon != NULL_ADDRESS) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "':' with no corresponding '?'", "'%s' at '%s'",
      pfx->token, SetShortExp(pfx));
    return MagickFalse;
  }
  return MagickTrue;
}
static MagickBooleanType TranslateExpression (
FxInfo * pfx, const char * strLimit, char * chLimit, MagickBooleanType * needPopAll)
/* Translates one expression into RPN elements, stopping at end of text or
   at any character in strLimit (the terminator found is stored in *chLimit,
   '\0' if none).  Alternates GetOperand / GetOperator, handling user-symbol
   assignment and update, unary prefixes, and the ternary "?:" via
   ProcessTernaryOpr / ResolveTernaryAddresses.  May recurse (for "a=expr"
   and for the ':' arm).  Returns MagickFalse on error or empty expression.
*/
{
/* There should be only one New per expression (oAssign), but can be many Old.
*/
MagickBooleanType UserSymbol, NewUserSymbol;
int UserSymNdx0, UserSymNdx1;
MagickBooleanType
Assign = MagickFalse,
Update = MagickFalse,
IncrDecr = MagickFalse;
int StartEleNdx;
TernaryT ternary;
ternary.addrQuery = NULL_ADDRESS;
ternary.addrColon = NULL_ADDRESS;
pfx->teDepth++;
*chLimit = '\0';
StartEleNdx = pfx->usedElements-1;
if (StartEleNdx < 0) StartEleNdx = 0;
SkipSpaces (pfx);
/* Empty text, or an immediate terminator, is "no expression": consume the
   terminator (if any) and report failure so the caller can diagnose. */
if (!*pfx->pex) {
pfx->teDepth--;
return MagickFalse;
}
if (strchr(strLimit,*pfx->pex)!=NULL) {
*chLimit = *pfx->pex;
pfx->pex++;
pfx->teDepth--;
return MagickFalse;
}
if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx0, needPopAll)) return MagickFalse;
SkipSpaces (pfx);
/* Loop through Operator, Operand, Operator, Operand, ...
*/
while (*pfx->pex && (!*strLimit || (strchr(strLimit,*pfx->pex)==NULL))) {
if (!GetOperator (pfx, &Assign, &Update, &IncrDecr)) return MagickFalse;
SkipSpaces (pfx);
/* A brand-new user symbol is only legal directly before '='. */
if (NewUserSymbol && !Assign) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected assignment after new UserSymbol", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
if (!UserSymbol && Assign) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Attempted assignment to non-UserSymbol", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
if (!UserSymbol && Update) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Attempted update to non-UserSymbol", "'%s' at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
/* "sym = expr" or "sym += expr": translate the right-hand side by
   recursion (but '++'/'--' take no right-hand side). */
if (UserSymbol && (Assign || Update) && !IncrDecr) {
if (!TranslateExpression (pfx, strLimit, chLimit, needPopAll)) return MagickFalse;
if (!*pfx->pex) break;
if (!*strLimit) break;
if (strchr(strLimit,*chLimit)!=NULL) break;
}
/* A plain (read) use of a user symbol becomes an rCopyFrom element. */
if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
ElementT * pel;
(void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
UserSymNdx0 = NULL_ADDRESS;
pel = &pfx->Elements[pfx->usedElements-1];
pel->DoPush = MagickTrue;
}
/* Flush any pending unary prefix operators now the operand is emitted. */
if (UserSymbol) {
while (TopOprIsUnaryPrefix (pfx)) {
OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];
(void) AddElement (pfx, (fxFltType) 0, op);
pfx->usedOprStack--;
}
}
if (!ProcessTernaryOpr (pfx, &ternary)) return MagickFalse;
/* After ':' the false-arm is translated by recursion, then we are done. */
if (ternary.addrColon != NULL_ADDRESS) {
if (!TranslateExpression (pfx, ",);", chLimit, needPopAll)) return MagickFalse;
break;
}
UserSymbol = NewUserSymbol = MagickFalse;
if (!*pfx->pex) break;
if (*strLimit && (strchr(strLimit,*pfx->pex)!=NULL) ) break;
if (IncrDecr) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"'++' and '--' must be the final operators in an expression at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
if (!GetOperand (pfx, &UserSymbol, &NewUserSymbol, &UserSymNdx1, needPopAll)) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Expected operand at", "'%s'",
SetShortExp(pfx));
return MagickFalse;
}
SkipSpaces (pfx);
if (NewUserSymbol && !Assign) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"NewUserSymbol", "'%s' after non-assignment operator at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
/* An existing user symbol used as an operand reads via rCopyFrom. */
if (UserSymbol && !NewUserSymbol) {
(void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx1);
UserSymNdx1 = NULL_ADDRESS;
}
UserSymNdx0 = UserSymNdx1;
}
/* Expression ended with an unconsumed user-symbol read: emit it now. */
if (UserSymbol && !Assign && !Update && UserSymNdx0 != NULL_ADDRESS) {
ElementT * pel;
if (NewUserSymbol) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"NewUserSymbol", "'%s' needs assignment operator at '%s'",
pfx->token, SetShortExp(pfx));
return MagickFalse;
}
(void) AddAddressingElement (pfx, rCopyFrom, UserSymNdx0);
pel = &pfx->Elements[pfx->usedElements-1];
pel->DoPush = MagickTrue;
}
/* Consume the terminator character, if we stopped at one. */
if (*pfx->pex && !*chLimit && (strchr(strLimit,*pfx->pex)!=NULL)) {
*chLimit = *pfx->pex;
pfx->pex++;
}
/* Drain remaining operators into the element list.  '=' becomes rCopyTo;
   in-place operators keep their element but gain the symbol index. */
while (pfx->usedOprStack) {
OperatorE op = pfx->OperatorStack[pfx->usedOprStack-1];
if (op == oOpenParen || op == oOpenBracket || op == oOpenBrace) {
break;
}
if ( (op==oAssign && !Assign) || (OprInPlace(op) && !Update) ) {
break;
}
pfx->usedOprStack--;
(void) AddElement (pfx, (fxFltType) 0, op);
if (op == oAssign) {
/* Adjust last element, by deletion and add.
*/
pfx->usedElements--;
(void) AddAddressingElement (pfx, rCopyTo, UserSymNdx0);
break;
} else if (OprInPlace (op)) {
/* Modify latest element.
*/
pfx->Elements[pfx->usedElements-1].EleNdx = UserSymNdx0;
break;
}
}
(void) ResolveTernaryAddresses (pfx, &ternary);
pfx->teDepth--;
/* At outermost depth, a control-flow function (if/for/while/do) requested
   a stack reset: emit it once. */
if (!pfx->teDepth && *needPopAll) {
(void) AddAddressingElement (pfx, rZerStk, NULL_ADDRESS);
*needPopAll = MagickFalse;
}
return MagickTrue;
}
static MagickBooleanType TranslateStatement (FxInfo * pfx, char * strLimit, char * chLimit)
/* Translates one statement (an expression optionally terminated by a
   character in strLimit).  Returns MagickFalse on error or empty input.
*/
{
  MagickBooleanType NeedPopAll = MagickFalse;

  SkipSpaces (pfx);
  if (*pfx->pex == '\0')
    return MagickFalse;
  if (!TranslateExpression (pfx, strLimit, chLimit, &NeedPopAll))
    return MagickFalse;
  if ((pfx->usedElements != 0) && (*chLimit == ';')) {
    /* A ';'-terminated statement discards its value, so the final element
       need not push.
       FIXME: not necessarily the last element,
       but the last _executed_ element, eg "goto" in a "for()".,
       Pending a fix, we will use rZerStk.
    */
    ElementT * pLast = &pfx->Elements[pfx->usedElements-1];
    pLast->DoPush = MagickFalse;
  }
  return MagickTrue;
}
static MagickBooleanType TranslateStatementList (FxInfo * pfx, const char * strLimit, char * chLimit)
/* Translates a ';'-separated list of statements, stopping at end of text
   or at a terminator in strLimit.  ';' is always added to the local copy
   of the terminator set if absent.
*/
{
#define MAX_SLIMIT 10
  char sLimits[MAX_SLIMIT];

  SkipSpaces (pfx);
  if (*pfx->pex == '\0')
    return MagickFalse;
  (void) CopyMagickString (sLimits, strLimit, MAX_SLIMIT-1);
  if (strchr(strLimit,';')==NULL)
    (void) ConcatenateMagickString (sLimits, ";", MAX_SLIMIT);
  /* Keep translating while statements end in ';' and text remains. */
  do {
    if (!TranslateStatement (pfx, sLimits, chLimit))
      return MagickFalse;
  } while (*pfx->pex != '\0' && *chLimit == ';');
  return MagickTrue;
}
/*--------------------------------------------------------------------
Run-time
*/
static MagickBooleanType CollectStatistics (FxInfo * pfx)
/* Allocates pfx->statistics and fills it with per-channel statistics for
   every image in the list, scaling each statistic by QuantumScale so the
   interpreter works in the normalised 0..1 domain.
   Returns MagickFalse (with an exception) if the array cannot be allocated.
*/
{
Image * img = GetFirstImageInList (pfx->image);
size_t imgNum=0;
pfx->statistics = (ChannelStatistics**) AcquireMagickMemory (pfx->ImgListLen * sizeof (ChannelStatistics *));
if (!pfx->statistics) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), ResourceLimitFatalError,
"Statistics", "%lu",
pfx->ImgListLen);
return MagickFalse;
}
for (;;) {
int ch;
ChannelStatistics * cs = GetImageStatistics (img, pfx->exception);
pfx->statistics[imgNum] = cs;
/* NOTE(review): cs is not checked for NULL before use — presumably
   GetImageStatistics cannot fail here; confirm. */
/* <= MaxPixelChannels: the statistics array includes the composite
   channel entry at index MaxPixelChannels. */
for (ch=0; ch <= (int) MaxPixelChannels; ch++) {
cs[ch].mean *= QuantumScale;
cs[ch].median *= QuantumScale;
cs[ch].maxima *= QuantumScale;
cs[ch].minima *= QuantumScale;
cs[ch].standard_deviation *= QuantumScale;
cs[ch].kurtosis *= QuantumScale;
cs[ch].skewness *= QuantumScale;
cs[ch].entropy *= QuantumScale;
}
if (++imgNum == pfx->ImgListLen) break;
img = GetNextImageInList (img);
assert (img != (Image *) NULL);
}
return MagickTrue;
}
static MagickBooleanType inline PushVal (FxInfo * pfx, fxRtT * pfxrt, fxFltType val, int addr)
/* Pushes val onto the run-time value stack.  On overflow, raises an
   exception (addr identifies the offending element) and returns MagickFalse.
*/
{
  if (pfxrt->usedValStack < pfxrt->numValStack) {
    pfxrt->ValStack[pfxrt->usedValStack] = val;
    pfxrt->usedValStack++;
    return MagickTrue;
  }
  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "ValStack overflow at addr=", "%i",
    addr);
  return MagickFalse;
}
static inline fxFltType PopVal (FxInfo * pfx, fxRtT * pfxrt, int addr)
/* Pops and returns the top of the run-time value stack.  On underflow,
   raises an exception (addr identifies the offending element) and
   returns zero.
*/
{
  if (pfxrt->usedValStack > 0) {
    pfxrt->usedValStack--;
    return pfxrt->ValStack[pfxrt->usedValStack];
  }
  (void) ThrowMagickException (
    pfx->exception, GetMagickModule(), OptionError,
    "ValStack underflow at addr=", "%i",
    addr);
  return (fxFltType) 0;
}
static fxFltType inline ImageStat (
FxInfo * pfx, ssize_t ImgNum, PixelChannel channel, ImgAttrE ia)
/* Returns the value of image attribute ia for image ImgNum, using channel
   for the statistic attributes (mean, median, etc).  Returns -99.0 for an
   unknown attribute (after raising an exception).
   NOTE(review): cs is only set when pfx->NeedStats is true; the statistic
   cases dereference cs unconditionally — presumably parsing guarantees
   NeedStats whenever such an attribute occurs; confirm.
*/
{
ChannelStatistics * cs = NULL;
assert (channel >= 0 && channel <= MaxPixelChannels);
if (pfx->NeedStats) cs = pfx->statistics[ImgNum];
switch (ia) {
case aDepth:
return (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
break;
case aExtent:
/* NOTE(review): uses pfx->image (the base image), not Images[ImgNum]. */
return (fxFltType) GetBlobSize (pfx->image);
break;
case aKurtosis:
return cs[channel].kurtosis;
break;
case aMaxima:
return cs[channel].maxima;
break;
case aMean:
return cs[channel].mean;
break;
case aMedian:
return cs[channel].median;
break;
case aMinima:
return cs[channel].minima;
break;
case aPage:
/* Do nothing */
break;
case aPageWid:
return (fxFltType) pfx->Images[ImgNum]->page.width;
case aPageHt:
return (fxFltType) pfx->Images[ImgNum]->page.height;
case aPageX:
return (fxFltType) pfx->Images[ImgNum]->page.x;
case aPageY:
return (fxFltType) pfx->Images[ImgNum]->page.y;
case aPrintsize:
/* Do nothing */
break;
case aPrintsizeX:
/* columns / resolution, guarded against zero resolution. */
return (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.x) * pfx->Images[ImgNum]->columns;
case aPrintsizeY:
return (fxFltType) PerceptibleReciprocal (pfx->Images[ImgNum]->resolution.y) * pfx->Images[ImgNum]->rows;
case aQuality:
return (fxFltType) pfx->Images[ImgNum]->quality;
case aRes:
/* Do nothing */
break;
case aResX:
return pfx->Images[ImgNum]->resolution.x;
case aResY:
return pfx->Images[ImgNum]->resolution.y;
case aSkewness:
return cs[channel].skewness;
case aStdDev:
return cs[channel].standard_deviation;
case aH:
return (fxFltType) pfx->Images[ImgNum]->rows;
case aN:
return (fxFltType) pfx->ImgListLen;
case aT: /* image index in list */
return (fxFltType) ImgNum;
case aW:
return (fxFltType) pfx->Images[ImgNum]->columns;
case aZ:
return (fxFltType) GetImageDepth (pfx->Images[ImgNum], pfx->exception);
break;
default:
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Unknown ia=", "%i",
ia);
}
/* Sentinel for "attribute has no scalar value here" (aPage, aRes, ...). */
return -99.0;
}
static fxFltType inline FxGcd (fxFltType x, fxFltType y, const size_t depth)
/* Floating-point greatest common divisor by Euclid's repeated-remainder
   method, written iteratively.  The iteration budget (`d`) reproduces the
   recursion-depth limit of the recursive formulation exactly: a swap step
   consumes one unit of the budget, just as a swap recursion would.
*/
{
#define FxMaxFunctionDepth 200
  fxFltType a = x;
  fxFltType b = y;
  size_t d = depth;

  for (;;) {
    if (a < b) {
      /* Ensure a >= b (counts as one step, like the recursive swap). */
      fxFltType t = a;
      a = b;
      b = t;
      d++;
      continue;
    }
    if ((fabs((double) b) < 0.001) || (d >= FxMaxFunctionDepth))
      return a;
    {
      fxFltType r = a - b*floor((double) (a/b));
      a = b;
      b = r;
      d++;
    }
  }
}
static ssize_t inline ChkImgNum (FxInfo * pfx, fxFltType f)
/* Rounds f to the nearest integer and normalises it to an index into the
   image list; negative values count back from the end of the list.
   On an out-of-range result an exception is raised and the index is
   clamped to the nearest valid image, so callers that do not check the
   exception (e.g. fU) cannot index outside pfx->Images[].
*/
{
  ssize_t i = (ssize_t) floor ((double) f + 0.5);
  if (i < 0) i += (ssize_t) pfx->ImgListLen;
  if (i < 0 || (size_t)i >= pfx->ImgListLen) {
    /* Format fix: i is signed (ssize_t); "%lu" on a negative value was
       undefined behaviour in the varargs call. */
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "ImgNum", "%ld bad for ImgListLen %lu",
      (long) i, (unsigned long) pfx->ImgListLen);
    /* Clamp so subsequent pfx->Images[i] / pfx->Views[i] accesses stay
       in bounds even though the expression was erroneous. */
    i = (i < 0) ? 0 : (ssize_t) pfx->ImgListLen - 1;
  }
  return i;
}
/* Channel to use when the element reads an image attribute/statistic:
   no qualifier means the composite channel; THIS_CHANNEL means the channel
   currently being processed; otherwise the explicit qualifier. */
#define WHICH_ATTR_CHAN \
(pel->ChannelQual == NO_CHAN_QUAL) ? CompositePixelChannel : \
(pel->ChannelQual == THIS_CHANNEL) ? channel : pel->ChannelQual
/* Channel to use for an ordinary pixel read: "no qualifier", THIS_CHANNEL
   and the composite pseudo-channel all map to a concrete channel (red when
   the current channel is the composite); otherwise the explicit qualifier. */
#define WHICH_NON_ATTR_CHAN \
(pel->ChannelQual == NO_CHAN_QUAL || \
pel->ChannelQual == THIS_CHANNEL || \
pel->ChannelQual == CompositePixelChannel \
) ? (channel == CompositePixelChannel ? RedPixelChannel: channel) \
: pel->ChannelQual
static fxFltType GetHslFlt (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy,
  int channel)
/* Returns the hue, saturation or lightness (selected by channel) of image
   ImgNum at the fractional coordinates fx,fy, using the image's configured
   pixel interpolation.  Returns 0.0 for any other channel.
*/
{
  Image * img = pfx->Images[ImgNum];
  /* Bug fix: initialise so a failed interpolation cannot leave these read
     uninitialised by ConvertRGBToHSL below. */
  double red = 0.0, green = 0.0, blue = 0.0;
  double hue=0, saturation=0, lightness=0;
  MagickBooleanType okay = MagickTrue;

  if(!InterpolatePixelChannel (img, pfx->Views[ImgNum], RedPixelChannel, img->interpolate,
    (double) fx, (double) fy, &red, pfx->exception)) okay = MagickFalse;
  if(!InterpolatePixelChannel (img, pfx->Views[ImgNum], GreenPixelChannel, img->interpolate,
    (double) fx, (double) fy, &green, pfx->exception)) okay = MagickFalse;
  if(!InterpolatePixelChannel (img, pfx->Views[ImgNum], BluePixelChannel, img->interpolate,
    (double) fx, (double) fy, &blue, pfx->exception)) okay = MagickFalse;
  if (!okay)
    /* Format fix: ImgNum is signed (ssize_t); "%lu" was undefined
       behaviour for negative values. */
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetHslFlt failure", "%ld %Lg,%Lg %i", (long) ImgNum, fx, fy, channel);
  ConvertRGBToHSL (
    red, green, blue,
    &hue, &saturation, &lightness);
  if (channel == HUE_CHANNEL) return hue;
  if (channel == SAT_CHANNEL) return saturation;
  if (channel == LIGHT_CHANNEL) return lightness;
  return 0.0;
}
static fxFltType GetHslInt (FxInfo * pfx, ssize_t ImgNum, const ssize_t imgx, const ssize_t imgy, int channel)
/* Returns the hue, saturation or lightness (selected by channel) of the
   pixel at integer coordinates imgx,imgy of image ImgNum.  Returns 0.0 for
   any other channel, or when the pixel cannot be fetched.
*/
{
  Image * img = pfx->Images[ImgNum];
  double hue=0, saturation=0, lightness=0;
  const Quantum * p = GetCacheViewVirtualPixels (pfx->Views[ImgNum], imgx, imgy, 1, 1, pfx->exception);
  if (!p) {
    /* Format fix: ImgNum/imgx/imgy are ssize_t, not unsigned long. */
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetHslInt failure", "%ld %ld,%ld %i", (long) ImgNum, (long) imgx, (long) imgy, channel);
    /* Bug fix: the original fell through and dereferenced the NULL pixel
       pointer in GetPixelRed/Green/Blue below. */
    return 0.0;
  }
  ConvertRGBToHSL (
    GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p),
    &hue, &saturation, &lightness);
  if (channel == HUE_CHANNEL) return hue;
  if (channel == SAT_CHANNEL) return saturation;
  if (channel == LIGHT_CHANNEL) return lightness;
  return 0.0;
}
static fxFltType inline GetIntensity (FxInfo * pfx, ssize_t ImgNum, const fxFltType fx, const fxFltType fy)
/* Returns the QuantumScale-normalised intensity of image ImgNum at the
   (possibly fractional) coordinates fx,fy.
*/
{
  Quantum
    quantum_pixel[MaxPixelChannels];

  PixelInfo
    pixelinf;

  Image * img = pfx->Images[ImgNum];

  (void) GetPixelInfo (img, &pixelinf);
  /* Bug fix: interpolate through the view of the image being sampled
     (Views[ImgNum], matching img above) rather than Views[pfx->ImgNum],
     which silently ignored the ImgNum parameter; the sibling helpers
     GetHslFlt/GetHslInt consistently use Views[ImgNum]. */
  if (!InterpolatePixelInfo (img, pfx->Views[ImgNum], img->interpolate,
    (double) fx, (double) fy, &pixelinf, pfx->exception))
  {
    /* Format fix: ImgNum is signed (ssize_t); "%lu" was undefined
       behaviour for negative values. */
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "GetIntensity failure", "%ld %Lg,%Lg", (long) ImgNum, fx, fy);
  }
  SetPixelViaPixelInfo (img, &pixelinf, quantum_pixel);
  return QuantumScale * GetPixelIntensity (img, quantum_pixel);
}
static MagickBooleanType ExecuteRPN (FxInfo * pfx, fxRtT * pfxrt, fxFltType *result,
const PixelChannel channel, const ssize_t imgx, const ssize_t imgy)
{
const Quantum * p = pfxrt->thisPixel;
fxFltType regA=0, regB=0, regC=0, regD=0, regE=0;
Image * img = pfx->image;
ChannelStatistics * cs = NULL;
double hue=0, saturation=0, lightness=0;
int i;
/* For -fx, this sets p to ImgNum 0.
for %[fx:...], this sets p to the currrent image.
Similarly img.
*/
if (!p) p = GetCacheViewVirtualPixels (
pfx->Views[pfx->ImgNum], imgx, imgy, 1, 1, pfx->exception);
if (pfx->NeedStats) {
cs = pfx->statistics[pfx->ImgNum];
}
/* Folllowing is only for expressions like "saturation", with no image specifier.
*/
if (pfx->NeedHsl) {
ConvertRGBToHSL (
GetPixelRed (img, p), GetPixelGreen (img, p), GetPixelBlue (img, p),
&hue, &saturation, &lightness);
}
for (i=0; i < pfx->usedElements; i++) {
ElementT *pel = &pfx->Elements[i];
switch (pel->nArgs) {
case 0:
break;
case 1:
regA = PopVal (pfx, pfxrt, i);
break;
case 2:
regB = PopVal (pfx, pfxrt, i);
regA = PopVal (pfx, pfxrt, i);
break;
case 3:
regC = PopVal (pfx, pfxrt, i);
regB = PopVal (pfx, pfxrt, i);
regA = PopVal (pfx, pfxrt, i);
break;
case 4:
regD = PopVal (pfx, pfxrt, i);
regC = PopVal (pfx, pfxrt, i);
regB = PopVal (pfx, pfxrt, i);
regA = PopVal (pfx, pfxrt, i);
break;
case 5:
regE = PopVal (pfx, pfxrt, i);
regD = PopVal (pfx, pfxrt, i);
regC = PopVal (pfx, pfxrt, i);
regB = PopVal (pfx, pfxrt, i);
regA = PopVal (pfx, pfxrt, i);
break;
default:
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Too many args:", "%i", pel->nArgs);
break;
}
switch (pel->oprNum) {
case oAddEq:
regA = (pfxrt->UserSymVals[pel->EleNdx] += regA);
break;
case oSubtractEq:
regA = (pfxrt->UserSymVals[pel->EleNdx] -= regA);
break;
case oMultiplyEq:
regA = (pfxrt->UserSymVals[pel->EleNdx] *= regA);
break;
case oDivideEq:
regA = (pfxrt->UserSymVals[pel->EleNdx] *= PerceptibleReciprocal((double) regA));
break;
case oPlusPlus:
regA = pfxrt->UserSymVals[pel->EleNdx]++;
break;
case oSubSub:
regA = pfxrt->UserSymVals[pel->EleNdx]--;
break;
case oAdd:
regA += regB;
break;
case oSubtract:
regA -= regB;
break;
case oMultiply:
regA *= regB;
break;
case oDivide:
regA *= PerceptibleReciprocal((double) regB);
break;
case oModulus:
regA = fmod ((double) regA, fabs(floor((double) regB+0.5)));
break;
case oUnaryPlus:
/* Do nothing. */
break;
case oUnaryMinus:
regA = -regA;
break;
case oLshift:
regA = (fxFltType) ((size_t)(regA+0.5) << (size_t)(regB+0.5));
break;
case oRshift:
regA = (fxFltType) ((size_t)(regA+0.5) >> (size_t)(regB+0.5));
break;
case oEq:
regA = fabs((double) (regA-regB)) < MagickEpsilon ? 1.0 : 0.0;
break;
case oNotEq:
regA = fabs((double) (regA-regB)) >= MagickEpsilon ? 1.0 : 0.0;
break;
case oLtEq:
regA = (regA <= regB) ? 1.0 : 0.0;
break;
case oGtEq:
regA = (regA >= regB) ? 1.0 : 0.0;
break;
case oLt:
regA = (regA < regB) ? 1.0 : 0.0;
break;
case oGt:
regA = (regA > regB) ? 1.0 : 0.0;
break;
case oLogAnd:
regA = (regA<=0) ? 0.0 : (regB > 0) ? 1.0 : 0.0;
break;
case oLogOr:
regA = (regA>0) ? 1.0 : (regB > 0.0) ? 1.0 : 0.0;
break;
case oLogNot:
regA = (regA==0) ? 1.0 : 0.0;
break;
case oBitAnd:
regA = (fxFltType) ((size_t)(regA+0.5) & (size_t)(regB+0.5));
break;
case oBitOr:
regA = (fxFltType) ((size_t)(regA+0.5) | (size_t)(regB+0.5));
break;
case oBitNot:
/* Old fx doesn't add 0.5. */
regA = (fxFltType) (~(size_t)(regA+0.5));
break;
case oPow:
regA = pow ((double) regA, (double) regB);
break;
case oQuery:
case oColon:
break;
case oOpenParen:
case oCloseParen:
case oOpenBracket:
case oCloseBracket:
case oOpenBrace:
case oCloseBrace:
break;
case oAssign:
pel->val = regA;
break;
case oNull: {
if (pel->type == etColourConstant) {
switch (channel) {
default:
case 0:
regA = pel->val;
break;
case 1:
regA = pel->val1;
break;
case 2:
regA = pel->val2;
break;
}
} else {
regA = pel->val;
}
break;
}
case fAbs:
regA = fabs ((double) regA);
break;
#if defined(MAGICKCORE_HAVE_ACOSH)
case fAcosh:
regA = acosh ((double) regA);
break;
#endif
case fAcos:
regA = acos ((double) regA);
break;
#if defined(MAGICKCORE_HAVE_J1)
case fAiry:
if (regA==0) regA = 1.0;
else {
fxFltType gamma = 2.0 * j1 ((MagickPI*regA)) / (MagickPI*regA);
regA = gamma * gamma;
}
break;
#endif
case fAlt:
regA = (fxFltType) (((ssize_t) regA) & 0x01 ? -1.0 : 1.0);
break;
#if defined(MAGICKCORE_HAVE_ASINH)
case fAsinh:
regA = asinh ((double) regA);
break;
#endif
case fAsin:
regA = asin ((double) regA);
break;
#if defined(MAGICKCORE_HAVE_ATANH)
case fAtanh:
regA = atanh ((double) regA);
break;
#endif
case fAtan2:
regA = atan2 ((double) regA, (double) regB);
break;
case fAtan:
regA = atan ((double) regA);
break;
case fCeil:
regA = ceil ((double) regA);
break;
case fChannel:
switch (channel) {
case 0: break;
case 1: regA = regB; break;
case 2: regA = regC; break;
case 3: regA = regD; break;
case 4: regA = regE; break;
default: regA = 0.0;
}
break;
case fClamp:
if (regA < 0) regA = 0.0;
else if (regA > 1.0) regA = 1.0;
break;
case fCosh:
regA = cosh ((double) regA);
break;
case fCos:
regA = cos ((double) regA);
break;
case fDebug:
/* FIXME: debug() should give channel name. */
fprintf (stderr, "%s[%g,%g].%i: %s=%.*Lg\n",
img->filename, (double) imgx, (double) imgy,
channel, SetPtrShortExp (pfx, pel->pExpStart, (size_t) (pel->lenExp+1)),
pfx->precision, regA);
break;
case fDrc:
regA = regA / (regB*(regA-1.0) + 1.0);
break;
#if defined(MAGICKCORE_HAVE_ERF)
case fErf:
regA = erf ((double) regA);
break;
#endif
case fExp:
regA = exp ((double) regA);
break;
case fFloor:
regA = floor ((double) regA);
break;
case fGauss:
regA = exp((double) (-regA*regA/2.0))/sqrt(2.0*MagickPI);
break;
case fGcd:
if (!IsNaN(regA))
regA = FxGcd (regA, regB, 0);
break;
case fHypot:
regA = hypot ((double) regA, (double) regB);
break;
case fInt:
regA = floor ((double) regA);
break;
case fIsnan:
regA = (fxFltType) (!!IsNaN (regA));
break;
#if defined(MAGICKCORE_HAVE_J0)
case fJ0:
regA = j0 ((double) regA);
break;
#endif
#if defined(MAGICKCORE_HAVE_J1)
case fJ1:
regA = j1 ((double) regA);
break;
#endif
#if defined(MAGICKCORE_HAVE_J1)
case fJinc:
if (regA==0) regA = 1.0;
else regA = 2.0 * j1 ((MagickPI*regA))/(MagickPI*regA);
break;
#endif
case fLn:
regA = log ((double) regA);
break;
case fLogtwo:
regA = log10((double) regA) / log10(2.0);
break;
case fLog:
regA = log10 ((double) regA);
break;
case fMax:
regA = (regA > regB) ? regA : regB;
break;
case fMin:
regA = (regA < regB) ? regA : regB;
break;
case fMod:
regA = regA - floor((double) (regA*PerceptibleReciprocal((double) regB)))*regB;
break;
case fNot:
regA = (fxFltType) (regA < MagickEpsilon);
break;
case fPow:
regA = pow ((double) regA, (double) regB);
break;
case fRand: {
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_ExecuteRPN)
#endif
regA = GetPseudoRandomValue (pfxrt->random_info);
break;
}
case fRound:
regA = floor ((double) regA + 0.5);
break;
case fSign:
regA = (regA < 0) ? -1.0 : 1.0;
break;
case fSinc:
regA = sin ((double) (MagickPI*regA)) / (MagickPI*regA);
break;
case fSinh:
regA = sinh ((double) regA);
break;
case fSin:
regA = sin ((double) regA);
break;
case fSqrt:
regA = sqrt ((double) regA);
break;
case fSquish:
regA = 1.0 / (1.0 + exp ((double) -regA));
break;
case fTanh:
regA = tanh ((double) regA);
break;
case fTan:
regA = tan ((double) regA);
break;
case fTrunc:
if (regA >= 0) regA = floor ((double) regA);
else regA = ceil ((double) regA);
break;
case fDo:
case fFor:
case fIf:
case fWhile:
break;
case fU: {
/* Note: 1 value is available, index into image list.
May have ImgAttr qualifier or channel qualifier or both.
*/
ssize_t ImgNum = ChkImgNum (pfx, regA);
regA = (fxFltType) 0;
if (ImgNum == 0) {
Image * pimg = pfx->Images[0];
int pech = (int)pel->ChannelQual;
if (pel->ImgAttrQual == aNull) {
if (pech < 0) {
if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) {
if (pfx->ImgNum==0) {
regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Views[0], imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU can't get cache", "%lu", ImgNum);
break;
}
regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
}
} else if (pech == HUE_CHANNEL || pech == SAT_CHANNEL ||
pech == LIGHT_CHANNEL) {
regA = GetHslInt (pfx, ImgNum, imgx, imgy, pech);
break;
} else if (pech == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, 0, (double) imgx, (double) imgy);
break;
}
} else {
if (pfx->ImgNum==0) {
regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Views[0], imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU can't get cache", "%lu", ImgNum);
break;
}
regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
}
}
} else {
/* we have an image atttribute */
regA = ImageStat (pfx, 0, WHICH_ATTR_CHAN, pel->ImgAttrQual);
}
} else {
/* We have non-zero ImgNum. */
if (pel->ImgAttrQual == aNull) {
const Quantum * pv;
if ((int)pel->ChannelQual < 0) {
if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
pel->ChannelQual == LIGHT_CHANNEL)
{
regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL)
{
regA = GetIntensity (pfx, ImgNum, (fxFltType) imgx, (fxFltType) imgy);
break;
}
}
pv = GetCacheViewVirtualPixels (
pfx->Views[ImgNum], imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU can't get cache", "%lu", ImgNum);
break;
}
regA = QuantumScale *
pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual);
}
}
break;
}
case fU0: {
/* No args. No image attribute. We may have a ChannelQual.
If called from %[fx:...], ChannelQual will be CompositePixelChannel.
*/
Image * pimg = pfx->Images[0];
int pech = (int)pel->ChannelQual;
if (pech < 0) {
if (pech == NO_CHAN_QUAL || pech == THIS_CHANNEL) {
if (pfx->ImgNum==0) {
regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Views[0], imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU0 can't get cache", "%i", 0);
break;
}
regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
}
} else if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
pel->ChannelQual == LIGHT_CHANNEL) {
regA = GetHslInt (pfx, 0, imgx, imgy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, 0, (fxFltType) imgx, (fxFltType) imgy);
}
} else {
if (pfx->ImgNum==0) {
regA = QuantumScale * p[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Views[0], imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fU0 can't get cache", "%i", 0);
break;
}
regA = QuantumScale * pv[pimg->channel_map[WHICH_NON_ATTR_CHAN].offset];
}
}
break;
}
case fUP: {
/* 3 args are: ImgNum, x, y */
ssize_t ImgNum = ChkImgNum (pfx, regA);
fxFltType fx, fy;
if (pel->IsRelative) {
fx = imgx + regB;
fy = imgy + regC;
} else {
fx = regB;
fy = regC;
}
if ((int)pel->ChannelQual < 0) {
if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL
|| pel->ChannelQual == LIGHT_CHANNEL) {
regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, ImgNum, fx, fy);
break;
}
}
{
double v;
Image * imUP = pfx->Images[ImgNum];
if (! InterpolatePixelChannel (imUP, pfx->Views[ImgNum], WHICH_NON_ATTR_CHAN,
imUP->interpolate, (double) fx, (double) fy, &v, pfx->exception))
{
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fUP can't get interpolate", "%lu", ImgNum);
break;
}
regA = v * QuantumScale;
}
break;
}
case fS:
case fV: {
/* No args. */
ssize_t ImgNum = 1;
if (pel->oprNum == fS) ImgNum = pfx->ImgNum;
if (pel->ImgAttrQual == aNull) {
const Quantum * pv = GetCacheViewVirtualPixels (
pfx->Views[ImgNum], imgx, imgy, 1,1, pfx->exception);
if (!pv) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fV can't get cache", "%lu", ImgNum);
break;
}
if ((int)pel->ChannelQual < 0) {
if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
pel->ChannelQual == LIGHT_CHANNEL) {
regA = GetHslInt (pfx, ImgNum, imgx, imgy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, ImgNum, (double) imgx, (double) imgy);
break;
}
}
regA = QuantumScale *
pv[pfx->Images[ImgNum]->channel_map[WHICH_NON_ATTR_CHAN].offset];
} else {
regA = ImageStat (pfx, ImgNum, WHICH_ATTR_CHAN, pel->ImgAttrQual);
}
break;
}
case fP:
case fSP:
case fVP: {
/* 2 args are: x, y */
fxFltType fx, fy;
ssize_t ImgNum = pfx->ImgNum;
if (pel->oprNum == fVP) ImgNum = 1;
if (pel->IsRelative) {
fx = imgx + regA;
fy = imgy + regB;
} else {
fx = regA;
fy = regB;
}
if ((int)pel->ChannelQual < 0) {
if (pel->ChannelQual == HUE_CHANNEL || pel->ChannelQual == SAT_CHANNEL ||
pel->ChannelQual == LIGHT_CHANNEL) {
regA = GetHslFlt (pfx, ImgNum, fx, fy, pel->ChannelQual);
break;
} else if (pel->ChannelQual == INTENSITY_CHANNEL) {
regA = GetIntensity (pfx, ImgNum, fx, fy);
}
}
{
double v;
if (! InterpolatePixelChannel (pfx->Images[ImgNum], pfx->Views[ImgNum],
WHICH_NON_ATTR_CHAN, pfx->Images[ImgNum]->interpolate,
(double) fx, (double) fy, &v, pfx->exception)
)
{
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"fSP or fVP can't get interp", "%lu", ImgNum);
break;
}
regA = v * (fxFltType)QuantumScale;
}
break;
}
case fNull:
break;
case aDepth:
regA = (fxFltType) GetImageDepth (img, pfx->exception) / QuantumRange;
break;
case aExtent:
regA = (fxFltType) img->extent;
break;
case aKurtosis:
regA = cs[WHICH_ATTR_CHAN].kurtosis;
break;
case aMaxima:
regA = cs[WHICH_ATTR_CHAN].maxima;
break;
case aMean:
regA = cs[WHICH_ATTR_CHAN].mean;
break;
case aMedian:
regA = cs[WHICH_ATTR_CHAN].median;
break;
case aMinima:
regA = cs[WHICH_ATTR_CHAN].minima;
break;
case aPage:
break;
case aPageWid:
regA = (fxFltType) img->page.width;
break;
case aPageHt:
regA = (fxFltType) img->page.height;
break;
case aPageX:
regA = (fxFltType) img->page.x;
break;
case aPageY:
regA = (fxFltType) img->page.y;
break;
case aPrintsize:
break;
case aPrintsizeX:
regA = PerceptibleReciprocal (img->resolution.x) * img->columns;
break;
case aPrintsizeY:
regA = PerceptibleReciprocal (img->resolution.y) * img->rows;
break;
case aQuality:
regA = (fxFltType) img->quality;
break;
case aRes:
break;
case aResX:
regA = (fxFltType) img->resolution.x;
break;
case aResY:
regA = (fxFltType) img->resolution.y;
break;
case aSkewness:
regA = cs[WHICH_ATTR_CHAN].skewness;
break;
case aStdDev:
regA = cs[WHICH_ATTR_CHAN].standard_deviation;
break;
case aH: /* image->rows */
regA = (fxFltType) img->rows;
break;
case aN: /* image list length */
regA = (fxFltType) pfx->ImgListLen;
break;
case aT: /* image index in list */
regA = (fxFltType) pfx->ImgNum;
break;
case aW: /* image->columns */
regA = (fxFltType) img->columns;
break;
case aZ: /* image depth */
regA = (fxFltType) GetImageDepth (img, pfx->exception);
break;
case aNull:
break;
case sHue: /* of conversion to HSL */
regA = hue;
break;
case sIntensity:
regA = GetIntensity (pfx, pfx->ImgNum, (double) imgx, (double) imgy);
break;
case sLightness: /* of conversion to HSL */
regA = lightness;
break;
case sLuma: /* calculation */
case sLuminance: /* as Luma */
regA = QuantumScale * (0.212656 * GetPixelRed (img,p) +
0.715158 * GetPixelGreen (img,p) +
0.072186 * GetPixelBlue (img,p));
break;
case sSaturation: /* from conversion to HSL */
regA = saturation;
break;
case sA: /* alpha */
regA = QuantumScale * GetPixelAlpha (img, p);
break;
case sB: /* blue */
regA = QuantumScale * GetPixelBlue (img, p);
break;
case sC: /* red (ie cyan) */
regA = QuantumScale * GetPixelCyan (img, p);
break;
case sG: /* green */
regA = QuantumScale * GetPixelGreen (img, p);
break;
case sI: /* current x-coordinate */
regA = (fxFltType) imgx;
break;
case sJ: /* current y-coordinate */
regA = (fxFltType) imgy;
break;
case sK: /* black of CMYK */
regA = QuantumScale * GetPixelBlack (img, p);
break;
case sM: /* green (ie magenta) */
regA = QuantumScale * GetPixelGreen (img, p);
break;
case sO: /* alpha */
regA = QuantumScale * GetPixelAlpha (img, p);
break;
case sR:
regA = QuantumScale * GetPixelRed (img, p);
break;
case sY:
regA = QuantumScale * GetPixelYellow (img, p);
break;
case sNull:
break;
case rGoto:
i = pel->EleNdx-1; /* -1 because 'for' loop will increment. */
break;
case rIfZeroGoto:
if (fabs((double) regA) < MagickEpsilon) i = pel->EleNdx-1;
break;
case rIfNotZeroGoto:
if (fabs((double) regA) > MagickEpsilon) i = pel->EleNdx-1;
break;
case rCopyFrom:
regA = pfxrt->UserSymVals[pel->EleNdx];
break;
case rCopyTo:
pfxrt->UserSymVals[pel->EleNdx] = regA;
break;
case rZerStk:
pfxrt->usedValStack = 0;
break;
case rNull:
break;
default:
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"pel->oprNum", "%i '%s' not yet implemented",
(int)pel->oprNum, OprStr(pel->oprNum));
break;
}
if (i < 0) {
(void) ThrowMagickException (
pfx->exception, GetMagickModule(), OptionError,
"Bad run-time address", "%i", i);
}
if (pel->DoPush)
if (!PushVal (pfx, pfxrt, regA, i)) break;
}
if (pfxrt->usedValStack > 0) regA = PopVal (pfx, pfxrt, 9999);
*result = regA;
if (pfxrt->usedValStack != 0) {
fprintf (stderr, "ValStack not empty (%i)\n", pfxrt->usedValStack);
return MagickFalse;
}
return MagickTrue;
}
/* Following is substitute for FxEvaluateChannelExpression().

   Evaluates the pre-compiled fx expression held in pfx for a single
   channel at pixel (x,y), using the per-thread runtime state selected by
   the current OpenMP thread id.

   On success, writes the result (clamped to double) to *result and
   returns MagickTrue.  On failure, records an OptionError on the caller's
   exception and returns MagickFalse.
*/
MagickPrivate MagickBooleanType FxEvaluateChannelExpression (
  FxInfo *pfx,
  const PixelChannel channel, const ssize_t x, const ssize_t y,
  double *result, ExceptionInfo *exception)
{
  const int
    id = GetOpenMPThreadId();

  fxFltType ret;

  assert (pfx != NULL);
  assert (pfx->image != NULL);
  assert (pfx->Images != NULL);
  assert (pfx->Views != NULL);
  assert (pfx->fxrts != NULL);

  /* thisPixel==NULL tells ExecuteRPN there is no cached row pointer, so
     pixel reads go through the cache views. */
  pfx->fxrts[id].thisPixel = NULL;
  if (!ExecuteRPN (pfx, &pfx->fxrts[id], &ret, channel, x, y)) {
    /* Fixed typo in the reported message: "ExcuteRPN" -> "ExecuteRPN". */
    (void) ThrowMagickException (
      exception, GetMagickModule(), OptionError,
      "ExecuteRPN failed", " ");
    return MagickFalse;
  }

  *result = (double) ret;

  return MagickTrue;
}
/* Allocates and fully initialises an FxInfo for the given image list and
   expression: builds the RPN tables, translates the expression, collects
   channel statistics if the expression needs them, and allocates one
   runtime state per possible thread.  Returns NULL on any failure.

   NOTE(review): every early `return NULL` below leaks pfx and whatever
   InitFx/BuildRPN/TranslateStatementList allocated into it — callers get
   NULL and cannot free it.  Confirm whether a cleanup path (DeInitFx /
   DestroyRPN / RelinquishMagickMemory) is safe to call on these partial
   states before adding one.
*/
FxInfo *AcquireFxInfo (const Image * images, const char * expression, ExceptionInfo *exception)
{
  char chLimit;

  FxInfo * pfx = (FxInfo*) AcquireCriticalMemory (sizeof (*pfx));

  memset (pfx, 0, sizeof (*pfx));

  if (!InitFx (pfx, images, exception)) return NULL;

  if (!BuildRPN (pfx)) return NULL;

  /* This assignment is immediately superseded below once pfx->expression
     is set; it only matters if FileToString/ConstantString report through
     the parse cursor before then. */
  pfx->pex = (char *)expression;

  /* A leading '@' means "read the expression from this file". */
  if (*expression == '@')
    /* NOTE(review): FileToString can return NULL (e.g. unreadable file);
       confirm TranslateStatementList tolerates a NULL expression. */
    pfx->expression = FileToString (expression+1, ~0UL, exception);
  else
    pfx->expression = ConstantString (expression);

  /* pex is the parse cursor; start it at the beginning of the (possibly
     file-loaded) expression text. */
  pfx->pex = pfx->expression;

  pfx->teDepth = 0;
  if (!TranslateStatementList (pfx, ";", &chLimit)) return NULL;

  /* A non-zero translate depth means unbalanced nesting in the source. */
  if (pfx->teDepth) {
    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "Translate expression depth", "(%i) not 0",
      pfx->teDepth);

    return NULL;
  }

  /* The translator must consume the whole input; anything left over other
     than a statement separator is a syntax error. */
  if (chLimit != '\0' && chLimit != ';') {
    fprintf (stderr, "AcquireFxInfo: TranslateExpression did not exhaust input '%s' chLimit='%c'\n",
      pfx->pex, chLimit ? chLimit : ' ');

    (void) ThrowMagickException (
      pfx->exception, GetMagickModule(), OptionError,
      "AcquireFxInfo: TranslateExpression did not exhaust input at", "'%s'",
      pfx->pex);

    return NULL;
  }

  /* Statistics (mean, stddev, ...) are gathered only when the translated
     expression references them. */
  if (pfx->NeedStats && !pfx->statistics) {
    if (!CollectStatistics (pfx)) return NULL;
  }

  if (pfx->DebugOpt) {
    DumpTables (stderr);
    DumpUserSymbols (pfx, stderr);
    (void) DumpRPN (pfx, stderr);
  }

  {
    /* One runtime state (registers, value stack, user symbols) per thread
       the resource limit allows; ExecuteRPN indexes these by thread id. */
    size_t number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
    ssize_t t;

    pfx->fxrts = (fxRtT *)AcquireQuantumMemory (number_threads, sizeof(fxRtT));
    if (!pfx->fxrts) {
      (void) ThrowMagickException (
        pfx->exception, GetMagickModule(), ResourceLimitFatalError,
        "fxrts", "%lu",
        number_threads);
      return NULL;
    }
    for (t=0; t < (ssize_t) number_threads; t++) {
      if (!AllocFxRt (pfx, &pfx->fxrts[t])) {
        (void) ThrowMagickException (
          pfx->exception, GetMagickModule(), ResourceLimitFatalError,
          "AllocFxRt t=", "%g",
          (double) t);
        return NULL;
      }
    }
  }

  return pfx;
}
/* Tears down an FxInfo created by AcquireFxInfo: per-thread runtime
   states first, then the RPN tables, the expression string, and the
   image/view bookkeeping.  Always returns NULL so callers can write
   `pfx = DestroyFxInfo (pfx);`.
*/
FxInfo *DestroyFxInfo (FxInfo * pfx)
{
  ssize_t t;

  /* Hoisted out of the loop condition: the thread resource limit is
     loop-invariant, and this mirrors the single query AcquireFxInfo uses
     when sizing the fxrts array. */
  const ssize_t number_threads = (ssize_t) GetMagickResourceLimit(ThreadResource);

  assert (pfx != NULL);
  assert (pfx->image != NULL);
  assert (pfx->Images != NULL);
  assert (pfx->Views != NULL);
  assert (pfx->fxrts != NULL);

  for (t=0; t < number_threads; t++) {
    DestroyFxRt (&pfx->fxrts[t]);
  }
  pfx->fxrts = (fxRtT *) RelinquishMagickMemory (pfx->fxrts);

  DestroyRPN (pfx);

  pfx->expression = DestroyString (pfx->expression);
  pfx->pex = NULL;

  (void) DeInitFx (pfx);

  pfx = (FxInfo*) RelinquishMagickMemory(pfx);

  return NULL;
}
/* Following is substitute for FxImage().

   Clones `image`, then evaluates the fx expression once per channel per
   pixel and writes the clamped results into the clone.  Rows are
   processed in parallel under OpenMP; each thread uses its own fx runtime
   state indexed by thread id.  Returns the new image, or NULL on failure
   (in which case the clone is destroyed).
*/
MagickExport Image *FxImage (const Image *image, const char *expression,
  ExceptionInfo *exception)
{
#define FxImageTag "FxNew/Image"

  CacheView
    *fx_view,
    *image_view;

  Image
    *fx_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  FxInfo
    *pfx;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);

  /* A NULL expression degenerates to a plain clone. */
  if (expression == (const char *) NULL)
    return(CloneImage(image,0,0,MagickTrue,exception));
  fx_image=CloneImage(image,0,0,MagickTrue,exception);
  if (!fx_image) return NULL;
  if (SetImageStorageClass(fx_image,DirectClass,exception) == MagickFalse) {
    fx_image=DestroyImage(fx_image);
    return NULL;
  }

  /* Compile the expression once; the RPN program is then executed per
     pixel/channel below. */
  pfx = AcquireFxInfo (image, expression, exception);

  if (!pfx) {
    fx_image=DestroyImage(fx_image);
    return NULL;
  }

  assert (pfx->image != NULL);
  assert (pfx->Images != NULL);
  assert (pfx->Views != NULL);
  assert (pfx->fxrts != NULL);

  status=MagickTrue;
  progress=0;
  image_view = AcquireVirtualCacheView (image, pfx->exception);
  fx_view = AcquireAuthenticCacheView (fx_image, pfx->exception);

  /* When the expression contains debug output, the thread count is forced
     so that output is not interleaved (see the last macro argument). */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(dynamic) shared(progress,status) \
    magick_number_threads(image,fx_image,fx_image->rows, \
      pfx->ContainsDebug ? 0 : 1)
#endif
  for (y=0; y < (ssize_t) fx_image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    const Quantum
      *magick_restrict p;

    Quantum
      *magick_restrict q;

    ssize_t
      x;

    fxFltType
      result = 0.0;

    /* Once any row fails we skip the remaining rows (loop iterations may
       already be scheduled, so we cannot break). */
    if (status == MagickFalse)
      continue;

    p = GetCacheViewVirtualPixels (image_view, 0, y, image->columns, 1, pfx->exception);
    q = QueueCacheViewAuthenticPixels (fx_view, 0, y, fx_image->columns, 1, pfx->exception);

    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) {
      status=MagickFalse;
      continue;
    }

    for (x=0; x < (ssize_t) fx_image->columns; x++) {
      ssize_t i;

      /* Give the per-thread runtime a direct pointer to the current source
         pixel so channel reads avoid a cache lookup. */
      pfx->fxrts[id].thisPixel = (Quantum *)p;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel (image, i);
        PixelTrait traits = GetPixelChannelTraits (image, channel);
        PixelTrait fx_traits = GetPixelChannelTraits (fx_image, channel);
        if ((traits == UndefinedPixelTrait) ||
            (fx_traits == UndefinedPixelTrait))
          continue;

        /* Copy-trait channels bypass the expression entirely. */
        if ((fx_traits & CopyPixelTrait) != 0) {
          SetPixelChannel (fx_image, channel, p[i], q);
          continue;
        }

        if (!ExecuteRPN (pfx, &pfx->fxrts[id], &result, channel, x, y)) {
          /* NOTE(review): this `continue` leaves q[i] at whatever
             QueueCacheViewAuthenticPixels provided for this channel; the
             row is discarded via `status` anyway — confirm intended. */
          status=MagickFalse;
          continue;
        }

        q[i] = ClampToQuantum ((MagickRealType) (QuantumRange*result));
      }
      p+=GetPixelChannels (image);
      q+=GetPixelChannels (fx_image);
    }

    if (SyncCacheViewAuthenticPixels(fx_view, pfx->exception) == MagickFalse)
      status=MagickFalse;

    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* Only the increment is atomic; the value passed to
           SetImageProgress may be slightly stale, which is tolerated for
           progress reporting. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp atomic
#endif
        progress++;
        proceed = SetImageProgress (image, FxImageTag, progress, image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }

  fx_view = DestroyCacheView (fx_view);
  image_view = DestroyCacheView (image_view);

  /* Before destroying the user symbol values, dump them to stderr.
  */
  if (pfx->DebugOpt && pfx->usedUserSymbols) {
    int t, i;
    char UserSym[MagickPathExtent];
    fprintf (stderr, "User symbols (%i):\n", pfx->usedUserSymbols);
    for (t=0; t < (int) GetMagickResourceLimit(ThreadResource); t++) {
      for (i = 0; i < (int) pfx->usedUserSymbols; i++) {
        fprintf (stderr, "th=%i us=%i '%s': %.*Lg\n",
                 t, i, NameOfUserSym (pfx, i, UserSym), pfx->precision, pfx->fxrts[t].UserSymVals[i]);
      }
    }
  }

  /* Any exception recorded during the parallel loop fails the whole op. */
  if (pfx->exception->severity != UndefinedException) {
    status = MagickFalse;
  }

  if (status == MagickFalse)
    fx_image = DestroyImage (fx_image);

  pfx = DestroyFxInfo (pfx);

  return(fx_image);
}
|
jacobi-omp3.c | /*
* BSD 2-Clause License
*
* Copyright (c) 2020, Alessandro Capotondi
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
/**
* @file jacobi.c
* @author Alessandro Capotondi
* @date 27 Mar 2020
* @brief This code solves the steady state heat equation on a rectangular region.
* This code solves the steady state heat equation on a rectangular region.
* The sequential version of this program needs approximately
* 18/epsilon iterations to complete.
* The physical region, and the boundary conditions, are suggested
* by this diagram;
* W = 0
* +------------------+
* | |
* W = 100 | | W = 100
* | |
* +------------------+
* W = 100
* The region is covered with a grid of M by N nodes, and an N by N
* array W is used to record the temperature. The correspondence between
* array indices and locations in the region is suggested by giving the
* indices of the four corners:
* I = 0
* [0][0]-------------[0][N-1]
* | |
* J = 0 | | J = N-1
* | |
* [M-1][0]-----------[M-1][N-1]
* I = M-1
* The steady state solution to the discrete heat equation satisfies the
* following condition at an interior grid point:
* W[Central] = (1/4) * ( W[North] + W[South] + W[East] + W[West] )
* where "Central" is the index of the grid point, "North" is the index
* of its immediate neighbor to the "north", and so on.
*
* Given an approximate solution of the steady state heat equation, a
* "better" solution is given by replacing each interior point by the
* average of its 4 neighbors - in other words, by using the condition
* as an ASSIGNMENT statement:
* W[Central] <= (1/4) * ( W[North] + W[South] + W[East] + W[West] )
* If this process is repeated often enough, the difference between successive
* estimates of the solution will go to zero.
* This program carries out such an iteration, using a tolerance specified by
* the user, and writes the final estimate of the solution to a file that can
* be used for graphic processing.
 * Licensing:
 * This code is distributed under the GNU LGPL license.
 * Modified:
 * 18 October 2011
 * Author:
 * Original C version by Michael Quinn.
 * This C version by John Burkardt.
 * Reference:
 * Michael Quinn,
 * Parallel Programming in C with MPI and OpenMP,
 * McGraw-Hill, 2004,
 * ISBN13: 978-0071232654,
 * LC: QA76.73.C15.Q55.
 * Local parameters:
* Local, double DIFF, the norm of the change in the solution from one iteration
* to the next.
* Local, double MEAN, the average of the boundary values, used to initialize
* the values of the solution in the interior.
* Local, double U[M][N], the solution at the previous iteration.
* Local, double W[M][N], the solution computed at the latest iteration.
*
*
* @see https://en.wikipedia.org/wiki/Jacobi_method
* @see http://algo.ing.unimo.it/people/andrea/Didattica/HPC/index.html
*/
#include <limits.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

#include "utils.h"
static int N;
static int MAX_ITERATIONS;
static int SEED;
static double CONVERGENCE_THRESHOLD;
static FILE *data;
#define SEPARATOR "------------------------------------\n"
// Return the current time in seconds since the Epoch
double get_timestamp();
// Parse command line arguments to set solver parameters
void parse_arguments(int argc, char *argv[]);
// Run the Jacobi solver
// Returns the number of iterations performed
//
// Iterates until the max per-cell change drops below CONVERGENCE_THRESHOLD
// or MAX_ITERATIONS is reached.  Each sweep computes the 4-neighbour
// average of A into xtmp (boundary rows/columns untouched), then copies
// xtmp back into A.  Both sweeps are offloaded to the device via OpenMP
// target constructs; NTHREADS_GPU is expected to come from utils.h.
int run(double *restrict A, double *restrict xtmp)
{
  int iter = 0, iterations_print = 1;
  double err = 0.0;
  do
  {
    err = 0.0;
    // NOTE(review): the `target data` region here spans only the single
    // `target teams` construct that follows, and that construct repeats the
    // same map clauses — the outer region looks redundant.  Also, mapping A
    // as `to` and xtmp as `from` each iteration forces a full host<->device
    // transfer per sweep; hoisting one data region around the whole do/while
    // would avoid that.  Confirm intent before changing.
#pragma omp target data map(to \
                            : A [0:N * N]) map(from \
                                               : xtmp [0:N * N]) map(tofrom \
                                                                     : err)
#pragma omp target teams num_teams(N / NTHREADS_GPU) thread_limit(NTHREADS_GPU) map(to \
                                                                                    : A [0:N * N]) map(from \
                                                                                                       : xtmp [0:N * N]) map(tofrom \
                                                                                                                             : err)
#pragma omp distribute parallel for collapse(2) num_threads(NTHREADS_GPU) dist_schedule(static, NTHREADS_GPU) reduction(max \
                                                                                                                        : err) schedule(static, 1)
    for (int i = 1; i < N - 1; i++)
    {
      for (int j = 1; j < N - 1; j++)
      {
        // Jacobi update: average of the four neighbours from the previous
        // iterate; err tracks the largest change (max-reduction).
        xtmp[i * N + j] = 0.25 * (A[(i - 1) * N + j] + A[(i + 1) * N + j] + A[i * N + j - 1] + A[i * N + j + 1]);
        err = fmax(err, fabs(xtmp[i * N + j] - A[i * N + j]));
      }
    }
    // NOTE(review): the map directions below (A `from`, xtmp `to`) are the
    // reverse of what the copy-back loop reads/writes on the device —
    // confirm these clauses against the intended data flow.
#pragma omp target data map(from \
                            : A [0:N * N]) map(to \
                                               : xtmp [0:N * N])
#pragma omp target teams num_teams(N / NTHREADS_GPU) thread_limit(NTHREADS_GPU) map(from \
                                                                                    : A [0:N * N]) map(to \
                                                                                                       : xtmp [0:N * N])
#pragma omp distribute parallel for collapse(2) num_threads(NTHREADS_GPU) dist_schedule(static, NTHREADS_GPU) schedule(static, 1)
    for (int i = 0; i < N; i++)
    {
      for (int j = 0; j < N; j++)
      {
        A[i * N + j] = xtmp[i * N + j];
      }
    }
    iter++;

#ifdef DEBUG
    // Print the residual at power-of-two iteration counts.
    if (iter == iterations_print)
    {
      printf(" %8d %f\n", iter, err);
      iterations_print = 2 * iterations_print;
    }
#endif
  } while (err > CONVERGENCE_THRESHOLD && iter < MAX_ITERATIONS);

  return iter;
}
// Program entry point: parse options, load the initial grid from the data
// file selected by parse_arguments, run the solver, and report timing.
// Returns 0 on success, 1 on allocation/input failure.
int main(int argc, char *argv[])
{
  parse_arguments(argc, argv);

  // Casts keep this valid under both C and C++ compilers.
  double *A = (double *)malloc(N * N * sizeof(double));
  double *xtmp = (double *)malloc(N * N * sizeof(double));
  // Previously unchecked: a failed allocation would crash in the init loop.
  if (A == NULL || xtmp == NULL)
  {
    printf("Failed to allocate %dx%d matrices\n", N, N);
    free(A);
    free(xtmp);
    return 1;
  }
  // parse_arguments opens the data file but does not verify the result;
  // fail cleanly here rather than passing NULL to fread.
  if (data == NULL)
  {
    printf("Failed to open input data file\n");
    free(A);
    free(xtmp);
    return 1;
  }

  printf(SEPARATOR);
  printf("Matrix size: %dx%d\n", N, N);
  printf("Maximum iterations: %d\n", MAX_ITERATIONS);
  printf("Convergence threshold: %lf\n", CONVERGENCE_THRESHOLD);
  printf(SEPARATOR);

  // Load the initial grid values; a short read (truncated file) was
  // previously ignored and left A partially uninitialized.
  for (int ii = 0; ii < N; ii++)
  {
    for (int jj = 0; jj < N; jj++)
    {
      double f;
      if (fread(&f, sizeof(double), 1, data) != 1)
      {
        printf("Failed to read input data\n");
        free(A);
        free(xtmp);
        fclose(data);
        return 1;
      }
      A[ii * N + jj] = f;
    }
  }

  // Run Jacobi solver
  start_timer();
  int itr = run(A, xtmp);
  stop_timer();

  printf("Iterations = %d\n", itr);
  printf("Solver runtime = %lf ms\n", elapsed_ns() / 1E6);
  if (itr == MAX_ITERATIONS)
    printf("WARNING: solution did not converge\n");
  printf(SEPARATOR);

  free(A);
  free(xtmp);
  fclose(data);
  return 0;
}
// Parse a non-negative decimal integer from str.
// Returns -1 when str is empty, contains trailing garbage, is negative,
// or does not fit in an int.  (The original used strtoul — which silently
// wraps negative input — and accepted the empty string as 0.)
int parse_int(const char *str)
{
  char *next;
  long value = strtol(str, &next, 10);
  if (next == str || *next != '\0' || value < 0 || value > INT_MAX)
    return -1;
  return (int)value;
}
// Parse a double from str.
// Returns -1 when str is empty or contains trailing garbage.  (The
// original accepted the empty string as a valid 0.0, so e.g.
// `--convergence ""` silently set a zero threshold.)
double parse_double(const char *str)
{
  char *next;
  double value = strtod(str, &next);
  if (next == str || *next != '\0')
    return -1;
  return value;
}
// Parse command line arguments to set solver parameters, then open the
// input data file matching the chosen matrix order.  Exits the process on
// any invalid option or if the data file cannot be opened.
void parse_arguments(int argc, char *argv[])
{
  // Set default values
  N = 500;
  MAX_ITERATIONS = 2000;
  CONVERGENCE_THRESHOLD = 0.001;
  SEED = 0;

  for (int i = 1; i < argc; i++)
  {
    if (!strcmp(argv[i], "--convergence") || !strcmp(argv[i], "-c"))
    {
      if (++i >= argc || (CONVERGENCE_THRESHOLD = parse_double(argv[i])) < 0)
      {
        printf("Invalid convergence threshold\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--iterations") || !strcmp(argv[i], "-i"))
    {
      if (++i >= argc || (MAX_ITERATIONS = parse_int(argv[i])) < 0)
      {
        printf("Invalid number of iterations\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--norder") || !strcmp(argv[i], "-n"))
    {
      if (++i >= argc || (N = parse_int(argv[i])) < 0)
      {
        printf("Invalid matrix order\n");
        exit(1);
      }
    }
    else if (!strcmp(argv[i], "--help") || !strcmp(argv[i], "-h"))
    {
      printf("\n");
      printf("Usage: ./jacobi [OPTIONS]\n\n");
      printf("Options:\n");
      printf("  -h  --help               Print this message\n");
      printf("  -c  --convergence  C     Set convergence threshold\n");
      printf("  -i  --iterations   I     Set maximum number of iterations\n");
      // Fixed typo in help text: "maxtrix" -> "matrix".
      printf("  -n  --norder       N     Set matrix order (500 or 1000)\n");
      printf("\n");
      exit(0);
    }
    else
    {
      printf("Unrecognized argument '%s' (try '--help')\n", argv[i]);
      exit(1);
    }
  }

  if (N == 1000)
    data = fopen("data/jacobi-1000.bin", "rb");
  else if (N == 500)
    data = fopen("data/jacobi-500.bin", "rb");
  else
  {
    printf("Invalid matrix order\n");
    exit(1);
  }
  // Previously unchecked: a missing data file left `data` NULL and main
  // passed it straight to fread.
  if (data == NULL)
  {
    printf("Failed to open input data file\n");
    exit(1);
  }
}
|
tools.h | #ifndef YGGTOOLS_H_
#define YGGTOOLS_H_
#ifdef _MSC_VER
#ifndef _CRT_SECURE_NO_WARNINGS
#define _CRT_SECURE_NO_WARNINGS 1
#endif
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <errno.h>
#include <time.h>
#ifdef USE_OSR_YGG
struct complex_float{
float re;
float im;
};
struct complex_double{
double re;
double im;
};
struct complex_long_double{
long double re;
long double im;
};
typedef struct complex_float complex_float;
typedef struct complex_double complex_double;
typedef struct complex_long_double complex_long_double;
#define creal(x) x.re
#define crealf(x) x.re
#define creall(x) x.re
#define cimag(x) x.im
#define cimagf(x) x.im
#define cimagl(x) x.im
#else /*USE_YGG_OSR*/
#ifdef _MSC_VER
#ifdef __cplusplus
#include <complex>
typedef std::complex<float> complex_float;
typedef std::complex<double> complex_double;
typedef std::complex<long double> complex_long_double;
#ifndef creal
#define creal(x) x.real()
#define crealf(x) x.real()
#define creall(x) x.real()
#define cimag(x) x.imag()
#define cimagf(x) x.imag()
#define cimagl(x) x.imag()
#endif /*creal*/
#else /*__cplusplus*/
#include <complex.h>
typedef _Fcomplex complex_float;
typedef _Dcomplex complex_double;
typedef _Lcomplex complex_long_double;
#define print_complex(x) printf("%lf+%lfj\n", (double)(x._Val[0]), (double)(x._Val[1]))
#endif /*__cplusplus*/
#else // Unix
#ifdef __cplusplus
#include <complex>
typedef std::complex<float> complex_float;
typedef std::complex<double> complex_double;
typedef std::complex<long double> complex_long_double;
#ifndef creal
#define creal(x) x.real()
#define crealf(x) x.real()
#define creall(x) x.real()
#define cimag(x) x.imag()
#define cimagf(x) x.imag()
#define cimagl(x) x.imag()
#endif /*creal*/
#else /*__cplusplus*/
#include <complex.h>
typedef float _Complex complex_float;
typedef double _Complex complex_double;
typedef long double _Complex complex_long_double;
#endif /*__cplusplus*/
#endif /*Unix*/
#endif /*USE_YGG_OSR*/
#ifndef print_complex
#define print_complex(x) printf("%lf+%lfj\n", (double)creal(x), (double)cimag(x))
#endif
#ifdef __cplusplus /* If this is a C++ compiler, use C linkage */
extern "C" {
#endif
#include <math.h> // Required to prevent error when using mingw on windows
#ifdef _DEBUG
#undef _DEBUG
#include <Python.h>
#include <numpy/arrayobject.h>
#include <numpy/ndarrayobject.h>
#include <numpy/npy_common.h>
#define _DEBUG
#else
#include <Python.h>
#include <numpy/arrayobject.h>
#include <numpy/ndarrayobject.h>
#include <numpy/npy_common.h>
#endif
/*! @brief Wrapper for a complex number with float components. */
typedef struct complex_float_t {
float re; //!< Real component
float im; //!< Imaginary component
} complex_float_t;
/*! @brief Wrapper for a complex number with double components. */
typedef struct complex_double_t {
double re; //!< Real component
double im; //!< Imaginary component
} complex_double_t;
/*! @brief Wrapper for a complex number with long double components. */
typedef struct complex_long_double_t {
long double re; //!< Real component
long double im; //!< Imaginary component
} complex_long_double_t;
// Platform specific
#ifdef _WIN32
#include "regex/regex_win32.h"
#include "getline_win32.h"
#else
#include "regex_posix.h"
#endif
#ifdef _MSC_VER
#include "windows_stdint.h" // Use local copy for MSVC support
// Prevent windows.h from including winsock.h
#ifndef WIN32_LEAN_AND_MEAN
#define WIN32_LEAN_AND_MEAN
#endif
#include <windows.h>
#include <process.h>
#define ygg_getpid _getpid
#define sleep(tsec) Sleep(1000*tsec)
#define usleep(usec) Sleep(usec/1000)
#else
#include <stdint.h>
#include <unistd.h>
#define ygg_getpid getpid
#endif
#define STRBUFF 100
/*! @brief Maximum message size. */
#ifdef IPCDEF
#define YGG_MSG_MAX 2048
#else
#define YGG_MSG_MAX 1048576
#endif
/*! @brief End of file message. */
#define YGG_MSG_EOF "EOF!!!"
/*! @brief End of client message. */
#define YGG_CLIENT_EOF "YGG_END_CLIENT"
/*! @brief Reasonable size for a buffer. */
#define YGG_MSG_BUF 2048
/*! @brief Sleep time in micro-seconds */
#define YGG_SLEEP_TIME ((int)250000)
/*! @brief Size for buffers to contain names of Python objects. */
#define PYTHON_NAME_SIZE 1000
/*! @brief Define old style names for compatibility. */
#define PSI_MSG_MAX YGG_MSG_MAX
#define PSI_MSG_BUF YGG_MSG_BUF
#define PSI_MSG_EOF YGG_MSG_EOF
#ifdef PSI_DEBUG
#define YGG_DEBUG PSI_DEBUG
#endif
static int _ygg_error_flag = 0;
/*! @brief Define macros to allow counts of variables. */
// https://codecraft.co/2014/11/25/variadic-macros-tricks/
#ifdef _MSC_VER
// https://stackoverflow.com/questions/48710758/how-to-fix-variadic-macro-related-issues-with-macro-overloading-in-msvc-mic
#define MSVC_BUG(MACRO, ARGS) MACRO ARGS // name to remind that bug fix is due to MSVC :-)
#define _GET_NTH_ARG_2(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, N, ...) N
#define _GET_NTH_ARG(...) MSVC_BUG(_GET_NTH_ARG_2, (__VA_ARGS__))
#define COUNT_VARARGS(...) _GET_NTH_ARG("ignored", ##__VA_ARGS__, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#define VA_MACRO(MACRO, ...) MSVC_BUG(CONCATE, (MACRO, COUNT_VARARGS(__VA_ARGS__)))(__VA_ARGS__)
#else
#define _GET_NTH_ARG(_1, _2, _3, _4, _5, _6, _7, _8, _9, _10, _11, _12, _13, _14, N, ...) N
#define COUNT_VARARGS(...) _GET_NTH_ARG("ignored", ##__VA_ARGS__, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0)
#endif
#define UNUSED(arg) ((void)&(arg))
/*! @brief Memory to allow thread association to be set via macro. */
static int global_thread_id = -1;
#define ASSOCIATED_WITH_THREAD(COMM, THREAD) global_thread_id = THREAD; COMM; global_thread_id = -1;
#ifdef _OPENMP
#pragma omp threadprivate(global_thread_id)
#endif
/*!
  @brief Derive an unsigned long seed from the least significant 32 bits
         of a pointer value.
  @param[in] ptr Pointer that should be turned into a seed.
  @return Unsigned long seed.
*/
static inline
unsigned long ptr2seed(void *ptr) {
  const uint64_t bits = (uint64_t)ptr;
  return (unsigned long)(bits & 0xFFFFFFFFLL);
};
/*! @brief Structure used to wrap va_list and allow pointer passing.
@param va va_list Wrapped variable argument list.
*/
typedef struct va_list_t {
va_list va; //!< Traditional variable argument list.
int using_ptrs; //!< Flag that is 1 if the arguments are stored using pointers.
void **ptrs; //!< Variable arguments stored as pointers.
int nptrs; //!< The number of variable arguments stored as pointers.
int iptr; //!< The index of the current variable argument pointer.
int for_fortran; //!< Flag that is 1 if this structure will be accessed by fortran.
} va_list_t;
/*! @brief Structure used to wrap Python objects. */
typedef struct python_t {
char name[PYTHON_NAME_SIZE]; //!<Name of the Python class/type/function.
void *args; //!< Arguments used in creating a Python instance.
void *kwargs; //!< Keyword arguments used in creating a Python instance.
PyObject *obj; //!< Python object.
} python_t;
/*!
  @brief Get the ID for the current thread (if inside one).
  An explicit association via ASSOCIATED_WITH_THREAD (global_thread_id >= 0)
  takes precedence; otherwise the OpenMP thread number is used when inside
  a parallel region, and 0 in all other cases.
  @returns int Thread ID.
*/
static inline
int get_thread_id() {
  if (global_thread_id >= 0)
    return global_thread_id;
#ifdef _OPENMP
  if (omp_in_parallel())
    return omp_get_thread_num();
  /* #elif defined pthread_self */
  /* // TODO: Finalize/test support for pthread */
  /* out = pthread_self(); */
#endif
  return 0;
};
/*!
  @brief Initialize a structure to contain a Python object.
  All pointer members are NULL and the name buffer holds the empty string.
  @returns python_t New Python object structure.
*/
static inline
python_t init_python() {
  python_t out;
  out.obj = NULL;
  out.kwargs = NULL;
  out.args = NULL;
  out.name[0] = '\0';
  return out;
};
/*!
  @brief Initialize Numpy arrays if it is not initialized.
  The import is wrapped in an OpenMP critical section so that only one
  thread performs it; a NULL PyArray_API indicates the NumPy C-API has not
  been imported yet in this process.
  @returns int 0 if successful, other values indicate errors (-2 when
  _import_array fails; the Python error is printed before returning).
*/
static inline
int init_numpy_API() {
  int out = 0;
#ifdef _OPENMP
#pragma omp critical (numpy)
  {
#endif
  if (PyArray_API == NULL) {
    if (_import_array() < 0) {
      PyErr_Print();
      out = -2;
    }
  }
#ifdef _OPENMP
  }
#endif
  return out;
};
/*!
  @brief Initialize Python if it is not initialized.
  Serialized via an OpenMP critical section.  If the YGG_PYTHON_EXEC
  environment variable is set, it is installed as the program name before
  the interpreter starts.  On success the NumPy C-API is also imported.
  @returns int 0 if successful, other values indicate errors (-1 for
  interpreter/program-name failures, -2 propagated from init_numpy_API).
*/
static inline
int init_python_API() {
  int out = 0;
#ifdef _OPENMP
#pragma omp critical (python)
  {
#endif
  if (!(Py_IsInitialized())) {
    char *name = getenv("YGG_PYTHON_EXEC");
    if (name != NULL) {
      /* Py_SetProgramName takes a wide string; decode using the locale.
         NOTE(review): Py_SetProgramName/Py_DecodeLocale are deprecated in
         recent CPython releases — confirm the supported Python versions. */
      wchar_t *wname = Py_DecodeLocale(name, NULL);
      if (wname == NULL) {
        printf("Error decoding YGG_PYTHON_EXEC\n");
        out = -1;
      } else {
        Py_SetProgramName(wname);
        PyMem_RawFree(wname);
      }
    }
    if (out >= 0) {
      Py_Initialize();
      if (!(Py_IsInitialized()))
        out = -1;
    }
  }
  /* NumPy import runs whether or not the interpreter was already up. */
  if (out >= 0) {
    out = init_numpy_API();
  }
#ifdef _OPENMP
  }
#endif
  return out;
};
//==============================================================================
/*!
Logging
Alliases are set at compile-time based on the value of YGG_CLIENT_DEBUG. If
set to INFO, only messages logged with info or error alias are printed. If
set to DEBUG, messages logged with error, info or debug aliases are printed.
Otherwise, only error messages are printed. If the YGG_CLIENT_DEBUG is
changed, any code including this header must be recompiled for the change to
take effect.
*/
//==============================================================================
/*!
@brief Print a log message.
Prints a formatted message, prepending it with the process id and appending
it with a newline.
@param[in] prefix a constant character pointer to the prefix that should
preceed the message and process id.
@param[in] fmt a constant character pointer to a format string.
@param[in] ap va_list of arguments to be formatted in the format string.
*/
/*!
  @brief Write one formatted log line to stdout.
  The line has the shape
    "<prefix>: <pid>:<thread> [<model>[_copy<n>] ]<message>\n"
  where the model portion appears only when the YGG_MODEL_NAME
  environment variable is set, and the copy suffix only when
  YGG_MODEL_COPY is also set. The stream is flushed after writing.
  @param[in] prefix a constant character pointer to the tag that
  precedes the process and thread ids.
  @param[in] fmt a constant character pointer to a format string.
  @param[in] ap va_list of arguments to be formatted in the format string.
*/
static inline
void yggLog(const char* prefix, const char* fmt, va_list ap) {
const char* model = getenv("YGG_MODEL_NAME");
fprintf(stdout, "%s: %d:%d ", prefix, ygg_getpid(), get_thread_id());
if (model != NULL) {
const char* copy = getenv("YGG_MODEL_COPY");
fprintf(stdout, "%s", model);
if (copy != NULL)
fprintf(stdout, "_copy%s", copy);
fprintf(stdout, " ");
}
vfprintf(stdout, fmt, ap);
fprintf(stdout, "\n");
fflush(stdout);
};
/*!
@brief Print an info log message.
Prints a formatted message, prepending it with INFO and the process id. A
newline character is added to the end of the message.
@param[in] fmt a constant character pointer to a format string.
@param[in] ... arguments to be formatted in the format string.
*/
static inline
void yggInfo(const char* fmt, ...) {
va_list ap;
va_start(ap, fmt);
// Forward the variadic arguments to the common logger.
yggLog("INFO", fmt, ap);
va_end(ap);
};
/*!
@brief Print a debug log message.
Prints a formatted message, prepending it with DEBUG and the process id. A
newline character is added to the end of the message.
@param[in] fmt a constant character pointer to a format string.
@param[in] ... arguments to be formatted in the format string.
*/
static inline
void yggDebug(const char* fmt, ...) {
va_list ap;
va_start(ap, fmt);
// Forward the variadic arguments to the common logger.
yggLog("DEBUG", fmt, ap);
va_end(ap);
};
/*!
@brief Print an error log message from a variable argument list.
Prints a formatted message, prepending it with ERROR and the process id. A
newline character is added to the end of the message.
@param[in] fmt a constant character pointer to a format string.
@param[in] ap va_list Variable argument list of arguments to be
formatted in the format string.
*/
static inline
void yggError_va(const char* fmt, va_list ap) {
yggLog("ERROR", fmt, ap);
// Record that an error was logged so callers can detect failure.
_ygg_error_flag = 1;
};
/*!
@brief Print an error log message.
Prints a formatted message, prepending it with ERROR and the process id. A
newline character is added to the end of the message.
@param[in] fmt a constant character pointer to a format string.
@param[in] ... arguments to be formatted in the format string.
*/
static inline
void yggError(const char* fmt, ...) {
va_list ap;
va_start(ap, fmt);
// Delegate to the va_list variant, which also sets the error flag.
yggError_va(fmt, ap);
va_end(ap);
};
#ifndef DOXYGEN_SHOULD_SKIP_THIS
#ifdef YGG_DEBUG
#if YGG_DEBUG <= 10
#define ygglog_error yggError
#define ygglog_info yggInfo
#define ygglog_debug yggDebug
#elif YGG_DEBUG <= 20
#define ygglog_error yggError
#define ygglog_info yggInfo
#define ygglog_debug while (0) yggDebug
#elif YGG_DEBUG <= 40
#define ygglog_error yggError
#define ygglog_info while (0) yggInfo
#define ygglog_debug while (0) yggDebug
#else
#define ygglog_error while (0) yggError
#define ygglog_info while (0) yggInfo
#define ygglog_debug while (0) yggDebug
#endif
#else
#define ygglog_error yggError
#define ygglog_info while (0) yggInfo
#define ygglog_debug while (0) yggDebug
#endif
#endif // DOXYGEN_SHOULD_SKIP_THIS
/*!
@brief Get the length (in bytes) of a character array containing 4 byte
unicode characters.
@param[in] strarg char* Pointer to character array.
@returns size_t Length of strarg in bytes.
*/
/*!
  @brief Length in bytes of a string of 4-byte (UCS-4) characters.
  Scans in 4-byte steps until the first byte of a character is zero.
  NOTE(review): only the first byte of each 4-byte unit is tested, so
  characters whose low byte is zero would terminate early -- confirm
  callers only pass text where this cannot occur.
  @param[in] strarg char* Pointer to character array (may be NULL).
  @returns size_t Length of strarg in bytes.
*/
static inline
size_t strlen4(char* strarg) {
if (strarg == NULL)
return 0;
char* cursor = strarg;
while (*cursor != '\0')
cursor += 4;
return (size_t)(cursor - strarg);
}
/*!
@brief Called snprintf and realloc buffer if the formatted string is
larger than the provided buffer.
@param[in] dst char** Pointer to buffer where formatted message
should be stored.
@param[in,out] max_len size_t* Pointer to maximum size of buffer
that will be modified when the buffer is reallocated.
@param[in,out] offset size_t* Pointer to offset in buffer where the
formatted message should be stored. This will be updated to the end
of the updated message.
@param[in] format_str const char* Format string that should be used.
@param[in] ... Additional arguments are passed to snprintf as
parameters for formatting.
@returns int -1 if there is an error, otherwise the number of new
characters written to the buffer.
*/
/*!
  @brief Call vsnprintf and realloc the buffer when the formatted
  string (including its null terminator) does not fit.
  @param[in,out] dst char** Pointer to buffer where the formatted
  message should be stored; reallocated in place when too small.
  @param[in,out] max_len size_t* Pointer to maximum size of buffer
  that will be modified when the buffer is reallocated.
  @param[in,out] offset size_t* Pointer to offset in buffer where the
  formatted message should be stored. This will be updated to the end
  of the updated message.
  @param[in] format_str const char* Format string that should be used.
  @param[in] ... Additional arguments are passed to snprintf as
  parameters for formatting.
  @returns int -1 if there is an error, otherwise the number of new
  characters written to the buffer.
*/
static inline
int snprintf_realloc(char** dst, size_t* max_len, size_t* offset,
const char* format_str, ...) {
va_list arglist;
va_start(arglist, format_str);
int fmt_len = 0;
while (1) {
va_list arglist_copy;
va_copy(arglist_copy, arglist);
fmt_len = vsnprintf(dst[0] + offset[0],
max_len[0] - offset[0],
format_str, arglist_copy);
// Every va_copy must be paired with va_end (C11 7.16.1).
va_end(arglist_copy);
if (fmt_len < 0) {
// Encoding or format error: nothing usable was written.
ygglog_error("snprintf_realloc: Error formatting string.");
break;
}
// vsnprintf needs fmt_len + 1 bytes (terminator included), so the
// output is truncated whenever fmt_len >= available space; the
// previous '>' comparison silently accepted a truncated string
// when fmt_len was exactly equal to the remaining space.
if (fmt_len >= (int)(max_len[0] - offset[0])) {
max_len[0] = max_len[0] + fmt_len + 1;
char* temp = (char*)realloc(dst[0], max_len[0]);
if (temp == NULL) {
ygglog_error("snprintf_realloc: Error reallocating buffer.");
fmt_len = -1;
break;
}
dst[0] = temp;
} else {
offset[0] = offset[0] + fmt_len;
break;
}
}
va_end(arglist);
return fmt_len;
};
/*!
@brief Check if a character array matches a message and is non-zero length.
@param[in] pattern constant character pointer to the string to compare against.
@param[in] buf constant character pointer to the string to be checked.
@returns int 1 if buf matches pattern, 0 otherwise.
*/
/*!
  @brief Test whether a string is non-empty and equal to a pattern.
  @param[in] pattern constant character pointer to the expected string.
  @param[in] buf constant character pointer to the string to test
  (may be NULL).
  @returns int 1 if buf matches pattern and is non-empty, 0 otherwise.
*/
static inline
int not_empty_match(const char *pattern, const char *buf) {
if ((buf == NULL) || (buf[0] == '\0'))
return 0;
return (strcmp(buf, pattern) == 0) ? 1 : 0;
};
/*!
@brief Check if a character array matches the internal EOF message.
@param[in] buf constant character pointer to string that should be checked.
@returns int 1 if buf is the EOF message, 0 otherwise.
*/
static inline
int is_eof(const char *buf) {
// YGG_MSG_EOF is the sentinel message that signals end-of-stream.
return not_empty_match(YGG_MSG_EOF, buf);
};
/*!
@brief Check if a character array matches "recv".
@param[in] buf constant character pointer to string that should be checked.
@returns int 1 if buf is the "recv" message, 0 otherwise.
*/
static inline
int is_recv(const char *buf) {
// Literal direction keyword used in channel handshakes.
return not_empty_match("recv", buf);
};
/*!
@brief Check if a character array matches "send".
@param[in] buf constant character pointer to string that should be checked.
@returns int 1 if buf is the "send" message, 0 otherwise.
*/
static inline
int is_send(const char *buf) {
// Literal direction keyword used in channel handshakes.
return not_empty_match("send", buf);
};
/*!
@brief Initialize a variable argument list from an existing va_list.
@returns va_list_t New variable argument list structure.
*/
static inline
va_list_t init_va_list() {
va_list_t out;
// Default to consuming a C va_list rather than a pointer array.
out.using_ptrs = 0;
out.ptrs = NULL;
out.nptrs = 0;
out.iptr = 0;
out.for_fortran = 0;
// NOTE(review): the embedded va_list member is left uninitialized;
// callers presumably va_start/va_copy into it before use -- confirm.
return out;
};
/*! Initialize a variable argument list from an array of pointers.
@param[in] nptrs int Number of pointers.
@param[in] ptrs void** Array of pointers.
@returns va_list_t New variable argument list structure.
*/
static inline
va_list_t init_va_ptrs(const int nptrs, void** ptrs) {
va_list_t out;
// Pointer-array mode: arguments come from ptrs[0..nptrs-1],
// consumed in order starting at index 0.
out.using_ptrs = 1;
out.ptrs = ptrs;
out.nptrs = nptrs;
out.iptr = 0;
out.for_fortran = 0;
return out;
};
/*! Finalize a variable argument list.
@param[in] ap va_list_t Variable argument list.
*/
static inline
void end_va_list(va_list_t *ap) {
// Only lists backed by a real va_list need va_end; pointer-array
// lists do not own the array they reference.
if (!(ap->using_ptrs)) {
va_end(ap->va);
}
};
/*! Copy a variable argument list.
@param[in] ap va_list_t Variable argument list structure to copy.
@returns va_list_t New variable argument list structure.
*/
static inline
va_list_t copy_va_list(va_list_t ap) {
va_list_t out;
if (ap.using_ptrs) {
// Pointer-array mode: share the same array and resume from the
// source's current position.
out = init_va_ptrs(ap.nptrs, ap.ptrs);
out.iptr = ap.iptr;
} else {
// va_list mode: duplicate the underlying list with va_copy.
out = init_va_list();
va_copy(out.va, ap.va);
}
out.for_fortran = ap.for_fortran;
return out;
};
/*! @brief Method for skipping a number of bytes in the argument list.
@param[in] ap va_list_t* Structure containing variable argument list.
@param[in] nbytes size_t Number of bytes that should be skipped.
*/
/*! @brief Method for skipping a number of bytes in the argument list.
  @param[in] ap va_list_t* Structure containing variable argument list.
  @param[in] nbytes size_t Number of bytes that should be skipped.
*/
static inline
void va_list_t_skip(va_list_t *ap, size_t nbytes) {
if (ap->using_ptrs) {
// Pointer-array lists advance by element, regardless of size.
ap->iptr++;
} else {
if (nbytes == sizeof(void*)) {
va_arg(ap->va, void*);
} else if (nbytes == sizeof(size_t)) {
va_arg(ap->va, size_t);
} else if (nbytes == sizeof(char*)) {
va_arg(ap->va, char*);
} else {
// %zu is the portable conversion for size_t; the previous "%ld"
// is undefined behavior on platforms where long is narrower than
// size_t (e.g. 64-bit Windows / LLP64).
printf("WARNING: Cannot get argument of size %zu.\n", nbytes);
va_arg(ap->va, void*);
// va_arg(ap->va, char[nbytes]);
}
}
};
#ifdef __cplusplus /* If this is a C++ compiler, end C linkage */
}
#endif
#endif /*YGGTOOLS_H_*/
|
GB_binop__plus_int32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_08__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_02__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_04__plus_int32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_int32)
// A*D function (colscale): GB (_AxD__plus_int32)
// D*A function (rowscale): GB (_DxB__plus_int32)
// C+=B function (dense accum): GB (_Cdense_accumB__plus_int32)
// C+=b function (dense accum): GB (_Cdense_accumb__plus_int32)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_int32)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_int32)
// C=scalar+B GB (_bind1st__plus_int32)
// C=scalar+B' GB (_bind1st_tran__plus_int32)
// C=A+scalar GB (_bind2nd__plus_int32)
// C=A'+scalar GB (_bind2nd_tran__plus_int32)
// C type: int32_t
// A type: int32_t
// B,b type: int32_t
// BinaryOp: cij = (aij + bij)
#define GB_ATYPE \
int32_t
#define GB_BTYPE \
int32_t
#define GB_CTYPE \
int32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int32_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int32_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x + y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PLUS || GxB_NO_INT32 || GxB_NO_PLUS_INT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the accumulation loop
// lives in the included template. No GB_DISABLE guard here.
void GB (_Cdense_ewise3_accum__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense. Returns GrB_NO_VALUE when
// this operator/type combination is compiled out (see GB_DISABLE).
GrB_Info GB (_Cdense_ewise3_noaccum__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C, parallelized over the
// precomputed B_ek_slicing task partition.
GrB_Info GB (_Cdense_accumB__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB (_Cdense_accumb__plus_int32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int32_t
int32_t bwork = (*((int32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// Unreachable (the block above always returns); kept as generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB (_AxD__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The template writes results directly into C's value array.
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB (_DxB__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The template writes results directly into C's value array.
int32_t *restrict Cx = (int32_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, or C<!M>=A+B with cij = aij + bij on the
// union of the patterns of A and B.
GrB_Info GB (_AaddB__plus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// Workspace declared here so the template can slice M, A, and B;
// GB_FREE_WORK releases it before returning.
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
// eWiseMult (method 08): C=A.*B with optional mask, for sparse/hyper C.
GrB_Info GB (_AemultB_08__plus_int32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 02): C<#> = A.*B when A is sparse/hyper and B is
// bitmap/full. PLUS is commutative (GB_BINOP_FLIP is 0), so only the
// unflipped branch below is compiled.
GrB_Info GB (_AemultB_02__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
// eWiseMult (method 04): C<M> = A.*B with M sparse/hyper and both A
// and B bitmap/full.
GrB_Info GB (_AemultB_04__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B (optionally masked) where C is held as a bitmap.
GrB_Info GB (_AemultB_bitmap__plus_int32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = x + Bx [p] for every entry present in B (Bb is B's bitmap;
// a NULL/true Bb entry means the slot is present).
GrB_Info GB (_bind1st__plus_int32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t *Cx = (int32_t *) Cx_output ;
int32_t x = (*((int32_t *) x_input)) ;
int32_t *Bx = (int32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
// Skip slots not present in the bitmap.
if (!GBB (Bb, p)) continue ;
int32_t bij = GBX (Bx, p, false) ;
Cx [p] = (x + bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = Ax [p] + y for every entry present in A (Ab is A's bitmap).
GrB_Info GB (_bind2nd__plus_int32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int32_t *Cx = (int32_t *) Cx_output ;
int32_t *Ax = (int32_t *) Ax_input ;
int32_t y = (*((int32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
// Skip slots not present in the bitmap.
if (!GBB (Ab, p)) continue ;
int32_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij + y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij) = x + aij, no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x + aij) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB (_bind1st_tran__plus_int32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t x = (*((const int32_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for any code that follows this function.
#undef GB_ATYPE
#define GB_ATYPE \
int32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y) = aij + y, no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int32_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij + y) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB (_bind2nd_tran__plus_int32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int32_t y = (*((const int32_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__rdiv_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__rdiv_uint8
// A.*B function (eWiseMult): GB_AemultB__rdiv_uint8
// A*D function (colscale): GB_AxD__rdiv_uint8
// D*A function (rowscale): GB_DxB__rdiv_uint8
// C+=B function (dense accum): GB_Cdense_accumB__rdiv_uint8
// C+=b function (dense accum): GB_Cdense_accumb__rdiv_uint8
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__rdiv_uint8
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__rdiv_uint8
// C=scalar+B GB_bind1st__rdiv_uint8
// C=scalar+B' GB_bind1st_tran__rdiv_uint8
// C=A+scalar GB_bind2nd__rdiv_uint8
// C=A'+scalar GB_bind2nd_tran__rdiv_uint8
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = GB_IDIV_UNSIGNED (bij, aij, 8)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = GB_IDIV_UNSIGNED (y, x, 8) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RDIV || GxB_NO_UINT8 || GxB_NO_RDIV_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// C += A+B where C, A, and B are all dense; the work is done in the
// included template. No GB_DISABLE guard here.
void GB_Cdense_ewise3_accum__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense. Returns GrB_NO_VALUE when
// this operator/type combination is compiled out (see GB_DISABLE).
GrB_Info GB_Cdense_ewise3_noaccum__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate sparse B into dense C, parallelized over the
// precomputed kfirst/klast/pstart slices.
GrB_Info GB_Cdense_accumB__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar into a dense matrix.
GrB_Info GB_Cdense_accumb__rdiv_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// Unreachable (the block above always returns); kept as generated.
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D: scale the columns of A by the diagonal matrix D.
GrB_Info GB_AxD__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The template writes results directly into C's value array.
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B: scale the rows of B by the diagonal matrix D.
GrB_Info GB_DxB__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
// The template writes results directly into C's value array.
uint8_t *GB_RESTRICT Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B over the union of the patterns of
// A and B, applying the rdiv operator where both are present.
GrB_Info GB_AaddB__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B over the intersection of the
// patterns of A and B.
GrB_Info GB_AemultB__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx [p] = rdiv (x, Bx [p]); per the GB_BINOP definition above,
// rdiv(x,y) = GB_IDIV_UNSIGNED (y, x, 8), i.e. cij = bij / x.
GrB_Info GB_bind1st__rdiv_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t bij = Bx [p] ;
Cx [p] = GB_IDIV_UNSIGNED (bij, x, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx [p] = rdiv (Ax [p], y); per the GB_BINOP definition above,
// rdiv(x,y) = GB_IDIV_UNSIGNED (y, x, 8), i.e. cij = y / aij.
GrB_Info GB_bind2nd__rdiv_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint8_t aij = Ax [p] ;
Cx [p] = GB_IDIV_UNSIGNED (y, aij, 8) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (aij, x, 8) ; \
}
// C = op (x, A'): transpose A and apply the operator with x bound first.
GrB_Info GB_bind1st_tran__rdiv_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// Restore GB_ATYPE for any code that follows this function.
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = GB_IDIV_UNSIGNED (y, aij, 8) ; \
}
// C = op (A', y): transpose A and apply the operator with y bound second.
GrB_Info GB_bind2nd_tran__rdiv_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
indirectaccess2-orig-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
// Two pointers have distance of 12 (p1 - p2 = 12).
// They are used as base addresses for indirect array accesses using an index set (another array).
//
// An index set has two indices with distance of 12 :
// indexSet[5]- indexSet[0] = 533 - 521 = 12
// So there is loop carried dependence for N=0 and N=5.
//
// We use the default loop scheduling (static even) in OpenMP.
// It is possible that two dependent iterations will be scheduled
// within the same chunk and run by the same thread; in that case no data
// race manifests at runtime.
//
// N is 180; the two iterations with i=0 and i=5 have a loop-carried dependence.
// For static even scheduling, we must have at least 36 threads (180/36=5 iterations)
// so iteration 0 and 5 will be scheduled to two different threads.
//
// Liao, 2/07/2017
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
// Index set driving the indirect array accesses in main().  All values lie
// in [521, 2013].  The first row was deliberately edited (531 -> 533) so
// that indexSet[5] - indexSet[0] = 533 - 521 = 12, which equals the offset
// between the two base pointers xa1 and xa3 in main(); that pair of
// entries is what creates the loop-carried dependence.
int indexSet[N] = {
521, 523, 525, 527, 529, 533, // change 531 to 521+12=533
547, 549, 551, 553, 555, 557,
573, 575, 577, 579, 581, 583,
599, 601, 603, 605, 607, 609,
625, 627, 629, 631, 633, 635,
651, 653, 655, 657, 659, 661,
859, 861, 863, 865, 867, 869,
885, 887, 889, 891, 893, 895,
911, 913, 915, 917, 919, 921,
937, 939, 941, 943, 945, 947,
963, 965, 967, 969, 971, 973,
989, 991, 993, 995, 997, 999,
1197, 1199, 1201, 1203, 1205, 1207,
1223, 1225, 1227, 1229, 1231, 1233,
1249, 1251, 1253, 1255, 1257, 1259,
1275, 1277, 1279, 1281, 1283, 1285,
1301, 1303, 1305, 1307, 1309, 1311,
1327, 1329, 1331, 1333, 1335, 1337,
1535, 1537, 1539, 1541, 1543, 1545,
1561, 1563, 1565, 1567, 1569, 1571,
1587, 1589, 1591, 1593, 1595, 1597,
1613, 1615, 1617, 1619, 1621, 1623,
1639, 1641, 1643, 1645, 1647, 1649,
1665, 1667, 1669, 1671, 1673, 1675,
1873, 1875, 1877, 1879, 1881, 1883,
1899, 1901, 1903, 1905, 1907, 1909,
1925, 1927, 1929, 1931, 1933, 1935,
1951, 1953, 1955, 1957, 1959, 1961,
1977, 1979, 1981, 1983, 1985, 1987,
2003, 2005, 2007, 2009, 2011, 2013};
int main (int argc, char* argv[])
{
double * base = (double*) malloc(sizeof(double)* (2013+12+1));
if (base == 0)
{
printf ("Error in malloc(). Aborting ...\n");
return 1;
}
double * xa1 = base;
double * xa3 = xa1 + 12;
int i;
// initialize segments touched by indexSet
for (i =521; i<= 2025; ++i)
{
base[i]=0.5*i;
}
#pragma omp parallel for // default static even scheduling may not trigger data race!
for (i =0; i< N; ++i)
{
int idx = indexSet[i];
xa1[idx]+= 1.0;
xa3[idx]+= 3.0;
}
printf("x1[999]=%f xa3[1285]=%f\n", xa1[999], xa3[1285]);
free (base);
return 0;
}
|
complex-align-2.c | #pragma omp declare target
// Device-visible global (declared inside the `declare target` region
// opened above): the target region below stores the address of a
// device-local variable into it, which forces that variable into
// addressable storage on the device.
_Complex int *g;
#pragma omp end declare target
_Complex float f(void);
// Offloading test (per the filename, complex-align-2.c): a _Complex int
// written inside a target region and mapped back with map(from:) must
// round-trip intact.
int
main ()
{
_Complex int y;
#pragma omp target map(from:y)
{
_Complex int x;
g = &x;
__imag__ x = 1;
__real__ x = 2;
y = x;
}
// verify both components survived the map-back; abort on corruption
if ((__imag__ y != 1)
|| (__real__ y != 2))
__builtin_abort ();
return 0;
}
|
r_numint.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <stdio.h>
#include <complex.h>
#include "config.h"
#include "gto/grid_ao_drv.h"
#include "np_helper/np_helper.h"
#include "vhf/fblas.h"
#include <assert.h>
#define BOXSIZE 56
int VXCao_empty_blocks(char *empty, unsigned char *non0table, int *shls_slice,
int *ao_loc);
/* Compute one grid-block of vm = ao * dm (see the caller's contract:
 * vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc]), skipping BOXSIZE-sized
 * blocks of AOs that non0table marks as entirely screened out.
 * bgrids is the number of grid points handled in this call (<= ngrids);
 * ngrids is the leading dimension of ao and vm. */
static void dot_ao_dm(double complex *vm, double complex *ao, double complex *dm,
int nao, int nocc, int ngrids, int bgrids,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
// partition the nao AOs into boxes; empty[b] != 0 marks a screened box
int nbox = (nao+BOXSIZE-1) / BOXSIZE;
char empty[nbox];
int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);
const char TRANS_T = 'T';
const char TRANS_N = 'N';
const double complex Z1 = 1;
// beta trick: the first non-empty box overwrites vm (beta=0), every
// subsequent box accumulates (beta=1)
double complex beta = 0;
if (has0) {
int box_id, blen, i, j;
size_t b0;
for (box_id = 0; box_id < nbox; box_id++) {
if (!empty[box_id]) {
b0 = box_id * BOXSIZE;
blen = MIN(nao-b0, BOXSIZE);
zgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &blen,
&Z1, ao+b0*ngrids, &ngrids, dm+b0*nocc, &nocc,
&beta, vm, &ngrids);
beta = 1.0;
}
}
if (beta == 0) { // all empty
// no zgemm ran, so vm was never written: zero it explicitly
for (i = 0; i < nocc; i++) {
for (j = 0; j < bgrids; j++) {
vm[i*ngrids+j] = 0;
}
}
}
} else {
// no screening possible: single full-size zgemm, overwriting vm
zgemm_(&TRANS_N, &TRANS_T, &bgrids, &nocc, &nao,
&Z1, ao, &ngrids, dm, &nocc, &beta, vm, &ngrids);
}
}
/* vm[nocc,ngrids] = ao[i,ngrids] * dm[i,nocc]
 * The grid is cut into BLKSIZE chunks and the chunks are distributed over
 * OpenMP threads; each chunk writes a disjoint slice of vm. */
void VXCzdot_ao_dm(double complex *vm, double complex *ao, double complex *dm,
                   int nao, int nocc, int ngrids, int nbas,
                   unsigned char *non0table, int *shls_slice, int *ao_loc)
{
    const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
#pragma omp parallel
    {
        int iblk;
#pragma omp for nowait schedule(static)
        for (iblk = 0; iblk < nblk; iblk++) {
            int grid0 = iblk * BLKSIZE;                 // first grid point of this chunk
            int bgrids = MIN(ngrids - grid0, BLKSIZE);  // chunk length (last may be short)
            dot_ao_dm(vm + grid0, ao + grid0, dm,
                      nao, nocc, ngrids, bgrids,
                      non0table + iblk*nbas, shls_slice, ao_loc);
        }
    }
}
/* conj(vv[n,m]) = ao1[n,ngrids] * conj(ao2[m,ngrids]) */
/* Accumulate one grid-block's contribution into vv, tile by tile
 * (BOXSIZE x BOXSIZE), skipping tiles whose AOs are screened out per
 * non0table.  With hermi != 0 only the tiles jb <= ib are computed; the
 * caller restores the other triangle afterwards (see VXCzdot_ao_ao).
 * zgemm is called with beta = Z1, so contributions ACCUMULATE into vv;
 * vv must be zeroed by the caller before the first call. */
static void dot_ao_ao(double complex *vv, double complex *ao1, double complex *ao2,
int nao, int ngrids, int bgrids, int hermi,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
// partition the nao AOs into boxes; empty[b] != 0 marks a screened box
int nbox = (nao+BOXSIZE-1) / BOXSIZE;
char empty[nbox];
int has0 = VXCao_empty_blocks(empty, non0table, shls_slice, ao_loc);
const char TRANS_C = 'C';
const char TRANS_N = 'N';
const double complex Z1 = 1;
if (has0) {
int ib, jb, leni, lenj;
// jb upper bound: all boxes, or ib+1 (lower triangle) when hermi
int j1 = nbox;
size_t b0i, b0j;
for (ib = 0; ib < nbox; ib++) {
if (!empty[ib]) {
b0i = ib * BOXSIZE;
leni = MIN(nao-b0i, BOXSIZE);
if (hermi) {
j1 = ib + 1;
}
for (jb = 0; jb < j1; jb++) {
if (!empty[jb]) {
b0j = jb * BOXSIZE;
lenj = MIN(nao-b0j, BOXSIZE);
zgemm_(&TRANS_C, &TRANS_N, &lenj, &leni, &bgrids, &Z1,
ao2+b0j*ngrids, &ngrids, ao1+b0i*ngrids, &ngrids,
&Z1, vv+b0i*nao+b0j, &nao);
} }
} }
} else {
// no screening possible: one full-size rank-bgrids update of vv
zgemm_(&TRANS_C, &TRANS_N, &nao, &nao, &bgrids,
&Z1, ao2, &ngrids, ao1, &ngrids, &Z1, vv, &nao);
}
}
/* vv[nao,nao] = conj(ao1[i,nao]) * ao2[i,nao] */
void VXCzdot_ao_ao(double complex *vv, double complex *ao1, double complex *ao2,
int nao, int ngrids, int nbas, int hermi,
unsigned char *non0table, int *shls_slice, int *ao_loc)
{
const int nblk = (ngrids+BLKSIZE-1) / BLKSIZE;
size_t Nao = nao;
// vv accumulates contributions from every grid block: start from zero
NPzset0(vv, Nao * Nao);
#pragma omp parallel
{
int ip, ib;
// Per-thread private accumulator (with 2 elements of slack).
// NOTE(review): the calloc return value is unchecked; on allocation
// failure dot_ao_ao would dereference NULL.  The void interface offers
// no error path -- worth addressing upstream.
double complex *v_priv = calloc(nao*nao+2, sizeof(double complex));
#pragma omp for nowait schedule(static)
for (ib = 0; ib < nblk; ib++) {
ip = ib * BLKSIZE;
dot_ao_ao(v_priv, ao1+ip, ao2+ip,
nao, ngrids, MIN(ngrids-ip, BLKSIZE), hermi,
non0table+ib*nbas, shls_slice, ao_loc);
}
// serialized reduction of private buffers into vv; dot_ao_ao computed
// conj(vv) (see its contract), hence the conj() here
#pragma omp critical
{
for (ip = 0; ip < nao*nao; ip++) {
vv[ip] += conj(v_priv[ip]);
}
}
free(v_priv);
}
// with hermi, dot_ao_ao filled only one triangle; restore the other half
if (hermi != 0) {
NPzhermi_triu(nao, vv, hermi);
}
}
/* aow[i,p] = sum_{ic=0}^{comp-1} ao[ic,i,p] * wv[ic,p]
 * ao is laid out as [comp,nao,ngrids], wv as [comp,ngrids], and the
 * output aow as [nao,ngrids].  Rows of aow are distributed over threads. */
void VXC_zscale_ao(double complex *aow, double complex *ao, double *wv,
                   int comp, int nao, int ngrids)
{
#pragma omp parallel
    {
        size_t ng = ngrids;
        size_t comp_stride = nao * ng;      // elements per component of ao
        int i, ic;
        size_t p;
#pragma omp for schedule(static)
        for (i = 0; i < nao; i++) {
            double complex *src = ao + i * ng;
            double complex *dst = aow + i * ng;
            // component 0 initializes the output row
            for (p = 0; p < ng; p++) {
                dst[p] = src[p] * wv[p];
            }
            // remaining components accumulate into it
            for (ic = 1; ic < comp; ic++) {
                for (p = 0; p < ng; p++) {
                    dst[p] += src[ic*comp_stride+p] * wv[ic*ng+p];
                }
            }
        }
    }
}
// 'ip,ip->p'
/* rho[p] = sum_i Re(bra[i,p])*Re(ket[i,p]) + Im(bra[i,p])*Im(ket[i,p]),
 * i.e. the real part of sum_i conj(bra[i,p]) * ket[i,p] per grid point.
 * The grid is split into one contiguous slice per thread, so every thread
 * writes a disjoint part of rho and no reduction is needed. */
void VXC_zcontract_rho(double *rho, double complex *bra, double complex *ket,
int nao, int ngrids)
{
#pragma omp parallel
{
size_t Ngrids = ngrids;
int nthread = omp_get_num_threads();
// ceil(ngrids/nthread), clamped to at least 1 grid point per slice
int blksize = MAX((Ngrids+nthread-1) / nthread, 1);
int ib, b0, b1, i, j;
#pragma omp for
for (ib = 0; ib < nthread; ib++) {
b0 = ib * blksize;
// trailing threads may get an empty slice (b1 <= b0): loops no-op
b1 = MIN(b0 + blksize, ngrids);
// i == 0 initializes the slice ...
for (j = b0; j < b1; j++) {
rho[j] = creal(bra[j]) * creal(ket[j])
+ cimag(bra[j]) * cimag(ket[j]);
}
// ... remaining AOs accumulate into it
for (i = 1; i < nao; i++) {
for (j = b0; j < b1; j++) {
rho[j] += creal(bra[i*Ngrids+j]) * creal(ket[i*Ngrids+j])
+ cimag(bra[i*Ngrids+j]) * cimag(ket[i*Ngrids+j]);
} }
}
}
}
|
CRC64.h | /*
* Copyright (C) 2015, UChicago Argonne, LLC
* All Rights Reserved
*
* Generic IO (ANL-15-066)
* Hal Finkel, Argonne National Laboratory
*
* OPEN SOURCE LICENSE
*
* Under the terms of Contract No. DE-AC02-06CH11357 with UChicago Argonne,
* LLC, the U.S. Government retains certain rights in this software.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* 3. Neither the names of UChicago Argonne, LLC or the Department of Energy
* nor the names of its contributors may be used to endorse or promote
* products derived from this software without specific prior written
* permission.
*
* *****************************************************************************
*
* DISCLAIMER
* THE SOFTWARE IS SUPPLIED “AS IS” WITHOUT WARRANTY OF ANY KIND. NEITHER THE
 * UNITED STATES GOVERNMENT, NOR THE UNITED STATES DEPARTMENT OF ENERGY, NOR
* UCHICAGO ARGONNE, LLC, NOR ANY OF THEIR EMPLOYEES, MAKES ANY WARRANTY,
* EXPRESS OR IMPLIED, OR ASSUMES ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE
* ACCURACY, COMPLETENESS, OR USEFULNESS OF ANY INFORMATION, DATA, APPARATUS,
* PRODUCT, OR PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE
* PRIVATELY OWNED RIGHTS.
*
* *****************************************************************************
*/
#ifndef CRC64_H
#define CRC64_H
#ifndef __STDC_CONSTANT_MACROS
#define __STDC_CONSTANT_MACROS
#endif
#include <cstdlib>
#include <stdint.h>
#ifdef _OPENMP
#include <omp.h>
#endif
namespace lanl
{
// These functions compute the CRC-64 checksum on a block of data
// and provide a way to combine the checksums on two blocks of data.
// For more information, see:
// http://en.wikipedia.org/wiki/Computation_of_CRC
// http://checksumcrc.blogspot.com/2011/12/should-you-use-crc-or-checksum.html
// http://crcutil.googlecode.com/files/crc-doc.1.0.pdf
// http://www.ross.net/crc/download/crc_v3.txt
// This uses the ECMA-182 polynomial with -1 initialization, and computes
// the bit-reversed CRC.
// The polynomial here is the bit-reversed encoding of 0x42f0e1eba9ea3693.
static const uint64_t crc64_poly = UINT64_C(0xc96c5795d7870f42);
static const uint64_t crc64_table[4][256] = {
{ UINT64_C(0x0000000000000000), UINT64_C(0x1dee8a5e222ca1dc), UINT64_C(0x3bdd14bc445943b8),
UINT64_C(0x26339ee26675e264), UINT64_C(0x77ba297888b28770), UINT64_C(0x6a54a326aa9e26ac),
UINT64_C(0x4c673dc4ccebc4c8), UINT64_C(0x5189b79aeec76514), UINT64_C(0xef7452f111650ee0),
UINT64_C(0xf29ad8af3349af3c), UINT64_C(0xd4a9464d553c4d58), UINT64_C(0xc947cc137710ec84),
UINT64_C(0x98ce7b8999d78990), UINT64_C(0x8520f1d7bbfb284c), UINT64_C(0xa3136f35dd8eca28),
UINT64_C(0xbefde56bffa26bf4), UINT64_C(0x4c300ac98dc40345), UINT64_C(0x51de8097afe8a299),
UINT64_C(0x77ed1e75c99d40fd), UINT64_C(0x6a03942bebb1e121), UINT64_C(0x3b8a23b105768435),
UINT64_C(0x2664a9ef275a25e9), UINT64_C(0x0057370d412fc78d), UINT64_C(0x1db9bd5363036651),
UINT64_C(0xa34458389ca10da5), UINT64_C(0xbeaad266be8dac79), UINT64_C(0x98994c84d8f84e1d),
UINT64_C(0x8577c6dafad4efc1), UINT64_C(0xd4fe714014138ad5), UINT64_C(0xc910fb1e363f2b09),
UINT64_C(0xef2365fc504ac96d), UINT64_C(0xf2cdefa2726668b1), UINT64_C(0x986015931b88068a),
UINT64_C(0x858e9fcd39a4a756), UINT64_C(0xa3bd012f5fd14532), UINT64_C(0xbe538b717dfde4ee),
UINT64_C(0xefda3ceb933a81fa), UINT64_C(0xf234b6b5b1162026), UINT64_C(0xd4072857d763c242),
UINT64_C(0xc9e9a209f54f639e), UINT64_C(0x771447620aed086a), UINT64_C(0x6afacd3c28c1a9b6),
UINT64_C(0x4cc953de4eb44bd2), UINT64_C(0x5127d9806c98ea0e), UINT64_C(0x00ae6e1a825f8f1a),
UINT64_C(0x1d40e444a0732ec6), UINT64_C(0x3b737aa6c606cca2), UINT64_C(0x269df0f8e42a6d7e),
UINT64_C(0xd4501f5a964c05cf), UINT64_C(0xc9be9504b460a413), UINT64_C(0xef8d0be6d2154677),
UINT64_C(0xf26381b8f039e7ab), UINT64_C(0xa3ea36221efe82bf), UINT64_C(0xbe04bc7c3cd22363),
UINT64_C(0x9837229e5aa7c107), UINT64_C(0x85d9a8c0788b60db), UINT64_C(0x3b244dab87290b2f),
UINT64_C(0x26cac7f5a505aaf3), UINT64_C(0x00f95917c3704897), UINT64_C(0x1d17d349e15ce94b),
UINT64_C(0x4c9e64d30f9b8c5f), UINT64_C(0x5170ee8d2db72d83), UINT64_C(0x7743706f4bc2cfe7),
UINT64_C(0x6aadfa3169ee6e3b), UINT64_C(0xa218840d981e1391), UINT64_C(0xbff60e53ba32b24d),
UINT64_C(0x99c590b1dc475029), UINT64_C(0x842b1aeffe6bf1f5), UINT64_C(0xd5a2ad7510ac94e1),
UINT64_C(0xc84c272b3280353d), UINT64_C(0xee7fb9c954f5d759), UINT64_C(0xf391339776d97685),
UINT64_C(0x4d6cd6fc897b1d71), UINT64_C(0x50825ca2ab57bcad), UINT64_C(0x76b1c240cd225ec9),
UINT64_C(0x6b5f481eef0eff15), UINT64_C(0x3ad6ff8401c99a01), UINT64_C(0x273875da23e53bdd),
UINT64_C(0x010beb384590d9b9), UINT64_C(0x1ce5616667bc7865), UINT64_C(0xee288ec415da10d4),
UINT64_C(0xf3c6049a37f6b108), UINT64_C(0xd5f59a785183536c), UINT64_C(0xc81b102673aff2b0),
UINT64_C(0x9992a7bc9d6897a4), UINT64_C(0x847c2de2bf443678), UINT64_C(0xa24fb300d931d41c),
UINT64_C(0xbfa1395efb1d75c0), UINT64_C(0x015cdc3504bf1e34), UINT64_C(0x1cb2566b2693bfe8),
UINT64_C(0x3a81c88940e65d8c), UINT64_C(0x276f42d762cafc50), UINT64_C(0x76e6f54d8c0d9944),
UINT64_C(0x6b087f13ae213898), UINT64_C(0x4d3be1f1c854dafc), UINT64_C(0x50d56bafea787b20),
UINT64_C(0x3a78919e8396151b), UINT64_C(0x27961bc0a1bab4c7), UINT64_C(0x01a58522c7cf56a3),
UINT64_C(0x1c4b0f7ce5e3f77f), UINT64_C(0x4dc2b8e60b24926b), UINT64_C(0x502c32b8290833b7),
UINT64_C(0x761fac5a4f7dd1d3), UINT64_C(0x6bf126046d51700f), UINT64_C(0xd50cc36f92f31bfb),
UINT64_C(0xc8e24931b0dfba27), UINT64_C(0xeed1d7d3d6aa5843), UINT64_C(0xf33f5d8df486f99f),
UINT64_C(0xa2b6ea171a419c8b), UINT64_C(0xbf586049386d3d57), UINT64_C(0x996bfeab5e18df33),
UINT64_C(0x848574f57c347eef), UINT64_C(0x76489b570e52165e), UINT64_C(0x6ba611092c7eb782),
UINT64_C(0x4d958feb4a0b55e6), UINT64_C(0x507b05b56827f43a), UINT64_C(0x01f2b22f86e0912e),
UINT64_C(0x1c1c3871a4cc30f2), UINT64_C(0x3a2fa693c2b9d296), UINT64_C(0x27c12ccde095734a),
UINT64_C(0x993cc9a61f3718be), UINT64_C(0x84d243f83d1bb962), UINT64_C(0xa2e1dd1a5b6e5b06),
UINT64_C(0xbf0f57447942fada), UINT64_C(0xee86e0de97859fce), UINT64_C(0xf3686a80b5a93e12),
UINT64_C(0xd55bf462d3dcdc76), UINT64_C(0xc8b57e3cf1f07daa), UINT64_C(0xd6e9a7309f3239a7),
UINT64_C(0xcb072d6ebd1e987b), UINT64_C(0xed34b38cdb6b7a1f), UINT64_C(0xf0da39d2f947dbc3),
UINT64_C(0xa1538e481780bed7), UINT64_C(0xbcbd041635ac1f0b), UINT64_C(0x9a8e9af453d9fd6f),
UINT64_C(0x876010aa71f55cb3), UINT64_C(0x399df5c18e573747), UINT64_C(0x24737f9fac7b969b),
UINT64_C(0x0240e17dca0e74ff), UINT64_C(0x1fae6b23e822d523), UINT64_C(0x4e27dcb906e5b037),
UINT64_C(0x53c956e724c911eb), UINT64_C(0x75fac80542bcf38f), UINT64_C(0x6814425b60905253),
UINT64_C(0x9ad9adf912f63ae2), UINT64_C(0x873727a730da9b3e), UINT64_C(0xa104b94556af795a),
UINT64_C(0xbcea331b7483d886), UINT64_C(0xed6384819a44bd92), UINT64_C(0xf08d0edfb8681c4e),
UINT64_C(0xd6be903dde1dfe2a), UINT64_C(0xcb501a63fc315ff6), UINT64_C(0x75adff0803933402),
UINT64_C(0x6843755621bf95de), UINT64_C(0x4e70ebb447ca77ba), UINT64_C(0x539e61ea65e6d666),
UINT64_C(0x0217d6708b21b372), UINT64_C(0x1ff95c2ea90d12ae), UINT64_C(0x39cac2cccf78f0ca),
UINT64_C(0x24244892ed545116), UINT64_C(0x4e89b2a384ba3f2d), UINT64_C(0x536738fda6969ef1),
UINT64_C(0x7554a61fc0e37c95), UINT64_C(0x68ba2c41e2cfdd49), UINT64_C(0x39339bdb0c08b85d),
UINT64_C(0x24dd11852e241981), UINT64_C(0x02ee8f674851fbe5), UINT64_C(0x1f0005396a7d5a39),
UINT64_C(0xa1fde05295df31cd), UINT64_C(0xbc136a0cb7f39011), UINT64_C(0x9a20f4eed1867275),
UINT64_C(0x87ce7eb0f3aad3a9), UINT64_C(0xd647c92a1d6db6bd), UINT64_C(0xcba943743f411761),
UINT64_C(0xed9add965934f505), UINT64_C(0xf07457c87b1854d9), UINT64_C(0x02b9b86a097e3c68),
UINT64_C(0x1f5732342b529db4), UINT64_C(0x3964acd64d277fd0), UINT64_C(0x248a26886f0bde0c),
UINT64_C(0x7503911281ccbb18), UINT64_C(0x68ed1b4ca3e01ac4), UINT64_C(0x4ede85aec595f8a0),
UINT64_C(0x53300ff0e7b9597c), UINT64_C(0xedcdea9b181b3288), UINT64_C(0xf02360c53a379354),
UINT64_C(0xd610fe275c427130), UINT64_C(0xcbfe74797e6ed0ec), UINT64_C(0x9a77c3e390a9b5f8),
UINT64_C(0x879949bdb2851424), UINT64_C(0xa1aad75fd4f0f640), UINT64_C(0xbc445d01f6dc579c),
UINT64_C(0x74f1233d072c2a36), UINT64_C(0x691fa96325008bea), UINT64_C(0x4f2c37814375698e),
UINT64_C(0x52c2bddf6159c852), UINT64_C(0x034b0a458f9ead46), UINT64_C(0x1ea5801badb20c9a),
UINT64_C(0x38961ef9cbc7eefe), UINT64_C(0x257894a7e9eb4f22), UINT64_C(0x9b8571cc164924d6),
UINT64_C(0x866bfb923465850a), UINT64_C(0xa05865705210676e), UINT64_C(0xbdb6ef2e703cc6b2),
UINT64_C(0xec3f58b49efba3a6), UINT64_C(0xf1d1d2eabcd7027a), UINT64_C(0xd7e24c08daa2e01e),
UINT64_C(0xca0cc656f88e41c2), UINT64_C(0x38c129f48ae82973), UINT64_C(0x252fa3aaa8c488af),
UINT64_C(0x031c3d48ceb16acb), UINT64_C(0x1ef2b716ec9dcb17), UINT64_C(0x4f7b008c025aae03),
UINT64_C(0x52958ad220760fdf), UINT64_C(0x74a614304603edbb), UINT64_C(0x69489e6e642f4c67),
UINT64_C(0xd7b57b059b8d2793), UINT64_C(0xca5bf15bb9a1864f), UINT64_C(0xec686fb9dfd4642b),
UINT64_C(0xf186e5e7fdf8c5f7), UINT64_C(0xa00f527d133fa0e3), UINT64_C(0xbde1d8233113013f),
UINT64_C(0x9bd246c15766e35b), UINT64_C(0x863ccc9f754a4287), UINT64_C(0xec9136ae1ca42cbc),
UINT64_C(0xf17fbcf03e888d60), UINT64_C(0xd74c221258fd6f04), UINT64_C(0xcaa2a84c7ad1ced8),
UINT64_C(0x9b2b1fd69416abcc), UINT64_C(0x86c59588b63a0a10), UINT64_C(0xa0f60b6ad04fe874),
UINT64_C(0xbd188134f26349a8), UINT64_C(0x03e5645f0dc1225c), UINT64_C(0x1e0bee012fed8380),
UINT64_C(0x383870e3499861e4), UINT64_C(0x25d6fabd6bb4c038), UINT64_C(0x745f4d278573a52c),
UINT64_C(0x69b1c779a75f04f0), UINT64_C(0x4f82599bc12ae694), UINT64_C(0x526cd3c5e3064748),
UINT64_C(0xa0a13c6791602ff9), UINT64_C(0xbd4fb639b34c8e25), UINT64_C(0x9b7c28dbd5396c41),
UINT64_C(0x8692a285f715cd9d), UINT64_C(0xd71b151f19d2a889), UINT64_C(0xcaf59f413bfe0955),
UINT64_C(0xecc601a35d8beb31), UINT64_C(0xf1288bfd7fa74aed), UINT64_C(0x4fd56e9680052119),
UINT64_C(0x523be4c8a22980c5), UINT64_C(0x74087a2ac45c62a1), UINT64_C(0x69e6f074e670c37d),
UINT64_C(0x386f47ee08b7a669), UINT64_C(0x2581cdb02a9b07b5), UINT64_C(0x03b253524ceee5d1),
UINT64_C(0x1e5cd90c6ec2440d) },
{ UINT64_C(0x0000000000000000), UINT64_C(0x3f0be14a916a6dcb), UINT64_C(0x7e17c29522d4db96),
UINT64_C(0x411c23dfb3beb65d), UINT64_C(0xfc2f852a45a9b72c), UINT64_C(0xc3246460d4c3dae7),
UINT64_C(0x823847bf677d6cba), UINT64_C(0xbd33a6f5f6170171), UINT64_C(0x6a87a57f245d70dd),
UINT64_C(0x558c4435b5371d16), UINT64_C(0x149067ea0689ab4b), UINT64_C(0x2b9b86a097e3c680),
UINT64_C(0x96a8205561f4c7f1), UINT64_C(0xa9a3c11ff09eaa3a), UINT64_C(0xe8bfe2c043201c67),
UINT64_C(0xd7b4038ad24a71ac), UINT64_C(0xd50f4afe48bae1ba), UINT64_C(0xea04abb4d9d08c71),
UINT64_C(0xab18886b6a6e3a2c), UINT64_C(0x94136921fb0457e7), UINT64_C(0x2920cfd40d135696),
UINT64_C(0x162b2e9e9c793b5d), UINT64_C(0x57370d412fc78d00), UINT64_C(0x683cec0bbeade0cb),
UINT64_C(0xbf88ef816ce79167), UINT64_C(0x80830ecbfd8dfcac), UINT64_C(0xc19f2d144e334af1),
UINT64_C(0xfe94cc5edf59273a), UINT64_C(0x43a76aab294e264b), UINT64_C(0x7cac8be1b8244b80),
UINT64_C(0x3db0a83e0b9afddd), UINT64_C(0x02bb49749af09016), UINT64_C(0x38c63ad73e7bddf1),
UINT64_C(0x07cddb9daf11b03a), UINT64_C(0x46d1f8421caf0667), UINT64_C(0x79da19088dc56bac),
UINT64_C(0xc4e9bffd7bd26add), UINT64_C(0xfbe25eb7eab80716), UINT64_C(0xbafe7d685906b14b),
UINT64_C(0x85f59c22c86cdc80), UINT64_C(0x52419fa81a26ad2c), UINT64_C(0x6d4a7ee28b4cc0e7),
UINT64_C(0x2c565d3d38f276ba), UINT64_C(0x135dbc77a9981b71), UINT64_C(0xae6e1a825f8f1a00),
UINT64_C(0x9165fbc8cee577cb), UINT64_C(0xd079d8177d5bc196), UINT64_C(0xef72395dec31ac5d),
UINT64_C(0xedc9702976c13c4b), UINT64_C(0xd2c29163e7ab5180), UINT64_C(0x93deb2bc5415e7dd),
UINT64_C(0xacd553f6c57f8a16), UINT64_C(0x11e6f50333688b67), UINT64_C(0x2eed1449a202e6ac),
UINT64_C(0x6ff1379611bc50f1), UINT64_C(0x50fad6dc80d63d3a), UINT64_C(0x874ed556529c4c96),
UINT64_C(0xb845341cc3f6215d), UINT64_C(0xf95917c370489700), UINT64_C(0xc652f689e122facb),
UINT64_C(0x7b61507c1735fbba), UINT64_C(0x446ab136865f9671), UINT64_C(0x057692e935e1202c),
UINT64_C(0x3a7d73a3a48b4de7), UINT64_C(0x718c75ae7cf7bbe2), UINT64_C(0x4e8794e4ed9dd629),
UINT64_C(0x0f9bb73b5e236074), UINT64_C(0x30905671cf490dbf), UINT64_C(0x8da3f084395e0cce),
UINT64_C(0xb2a811cea8346105), UINT64_C(0xf3b432111b8ad758), UINT64_C(0xccbfd35b8ae0ba93),
UINT64_C(0x1b0bd0d158aacb3f), UINT64_C(0x2400319bc9c0a6f4), UINT64_C(0x651c12447a7e10a9),
UINT64_C(0x5a17f30eeb147d62), UINT64_C(0xe72455fb1d037c13), UINT64_C(0xd82fb4b18c6911d8),
UINT64_C(0x9933976e3fd7a785), UINT64_C(0xa6387624aebdca4e), UINT64_C(0xa4833f50344d5a58),
UINT64_C(0x9b88de1aa5273793), UINT64_C(0xda94fdc5169981ce), UINT64_C(0xe59f1c8f87f3ec05),
UINT64_C(0x58acba7a71e4ed74), UINT64_C(0x67a75b30e08e80bf), UINT64_C(0x26bb78ef533036e2),
UINT64_C(0x19b099a5c25a5b29), UINT64_C(0xce049a2f10102a85), UINT64_C(0xf10f7b65817a474e),
UINT64_C(0xb01358ba32c4f113), UINT64_C(0x8f18b9f0a3ae9cd8), UINT64_C(0x322b1f0555b99da9),
UINT64_C(0x0d20fe4fc4d3f062), UINT64_C(0x4c3cdd90776d463f), UINT64_C(0x73373cdae6072bf4),
UINT64_C(0x494a4f79428c6613), UINT64_C(0x7641ae33d3e60bd8), UINT64_C(0x375d8dec6058bd85),
UINT64_C(0x08566ca6f132d04e), UINT64_C(0xb565ca530725d13f), UINT64_C(0x8a6e2b19964fbcf4),
UINT64_C(0xcb7208c625f10aa9), UINT64_C(0xf479e98cb49b6762), UINT64_C(0x23cdea0666d116ce),
UINT64_C(0x1cc60b4cf7bb7b05), UINT64_C(0x5dda28934405cd58), UINT64_C(0x62d1c9d9d56fa093),
UINT64_C(0xdfe26f2c2378a1e2), UINT64_C(0xe0e98e66b212cc29), UINT64_C(0xa1f5adb901ac7a74),
UINT64_C(0x9efe4cf390c617bf), UINT64_C(0x9c4505870a3687a9), UINT64_C(0xa34ee4cd9b5cea62),
UINT64_C(0xe252c71228e25c3f), UINT64_C(0xdd592658b98831f4), UINT64_C(0x606a80ad4f9f3085),
UINT64_C(0x5f6161e7def55d4e), UINT64_C(0x1e7d42386d4beb13), UINT64_C(0x2176a372fc2186d8),
UINT64_C(0xf6c2a0f82e6bf774), UINT64_C(0xc9c941b2bf019abf), UINT64_C(0x88d5626d0cbf2ce2),
UINT64_C(0xb7de83279dd54129), UINT64_C(0x0aed25d26bc24058), UINT64_C(0x35e6c498faa82d93),
UINT64_C(0x74fae74749169bce), UINT64_C(0x4bf1060dd87cf605), UINT64_C(0xe318eb5cf9ef77c4),
UINT64_C(0xdc130a1668851a0f), UINT64_C(0x9d0f29c9db3bac52), UINT64_C(0xa204c8834a51c199),
UINT64_C(0x1f376e76bc46c0e8), UINT64_C(0x203c8f3c2d2cad23), UINT64_C(0x6120ace39e921b7e),
UINT64_C(0x5e2b4da90ff876b5), UINT64_C(0x899f4e23ddb20719), UINT64_C(0xb694af694cd86ad2),
UINT64_C(0xf7888cb6ff66dc8f), UINT64_C(0xc8836dfc6e0cb144), UINT64_C(0x75b0cb09981bb035),
UINT64_C(0x4abb2a430971ddfe), UINT64_C(0x0ba7099cbacf6ba3), UINT64_C(0x34ace8d62ba50668),
UINT64_C(0x3617a1a2b155967e), UINT64_C(0x091c40e8203ffbb5), UINT64_C(0x4800633793814de8),
UINT64_C(0x770b827d02eb2023), UINT64_C(0xca382488f4fc2152), UINT64_C(0xf533c5c265964c99),
UINT64_C(0xb42fe61dd628fac4), UINT64_C(0x8b2407574742970f), UINT64_C(0x5c9004dd9508e6a3),
UINT64_C(0x639be59704628b68), UINT64_C(0x2287c648b7dc3d35), UINT64_C(0x1d8c270226b650fe),
UINT64_C(0xa0bf81f7d0a1518f), UINT64_C(0x9fb460bd41cb3c44), UINT64_C(0xdea84362f2758a19),
UINT64_C(0xe1a3a228631fe7d2), UINT64_C(0xdbded18bc794aa35), UINT64_C(0xe4d530c156fec7fe),
UINT64_C(0xa5c9131ee54071a3), UINT64_C(0x9ac2f254742a1c68), UINT64_C(0x27f154a1823d1d19),
UINT64_C(0x18fab5eb135770d2), UINT64_C(0x59e69634a0e9c68f), UINT64_C(0x66ed777e3183ab44),
UINT64_C(0xb15974f4e3c9dae8), UINT64_C(0x8e5295be72a3b723), UINT64_C(0xcf4eb661c11d017e),
UINT64_C(0xf045572b50776cb5), UINT64_C(0x4d76f1dea6606dc4), UINT64_C(0x727d1094370a000f),
UINT64_C(0x3361334b84b4b652), UINT64_C(0x0c6ad20115dedb99), UINT64_C(0x0ed19b758f2e4b8f),
UINT64_C(0x31da7a3f1e442644), UINT64_C(0x70c659e0adfa9019), UINT64_C(0x4fcdb8aa3c90fdd2),
UINT64_C(0xf2fe1e5fca87fca3), UINT64_C(0xcdf5ff155bed9168), UINT64_C(0x8ce9dccae8532735),
UINT64_C(0xb3e23d8079394afe), UINT64_C(0x64563e0aab733b52), UINT64_C(0x5b5ddf403a195699),
UINT64_C(0x1a41fc9f89a7e0c4), UINT64_C(0x254a1dd518cd8d0f), UINT64_C(0x9879bb20eeda8c7e),
UINT64_C(0xa7725a6a7fb0e1b5), UINT64_C(0xe66e79b5cc0e57e8), UINT64_C(0xd96598ff5d643a23),
UINT64_C(0x92949ef28518cc26), UINT64_C(0xad9f7fb81472a1ed), UINT64_C(0xec835c67a7cc17b0),
UINT64_C(0xd388bd2d36a67a7b), UINT64_C(0x6ebb1bd8c0b17b0a), UINT64_C(0x51b0fa9251db16c1),
UINT64_C(0x10acd94de265a09c), UINT64_C(0x2fa73807730fcd57), UINT64_C(0xf8133b8da145bcfb),
UINT64_C(0xc718dac7302fd130), UINT64_C(0x8604f9188391676d), UINT64_C(0xb90f185212fb0aa6),
UINT64_C(0x043cbea7e4ec0bd7), UINT64_C(0x3b375fed7586661c), UINT64_C(0x7a2b7c32c638d041),
UINT64_C(0x45209d785752bd8a), UINT64_C(0x479bd40ccda22d9c), UINT64_C(0x789035465cc84057),
UINT64_C(0x398c1699ef76f60a), UINT64_C(0x0687f7d37e1c9bc1), UINT64_C(0xbbb45126880b9ab0),
UINT64_C(0x84bfb06c1961f77b), UINT64_C(0xc5a393b3aadf4126), UINT64_C(0xfaa872f93bb52ced),
UINT64_C(0x2d1c7173e9ff5d41), UINT64_C(0x121790397895308a), UINT64_C(0x530bb3e6cb2b86d7),
UINT64_C(0x6c0052ac5a41eb1c), UINT64_C(0xd133f459ac56ea6d), UINT64_C(0xee3815133d3c87a6),
UINT64_C(0xaf2436cc8e8231fb), UINT64_C(0x902fd7861fe85c30), UINT64_C(0xaa52a425bb6311d7),
UINT64_C(0x9559456f2a097c1c), UINT64_C(0xd44566b099b7ca41), UINT64_C(0xeb4e87fa08dda78a),
UINT64_C(0x567d210ffecaa6fb), UINT64_C(0x6976c0456fa0cb30), UINT64_C(0x286ae39adc1e7d6d),
UINT64_C(0x176102d04d7410a6), UINT64_C(0xc0d5015a9f3e610a), UINT64_C(0xffdee0100e540cc1),
UINT64_C(0xbec2c3cfbdeaba9c), UINT64_C(0x81c922852c80d757), UINT64_C(0x3cfa8470da97d626),
UINT64_C(0x03f1653a4bfdbbed), UINT64_C(0x42ed46e5f8430db0), UINT64_C(0x7de6a7af6929607b),
UINT64_C(0x7f5deedbf3d9f06d), UINT64_C(0x40560f9162b39da6), UINT64_C(0x014a2c4ed10d2bfb),
UINT64_C(0x3e41cd0440674630), UINT64_C(0x83726bf1b6704741), UINT64_C(0xbc798abb271a2a8a),
UINT64_C(0xfd65a96494a49cd7), UINT64_C(0xc26e482e05cef11c), UINT64_C(0x15da4ba4d78480b0),
UINT64_C(0x2ad1aaee46eeed7b), UINT64_C(0x6bcd8931f5505b26), UINT64_C(0x54c6687b643a36ed),
UINT64_C(0xe9f5ce8e922d379c), UINT64_C(0xd6fe2fc403475a57), UINT64_C(0x97e20c1bb0f9ec0a),
UINT64_C(0xa8e9ed51219381c1) },
{ UINT64_C(0x0000000000000000), UINT64_C(0x54e979925cd0f10d), UINT64_C(0xa9d2f324b9a1e21a),
UINT64_C(0xfd3b8ab6e5711317), UINT64_C(0xc17d4962dc4ddab1), UINT64_C(0x959430f0809d2bbc),
UINT64_C(0x68afba4665ec38ab), UINT64_C(0x3c46c3d4393cc9a6), UINT64_C(0x10223dee1795abe7),
UINT64_C(0x44cb447c4b455aea), UINT64_C(0xb9f0cecaae3449fd), UINT64_C(0xed19b758f2e4b8f0),
UINT64_C(0xd15f748ccbd87156), UINT64_C(0x85b60d1e9708805b), UINT64_C(0x788d87a87279934c),
UINT64_C(0x2c64fe3a2ea96241), UINT64_C(0x20447bdc2f2b57ce), UINT64_C(0x74ad024e73fba6c3),
UINT64_C(0x899688f8968ab5d4), UINT64_C(0xdd7ff16aca5a44d9), UINT64_C(0xe13932bef3668d7f),
UINT64_C(0xb5d04b2cafb67c72), UINT64_C(0x48ebc19a4ac76f65), UINT64_C(0x1c02b80816179e68),
UINT64_C(0x3066463238befc29), UINT64_C(0x648f3fa0646e0d24), UINT64_C(0x99b4b516811f1e33),
UINT64_C(0xcd5dcc84ddcfef3e), UINT64_C(0xf11b0f50e4f32698), UINT64_C(0xa5f276c2b823d795),
UINT64_C(0x58c9fc745d52c482), UINT64_C(0x0c2085e60182358f), UINT64_C(0x4088f7b85e56af9c),
UINT64_C(0x14618e2a02865e91), UINT64_C(0xe95a049ce7f74d86), UINT64_C(0xbdb37d0ebb27bc8b),
UINT64_C(0x81f5beda821b752d), UINT64_C(0xd51cc748decb8420), UINT64_C(0x28274dfe3bba9737),
UINT64_C(0x7cce346c676a663a), UINT64_C(0x50aaca5649c3047b), UINT64_C(0x0443b3c41513f576),
UINT64_C(0xf9783972f062e661), UINT64_C(0xad9140e0acb2176c), UINT64_C(0x91d78334958edeca),
UINT64_C(0xc53efaa6c95e2fc7), UINT64_C(0x380570102c2f3cd0), UINT64_C(0x6cec098270ffcddd),
UINT64_C(0x60cc8c64717df852), UINT64_C(0x3425f5f62dad095f), UINT64_C(0xc91e7f40c8dc1a48),
UINT64_C(0x9df706d2940ceb45), UINT64_C(0xa1b1c506ad3022e3), UINT64_C(0xf558bc94f1e0d3ee),
UINT64_C(0x086336221491c0f9), UINT64_C(0x5c8a4fb0484131f4), UINT64_C(0x70eeb18a66e853b5),
UINT64_C(0x2407c8183a38a2b8), UINT64_C(0xd93c42aedf49b1af), UINT64_C(0x8dd53b3c839940a2),
UINT64_C(0xb193f8e8baa58904), UINT64_C(0xe57a817ae6757809), UINT64_C(0x18410bcc03046b1e),
UINT64_C(0x4ca8725e5fd49a13), UINT64_C(0x8111ef70bcad5f38), UINT64_C(0xd5f896e2e07dae35),
UINT64_C(0x28c31c54050cbd22), UINT64_C(0x7c2a65c659dc4c2f), UINT64_C(0x406ca61260e08589),
UINT64_C(0x1485df803c307484), UINT64_C(0xe9be5536d9416793), UINT64_C(0xbd572ca48591969e),
UINT64_C(0x9133d29eab38f4df), UINT64_C(0xc5daab0cf7e805d2), UINT64_C(0x38e121ba129916c5),
UINT64_C(0x6c0858284e49e7c8), UINT64_C(0x504e9bfc77752e6e), UINT64_C(0x04a7e26e2ba5df63),
UINT64_C(0xf99c68d8ced4cc74), UINT64_C(0xad75114a92043d79), UINT64_C(0xa15594ac938608f6),
UINT64_C(0xf5bced3ecf56f9fb), UINT64_C(0x088767882a27eaec), UINT64_C(0x5c6e1e1a76f71be1),
UINT64_C(0x6028ddce4fcbd247), UINT64_C(0x34c1a45c131b234a), UINT64_C(0xc9fa2eeaf66a305d),
UINT64_C(0x9d135778aabac150), UINT64_C(0xb177a9428413a311), UINT64_C(0xe59ed0d0d8c3521c),
UINT64_C(0x18a55a663db2410b), UINT64_C(0x4c4c23f46162b006), UINT64_C(0x700ae020585e79a0),
UINT64_C(0x24e399b2048e88ad), UINT64_C(0xd9d81304e1ff9bba), UINT64_C(0x8d316a96bd2f6ab7),
UINT64_C(0xc19918c8e2fbf0a4), UINT64_C(0x9570615abe2b01a9), UINT64_C(0x684bebec5b5a12be),
UINT64_C(0x3ca2927e078ae3b3), UINT64_C(0x00e451aa3eb62a15), UINT64_C(0x540d28386266db18),
UINT64_C(0xa936a28e8717c80f), UINT64_C(0xfddfdb1cdbc73902), UINT64_C(0xd1bb2526f56e5b43),
UINT64_C(0x85525cb4a9beaa4e), UINT64_C(0x7869d6024ccfb959), UINT64_C(0x2c80af90101f4854),
UINT64_C(0x10c66c44292381f2), UINT64_C(0x442f15d675f370ff), UINT64_C(0xb9149f60908263e8),
UINT64_C(0xedfde6f2cc5292e5), UINT64_C(0xe1dd6314cdd0a76a), UINT64_C(0xb5341a8691005667),
UINT64_C(0x480f903074714570), UINT64_C(0x1ce6e9a228a1b47d), UINT64_C(0x20a02a76119d7ddb),
UINT64_C(0x744953e44d4d8cd6), UINT64_C(0x8972d952a83c9fc1), UINT64_C(0xdd9ba0c0f4ec6ecc),
UINT64_C(0xf1ff5efada450c8d), UINT64_C(0xa51627688695fd80), UINT64_C(0x582dadde63e4ee97),
UINT64_C(0x0cc4d44c3f341f9a), UINT64_C(0x308217980608d63c), UINT64_C(0x646b6e0a5ad82731),
UINT64_C(0x9950e4bcbfa93426), UINT64_C(0xcdb99d2ee379c52b), UINT64_C(0x90fb71cad654a0f5),
UINT64_C(0xc41208588a8451f8), UINT64_C(0x392982ee6ff542ef), UINT64_C(0x6dc0fb7c3325b3e2),
UINT64_C(0x518638a80a197a44), UINT64_C(0x056f413a56c98b49), UINT64_C(0xf854cb8cb3b8985e),
UINT64_C(0xacbdb21eef686953), UINT64_C(0x80d94c24c1c10b12), UINT64_C(0xd43035b69d11fa1f),
UINT64_C(0x290bbf007860e908), UINT64_C(0x7de2c69224b01805), UINT64_C(0x41a405461d8cd1a3),
UINT64_C(0x154d7cd4415c20ae), UINT64_C(0xe876f662a42d33b9), UINT64_C(0xbc9f8ff0f8fdc2b4),
UINT64_C(0xb0bf0a16f97ff73b), UINT64_C(0xe4567384a5af0636), UINT64_C(0x196df93240de1521),
UINT64_C(0x4d8480a01c0ee42c), UINT64_C(0x71c2437425322d8a), UINT64_C(0x252b3ae679e2dc87),
UINT64_C(0xd810b0509c93cf90), UINT64_C(0x8cf9c9c2c0433e9d), UINT64_C(0xa09d37f8eeea5cdc),
UINT64_C(0xf4744e6ab23aadd1), UINT64_C(0x094fc4dc574bbec6), UINT64_C(0x5da6bd4e0b9b4fcb),
UINT64_C(0x61e07e9a32a7866d), UINT64_C(0x350907086e777760), UINT64_C(0xc8328dbe8b066477),
UINT64_C(0x9cdbf42cd7d6957a), UINT64_C(0xd073867288020f69), UINT64_C(0x849affe0d4d2fe64),
UINT64_C(0x79a1755631a3ed73), UINT64_C(0x2d480cc46d731c7e), UINT64_C(0x110ecf10544fd5d8),
UINT64_C(0x45e7b682089f24d5), UINT64_C(0xb8dc3c34edee37c2), UINT64_C(0xec3545a6b13ec6cf),
UINT64_C(0xc051bb9c9f97a48e), UINT64_C(0x94b8c20ec3475583), UINT64_C(0x698348b826364694),
UINT64_C(0x3d6a312a7ae6b799), UINT64_C(0x012cf2fe43da7e3f), UINT64_C(0x55c58b6c1f0a8f32),
UINT64_C(0xa8fe01dafa7b9c25), UINT64_C(0xfc177848a6ab6d28), UINT64_C(0xf037fdaea72958a7),
UINT64_C(0xa4de843cfbf9a9aa), UINT64_C(0x59e50e8a1e88babd), UINT64_C(0x0d0c771842584bb0),
UINT64_C(0x314ab4cc7b648216), UINT64_C(0x65a3cd5e27b4731b), UINT64_C(0x989847e8c2c5600c),
UINT64_C(0xcc713e7a9e159101), UINT64_C(0xe015c040b0bcf340), UINT64_C(0xb4fcb9d2ec6c024d),
UINT64_C(0x49c73364091d115a), UINT64_C(0x1d2e4af655cde057), UINT64_C(0x216889226cf129f1),
UINT64_C(0x7581f0b03021d8fc), UINT64_C(0x88ba7a06d550cbeb), UINT64_C(0xdc53039489803ae6),
UINT64_C(0x11ea9eba6af9ffcd), UINT64_C(0x4503e72836290ec0), UINT64_C(0xb8386d9ed3581dd7),
UINT64_C(0xecd1140c8f88ecda), UINT64_C(0xd097d7d8b6b4257c), UINT64_C(0x847eae4aea64d471),
UINT64_C(0x794524fc0f15c766), UINT64_C(0x2dac5d6e53c5366b), UINT64_C(0x01c8a3547d6c542a),
UINT64_C(0x5521dac621bca527), UINT64_C(0xa81a5070c4cdb630), UINT64_C(0xfcf329e2981d473d),
UINT64_C(0xc0b5ea36a1218e9b), UINT64_C(0x945c93a4fdf17f96), UINT64_C(0x6967191218806c81),
UINT64_C(0x3d8e608044509d8c), UINT64_C(0x31aee56645d2a803), UINT64_C(0x65479cf41902590e),
UINT64_C(0x987c1642fc734a19), UINT64_C(0xcc956fd0a0a3bb14), UINT64_C(0xf0d3ac04999f72b2),
UINT64_C(0xa43ad596c54f83bf), UINT64_C(0x59015f20203e90a8), UINT64_C(0x0de826b27cee61a5),
UINT64_C(0x218cd888524703e4), UINT64_C(0x7565a11a0e97f2e9), UINT64_C(0x885e2bacebe6e1fe),
UINT64_C(0xdcb7523eb73610f3), UINT64_C(0xe0f191ea8e0ad955), UINT64_C(0xb418e878d2da2858),
UINT64_C(0x492362ce37ab3b4f), UINT64_C(0x1dca1b5c6b7bca42), UINT64_C(0x5162690234af5051),
UINT64_C(0x058b1090687fa15c), UINT64_C(0xf8b09a268d0eb24b), UINT64_C(0xac59e3b4d1de4346),
UINT64_C(0x901f2060e8e28ae0), UINT64_C(0xc4f659f2b4327bed), UINT64_C(0x39cdd344514368fa),
UINT64_C(0x6d24aad60d9399f7), UINT64_C(0x414054ec233afbb6), UINT64_C(0x15a92d7e7fea0abb),
UINT64_C(0xe892a7c89a9b19ac), UINT64_C(0xbc7bde5ac64be8a1), UINT64_C(0x803d1d8eff772107),
UINT64_C(0xd4d4641ca3a7d00a), UINT64_C(0x29efeeaa46d6c31d), UINT64_C(0x7d0697381a063210),
UINT64_C(0x712612de1b84079f), UINT64_C(0x25cf6b4c4754f692), UINT64_C(0xd8f4e1faa225e585),
UINT64_C(0x8c1d9868fef51488), UINT64_C(0xb05b5bbcc7c9dd2e), UINT64_C(0xe4b2222e9b192c23),
UINT64_C(0x1989a8987e683f34), UINT64_C(0x4d60d10a22b8ce39), UINT64_C(0x61042f300c11ac78),
UINT64_C(0x35ed56a250c15d75), UINT64_C(0xc8d6dc14b5b04e62), UINT64_C(0x9c3fa586e960bf6f),
UINT64_C(0xa0796652d05c76c9), UINT64_C(0xf4901fc08c8c87c4), UINT64_C(0x09ab957669fd94d3),
UINT64_C(0x5d42ece4352d65de) },
{ UINT64_C(0x0000000000000000), UINT64_C(0xb32e4cbe03a75f6f), UINT64_C(0xf4843657a840a05b),
UINT64_C(0x47aa7ae9abe7ff34), UINT64_C(0x7bd0c384ff8f5e33), UINT64_C(0xc8fe8f3afc28015c),
UINT64_C(0x8f54f5d357cffe68), UINT64_C(0x3c7ab96d5468a107), UINT64_C(0xf7a18709ff1ebc66),
UINT64_C(0x448fcbb7fcb9e309), UINT64_C(0x0325b15e575e1c3d), UINT64_C(0xb00bfde054f94352),
UINT64_C(0x8c71448d0091e255), UINT64_C(0x3f5f08330336bd3a), UINT64_C(0x78f572daa8d1420e),
UINT64_C(0xcbdb3e64ab761d61), UINT64_C(0x7d9ba13851336649), UINT64_C(0xceb5ed8652943926),
UINT64_C(0x891f976ff973c612), UINT64_C(0x3a31dbd1fad4997d), UINT64_C(0x064b62bcaebc387a),
UINT64_C(0xb5652e02ad1b6715), UINT64_C(0xf2cf54eb06fc9821), UINT64_C(0x41e11855055bc74e),
UINT64_C(0x8a3a2631ae2dda2f), UINT64_C(0x39146a8fad8a8540), UINT64_C(0x7ebe1066066d7a74),
UINT64_C(0xcd905cd805ca251b), UINT64_C(0xf1eae5b551a2841c), UINT64_C(0x42c4a90b5205db73),
UINT64_C(0x056ed3e2f9e22447), UINT64_C(0xb6409f5cfa457b28), UINT64_C(0xfb374270a266cc92),
UINT64_C(0x48190ecea1c193fd), UINT64_C(0x0fb374270a266cc9), UINT64_C(0xbc9d3899098133a6),
UINT64_C(0x80e781f45de992a1), UINT64_C(0x33c9cd4a5e4ecdce), UINT64_C(0x7463b7a3f5a932fa),
UINT64_C(0xc74dfb1df60e6d95), UINT64_C(0x0c96c5795d7870f4), UINT64_C(0xbfb889c75edf2f9b),
UINT64_C(0xf812f32ef538d0af), UINT64_C(0x4b3cbf90f69f8fc0), UINT64_C(0x774606fda2f72ec7),
UINT64_C(0xc4684a43a15071a8), UINT64_C(0x83c230aa0ab78e9c), UINT64_C(0x30ec7c140910d1f3),
UINT64_C(0x86ace348f355aadb), UINT64_C(0x3582aff6f0f2f5b4), UINT64_C(0x7228d51f5b150a80),
UINT64_C(0xc10699a158b255ef), UINT64_C(0xfd7c20cc0cdaf4e8), UINT64_C(0x4e526c720f7dab87),
UINT64_C(0x09f8169ba49a54b3), UINT64_C(0xbad65a25a73d0bdc), UINT64_C(0x710d64410c4b16bd),
UINT64_C(0xc22328ff0fec49d2), UINT64_C(0x85895216a40bb6e6), UINT64_C(0x36a71ea8a7ace989),
UINT64_C(0x0adda7c5f3c4488e), UINT64_C(0xb9f3eb7bf06317e1), UINT64_C(0xfe5991925b84e8d5),
UINT64_C(0x4d77dd2c5823b7ba), UINT64_C(0x64b62bcaebc387a1), UINT64_C(0xd7986774e864d8ce),
UINT64_C(0x90321d9d438327fa), UINT64_C(0x231c512340247895), UINT64_C(0x1f66e84e144cd992),
UINT64_C(0xac48a4f017eb86fd), UINT64_C(0xebe2de19bc0c79c9), UINT64_C(0x58cc92a7bfab26a6),
UINT64_C(0x9317acc314dd3bc7), UINT64_C(0x2039e07d177a64a8), UINT64_C(0x67939a94bc9d9b9c),
UINT64_C(0xd4bdd62abf3ac4f3), UINT64_C(0xe8c76f47eb5265f4), UINT64_C(0x5be923f9e8f53a9b),
UINT64_C(0x1c4359104312c5af), UINT64_C(0xaf6d15ae40b59ac0), UINT64_C(0x192d8af2baf0e1e8),
UINT64_C(0xaa03c64cb957be87), UINT64_C(0xeda9bca512b041b3), UINT64_C(0x5e87f01b11171edc),
UINT64_C(0x62fd4976457fbfdb), UINT64_C(0xd1d305c846d8e0b4), UINT64_C(0x96797f21ed3f1f80),
UINT64_C(0x2557339fee9840ef), UINT64_C(0xee8c0dfb45ee5d8e), UINT64_C(0x5da24145464902e1),
UINT64_C(0x1a083bacedaefdd5), UINT64_C(0xa9267712ee09a2ba), UINT64_C(0x955cce7fba6103bd),
UINT64_C(0x267282c1b9c65cd2), UINT64_C(0x61d8f8281221a3e6), UINT64_C(0xd2f6b4961186fc89),
UINT64_C(0x9f8169ba49a54b33), UINT64_C(0x2caf25044a02145c), UINT64_C(0x6b055fede1e5eb68),
UINT64_C(0xd82b1353e242b407), UINT64_C(0xe451aa3eb62a1500), UINT64_C(0x577fe680b58d4a6f),
UINT64_C(0x10d59c691e6ab55b), UINT64_C(0xa3fbd0d71dcdea34), UINT64_C(0x6820eeb3b6bbf755),
UINT64_C(0xdb0ea20db51ca83a), UINT64_C(0x9ca4d8e41efb570e), UINT64_C(0x2f8a945a1d5c0861),
UINT64_C(0x13f02d374934a966), UINT64_C(0xa0de61894a93f609), UINT64_C(0xe7741b60e174093d),
UINT64_C(0x545a57dee2d35652), UINT64_C(0xe21ac88218962d7a), UINT64_C(0x5134843c1b317215),
UINT64_C(0x169efed5b0d68d21), UINT64_C(0xa5b0b26bb371d24e), UINT64_C(0x99ca0b06e7197349),
UINT64_C(0x2ae447b8e4be2c26), UINT64_C(0x6d4e3d514f59d312), UINT64_C(0xde6071ef4cfe8c7d),
UINT64_C(0x15bb4f8be788911c), UINT64_C(0xa6950335e42fce73), UINT64_C(0xe13f79dc4fc83147),
UINT64_C(0x521135624c6f6e28), UINT64_C(0x6e6b8c0f1807cf2f), UINT64_C(0xdd45c0b11ba09040),
UINT64_C(0x9aefba58b0476f74), UINT64_C(0x29c1f6e6b3e0301b), UINT64_C(0xc96c5795d7870f42),
UINT64_C(0x7a421b2bd420502d), UINT64_C(0x3de861c27fc7af19), UINT64_C(0x8ec62d7c7c60f076),
UINT64_C(0xb2bc941128085171), UINT64_C(0x0192d8af2baf0e1e), UINT64_C(0x4638a2468048f12a),
UINT64_C(0xf516eef883efae45), UINT64_C(0x3ecdd09c2899b324), UINT64_C(0x8de39c222b3eec4b),
UINT64_C(0xca49e6cb80d9137f), UINT64_C(0x7967aa75837e4c10), UINT64_C(0x451d1318d716ed17),
UINT64_C(0xf6335fa6d4b1b278), UINT64_C(0xb199254f7f564d4c), UINT64_C(0x02b769f17cf11223),
UINT64_C(0xb4f7f6ad86b4690b), UINT64_C(0x07d9ba1385133664), UINT64_C(0x4073c0fa2ef4c950),
UINT64_C(0xf35d8c442d53963f), UINT64_C(0xcf273529793b3738), UINT64_C(0x7c0979977a9c6857),
UINT64_C(0x3ba3037ed17b9763), UINT64_C(0x888d4fc0d2dcc80c), UINT64_C(0x435671a479aad56d),
UINT64_C(0xf0783d1a7a0d8a02), UINT64_C(0xb7d247f3d1ea7536), UINT64_C(0x04fc0b4dd24d2a59),
UINT64_C(0x3886b22086258b5e), UINT64_C(0x8ba8fe9e8582d431), UINT64_C(0xcc0284772e652b05),
UINT64_C(0x7f2cc8c92dc2746a), UINT64_C(0x325b15e575e1c3d0), UINT64_C(0x8175595b76469cbf),
UINT64_C(0xc6df23b2dda1638b), UINT64_C(0x75f16f0cde063ce4), UINT64_C(0x498bd6618a6e9de3),
UINT64_C(0xfaa59adf89c9c28c), UINT64_C(0xbd0fe036222e3db8), UINT64_C(0x0e21ac88218962d7),
UINT64_C(0xc5fa92ec8aff7fb6), UINT64_C(0x76d4de52895820d9), UINT64_C(0x317ea4bb22bfdfed),
UINT64_C(0x8250e80521188082), UINT64_C(0xbe2a516875702185), UINT64_C(0x0d041dd676d77eea),
UINT64_C(0x4aae673fdd3081de), UINT64_C(0xf9802b81de97deb1), UINT64_C(0x4fc0b4dd24d2a599),
UINT64_C(0xfceef8632775faf6), UINT64_C(0xbb44828a8c9205c2), UINT64_C(0x086ace348f355aad),
UINT64_C(0x34107759db5dfbaa), UINT64_C(0x873e3be7d8faa4c5), UINT64_C(0xc094410e731d5bf1),
UINT64_C(0x73ba0db070ba049e), UINT64_C(0xb86133d4dbcc19ff), UINT64_C(0x0b4f7f6ad86b4690),
UINT64_C(0x4ce50583738cb9a4), UINT64_C(0xffcb493d702be6cb), UINT64_C(0xc3b1f050244347cc),
UINT64_C(0x709fbcee27e418a3), UINT64_C(0x3735c6078c03e797), UINT64_C(0x841b8ab98fa4b8f8),
UINT64_C(0xadda7c5f3c4488e3), UINT64_C(0x1ef430e13fe3d78c), UINT64_C(0x595e4a08940428b8),
UINT64_C(0xea7006b697a377d7), UINT64_C(0xd60abfdbc3cbd6d0), UINT64_C(0x6524f365c06c89bf),
UINT64_C(0x228e898c6b8b768b), UINT64_C(0x91a0c532682c29e4), UINT64_C(0x5a7bfb56c35a3485),
UINT64_C(0xe955b7e8c0fd6bea), UINT64_C(0xaeffcd016b1a94de), UINT64_C(0x1dd181bf68bdcbb1),
UINT64_C(0x21ab38d23cd56ab6), UINT64_C(0x9285746c3f7235d9), UINT64_C(0xd52f0e859495caed),
UINT64_C(0x6601423b97329582), UINT64_C(0xd041dd676d77eeaa), UINT64_C(0x636f91d96ed0b1c5),
UINT64_C(0x24c5eb30c5374ef1), UINT64_C(0x97eba78ec690119e), UINT64_C(0xab911ee392f8b099),
UINT64_C(0x18bf525d915feff6), UINT64_C(0x5f1528b43ab810c2), UINT64_C(0xec3b640a391f4fad),
UINT64_C(0x27e05a6e926952cc), UINT64_C(0x94ce16d091ce0da3), UINT64_C(0xd3646c393a29f297),
UINT64_C(0x604a2087398eadf8), UINT64_C(0x5c3099ea6de60cff), UINT64_C(0xef1ed5546e415390),
UINT64_C(0xa8b4afbdc5a6aca4), UINT64_C(0x1b9ae303c601f3cb), UINT64_C(0x56ed3e2f9e224471),
UINT64_C(0xe5c372919d851b1e), UINT64_C(0xa26908783662e42a), UINT64_C(0x114744c635c5bb45),
UINT64_C(0x2d3dfdab61ad1a42), UINT64_C(0x9e13b115620a452d), UINT64_C(0xd9b9cbfcc9edba19),
UINT64_C(0x6a978742ca4ae576), UINT64_C(0xa14cb926613cf817), UINT64_C(0x1262f598629ba778),
UINT64_C(0x55c88f71c97c584c), UINT64_C(0xe6e6c3cfcadb0723), UINT64_C(0xda9c7aa29eb3a624),
UINT64_C(0x69b2361c9d14f94b), UINT64_C(0x2e184cf536f3067f), UINT64_C(0x9d36004b35545910),
UINT64_C(0x2b769f17cf112238), UINT64_C(0x9858d3a9ccb67d57), UINT64_C(0xdff2a94067518263),
UINT64_C(0x6cdce5fe64f6dd0c), UINT64_C(0x50a65c93309e7c0b), UINT64_C(0xe388102d33392364),
UINT64_C(0xa4226ac498dedc50), UINT64_C(0x170c267a9b79833f), UINT64_C(0xdcd7181e300f9e5e),
UINT64_C(0x6ff954a033a8c131), UINT64_C(0x28532e49984f3e05), UINT64_C(0x9b7d62f79be8616a),
UINT64_C(0xa707db9acf80c06d), UINT64_C(0x14299724cc279f02), UINT64_C(0x5383edcd67c06036),
UINT64_C(0xe0ada17364673f59) }
};
static const uint64_t crc64_interleaved_table[4][256] = {
{ UINT64_C(0x0000000000000000), UINT64_C(0xe88a0d0c5521de3d), UINT64_C(0x43ccb533054da2ff),
UINT64_C(0xab46b83f506c7cc2), UINT64_C(0x87996a660a9b45fe), UINT64_C(0x6f13676a5fba9bc3),
UINT64_C(0xc455df550fd6e701), UINT64_C(0x2cdfd2595af7393c), UINT64_C(0x9dea7be7ba389579),
UINT64_C(0x756076ebef194b44), UINT64_C(0xde26ced4bf753786), UINT64_C(0x36acc3d8ea54e9bb),
UINT64_C(0x1a731181b0a3d087), UINT64_C(0xf2f91c8de5820eba), UINT64_C(0x59bfa4b2b5ee7278),
UINT64_C(0xb135a9bee0cfac45), UINT64_C(0xa90c58e4db7f3477), UINT64_C(0x418655e88e5eea4a),
UINT64_C(0xeac0edd7de329688), UINT64_C(0x024ae0db8b1348b5), UINT64_C(0x2e953282d1e47189),
UINT64_C(0xc61f3f8e84c5afb4), UINT64_C(0x6d5987b1d4a9d376), UINT64_C(0x85d38abd81880d4b),
UINT64_C(0x34e623036147a10e), UINT64_C(0xdc6c2e0f34667f33), UINT64_C(0x772a9630640a03f1),
UINT64_C(0x9fa09b3c312bddcc), UINT64_C(0xb37f49656bdce4f0), UINT64_C(0x5bf544693efd3acd),
UINT64_C(0xf0b3fc566e91460f), UINT64_C(0x1839f15a3bb09832), UINT64_C(0xc0c01ee219f0766b),
UINT64_C(0x284a13ee4cd1a856), UINT64_C(0x830cabd11cbdd494), UINT64_C(0x6b86a6dd499c0aa9),
UINT64_C(0x47597484136b3395), UINT64_C(0xafd37988464aeda8), UINT64_C(0x0495c1b71626916a),
UINT64_C(0xec1fccbb43074f57), UINT64_C(0x5d2a6505a3c8e312), UINT64_C(0xb5a06809f6e93d2f),
UINT64_C(0x1ee6d036a68541ed), UINT64_C(0xf66cdd3af3a49fd0), UINT64_C(0xdab30f63a953a6ec),
UINT64_C(0x3239026ffc7278d1), UINT64_C(0x997fba50ac1e0413), UINT64_C(0x71f5b75cf93fda2e),
UINT64_C(0x69cc4606c28f421c), UINT64_C(0x81464b0a97ae9c21), UINT64_C(0x2a00f335c7c2e0e3),
UINT64_C(0xc28afe3992e33ede), UINT64_C(0xee552c60c81407e2), UINT64_C(0x06df216c9d35d9df),
UINT64_C(0xad999953cd59a51d), UINT64_C(0x4513945f98787b20), UINT64_C(0xf4263de178b7d765),
UINT64_C(0x1cac30ed2d960958), UINT64_C(0xb7ea88d27dfa759a), UINT64_C(0x5f6085de28dbaba7),
UINT64_C(0x73bf5787722c929b), UINT64_C(0x9b355a8b270d4ca6), UINT64_C(0x3073e2b477613064),
UINT64_C(0xd8f9efb82240ee59), UINT64_C(0x135892ef9ceef253), UINT64_C(0xfbd29fe3c9cf2c6e),
UINT64_C(0x509427dc99a350ac), UINT64_C(0xb81e2ad0cc828e91), UINT64_C(0x94c1f8899675b7ad),
UINT64_C(0x7c4bf585c3546990), UINT64_C(0xd70d4dba93381552), UINT64_C(0x3f8740b6c619cb6f),
UINT64_C(0x8eb2e90826d6672a), UINT64_C(0x6638e40473f7b917), UINT64_C(0xcd7e5c3b239bc5d5),
UINT64_C(0x25f4513776ba1be8), UINT64_C(0x092b836e2c4d22d4), UINT64_C(0xe1a18e62796cfce9),
UINT64_C(0x4ae7365d2900802b), UINT64_C(0xa26d3b517c215e16), UINT64_C(0xba54ca0b4791c624),
UINT64_C(0x52dec70712b01819), UINT64_C(0xf9987f3842dc64db), UINT64_C(0x1112723417fdbae6),
UINT64_C(0x3dcda06d4d0a83da), UINT64_C(0xd547ad61182b5de7), UINT64_C(0x7e01155e48472125),
UINT64_C(0x968b18521d66ff18), UINT64_C(0x27beb1ecfda9535d), UINT64_C(0xcf34bce0a8888d60),
UINT64_C(0x647204dff8e4f1a2), UINT64_C(0x8cf809d3adc52f9f), UINT64_C(0xa027db8af73216a3),
UINT64_C(0x48add686a213c89e), UINT64_C(0xe3eb6eb9f27fb45c), UINT64_C(0x0b6163b5a75e6a61),
UINT64_C(0xd3988c0d851e8438), UINT64_C(0x3b128101d03f5a05), UINT64_C(0x9054393e805326c7),
UINT64_C(0x78de3432d572f8fa), UINT64_C(0x5401e66b8f85c1c6), UINT64_C(0xbc8beb67daa41ffb),
UINT64_C(0x17cd53588ac86339), UINT64_C(0xff475e54dfe9bd04), UINT64_C(0x4e72f7ea3f261141),
UINT64_C(0xa6f8fae66a07cf7c), UINT64_C(0x0dbe42d93a6bb3be), UINT64_C(0xe5344fd56f4a6d83),
UINT64_C(0xc9eb9d8c35bd54bf), UINT64_C(0x21619080609c8a82), UINT64_C(0x8a2728bf30f0f640),
UINT64_C(0x62ad25b365d1287d), UINT64_C(0x7a94d4e95e61b04f), UINT64_C(0x921ed9e50b406e72),
UINT64_C(0x395861da5b2c12b0), UINT64_C(0xd1d26cd60e0dcc8d), UINT64_C(0xfd0dbe8f54faf5b1),
UINT64_C(0x1587b38301db2b8c), UINT64_C(0xbec10bbc51b7574e), UINT64_C(0x564b06b004968973),
UINT64_C(0xe77eaf0ee4592536), UINT64_C(0x0ff4a202b178fb0b), UINT64_C(0xa4b21a3de11487c9),
UINT64_C(0x4c381731b43559f4), UINT64_C(0x60e7c568eec260c8), UINT64_C(0x886dc864bbe3bef5),
UINT64_C(0x232b705beb8fc237), UINT64_C(0xcba17d57beae1c0a), UINT64_C(0x26b125df39dde4a6),
UINT64_C(0xce3b28d36cfc3a9b), UINT64_C(0x657d90ec3c904659), UINT64_C(0x8df79de069b19864),
UINT64_C(0xa1284fb93346a158), UINT64_C(0x49a242b566677f65), UINT64_C(0xe2e4fa8a360b03a7),
UINT64_C(0x0a6ef786632add9a), UINT64_C(0xbb5b5e3883e571df), UINT64_C(0x53d15334d6c4afe2),
UINT64_C(0xf897eb0b86a8d320), UINT64_C(0x101de607d3890d1d), UINT64_C(0x3cc2345e897e3421),
UINT64_C(0xd4483952dc5fea1c), UINT64_C(0x7f0e816d8c3396de), UINT64_C(0x97848c61d91248e3),
UINT64_C(0x8fbd7d3be2a2d0d1), UINT64_C(0x67377037b7830eec), UINT64_C(0xcc71c808e7ef722e),
UINT64_C(0x24fbc504b2ceac13), UINT64_C(0x0824175de839952f), UINT64_C(0xe0ae1a51bd184b12),
UINT64_C(0x4be8a26eed7437d0), UINT64_C(0xa362af62b855e9ed), UINT64_C(0x125706dc589a45a8),
UINT64_C(0xfadd0bd00dbb9b95), UINT64_C(0x519bb3ef5dd7e757), UINT64_C(0xb911bee308f6396a),
UINT64_C(0x95ce6cba52010056), UINT64_C(0x7d4461b60720de6b), UINT64_C(0xd602d989574ca2a9),
UINT64_C(0x3e88d485026d7c94), UINT64_C(0xe6713b3d202d92cd), UINT64_C(0x0efb3631750c4cf0),
UINT64_C(0xa5bd8e0e25603032), UINT64_C(0x4d3783027041ee0f), UINT64_C(0x61e8515b2ab6d733),
UINT64_C(0x89625c577f97090e), UINT64_C(0x2224e4682ffb75cc), UINT64_C(0xcaaee9647adaabf1),
UINT64_C(0x7b9b40da9a1507b4), UINT64_C(0x93114dd6cf34d989), UINT64_C(0x3857f5e99f58a54b),
UINT64_C(0xd0ddf8e5ca797b76), UINT64_C(0xfc022abc908e424a), UINT64_C(0x148827b0c5af9c77),
UINT64_C(0xbfce9f8f95c3e0b5), UINT64_C(0x57449283c0e23e88), UINT64_C(0x4f7d63d9fb52a6ba),
UINT64_C(0xa7f76ed5ae737887), UINT64_C(0x0cb1d6eafe1f0445), UINT64_C(0xe43bdbe6ab3eda78),
UINT64_C(0xc8e409bff1c9e344), UINT64_C(0x206e04b3a4e83d79), UINT64_C(0x8b28bc8cf48441bb),
UINT64_C(0x63a2b180a1a59f86), UINT64_C(0xd297183e416a33c3), UINT64_C(0x3a1d1532144bedfe),
UINT64_C(0x915bad0d4427913c), UINT64_C(0x79d1a00111064f01), UINT64_C(0x550e72584bf1763d),
UINT64_C(0xbd847f541ed0a800), UINT64_C(0x16c2c76b4ebcd4c2), UINT64_C(0xfe48ca671b9d0aff),
UINT64_C(0x35e9b730a53316f5), UINT64_C(0xdd63ba3cf012c8c8), UINT64_C(0x76250203a07eb40a),
UINT64_C(0x9eaf0f0ff55f6a37), UINT64_C(0xb270dd56afa8530b), UINT64_C(0x5afad05afa898d36),
UINT64_C(0xf1bc6865aae5f1f4), UINT64_C(0x19366569ffc42fc9), UINT64_C(0xa803ccd71f0b838c),
UINT64_C(0x4089c1db4a2a5db1), UINT64_C(0xebcf79e41a462173), UINT64_C(0x034574e84f67ff4e),
UINT64_C(0x2f9aa6b11590c672), UINT64_C(0xc710abbd40b1184f), UINT64_C(0x6c56138210dd648d),
UINT64_C(0x84dc1e8e45fcbab0), UINT64_C(0x9ce5efd47e4c2282), UINT64_C(0x746fe2d82b6dfcbf),
UINT64_C(0xdf295ae77b01807d), UINT64_C(0x37a357eb2e205e40), UINT64_C(0x1b7c85b274d7677c),
UINT64_C(0xf3f688be21f6b941), UINT64_C(0x58b03081719ac583), UINT64_C(0xb03a3d8d24bb1bbe),
UINT64_C(0x010f9433c474b7fb), UINT64_C(0xe985993f915569c6), UINT64_C(0x42c32100c1391504),
UINT64_C(0xaa492c0c9418cb39), UINT64_C(0x8696fe55ceeff205), UINT64_C(0x6e1cf3599bce2c38),
UINT64_C(0xc55a4b66cba250fa), UINT64_C(0x2dd0466a9e838ec7), UINT64_C(0xf529a9d2bcc3609e),
UINT64_C(0x1da3a4dee9e2bea3), UINT64_C(0xb6e51ce1b98ec261), UINT64_C(0x5e6f11edecaf1c5c),
UINT64_C(0x72b0c3b4b6582560), UINT64_C(0x9a3aceb8e379fb5d), UINT64_C(0x317c7687b315879f),
UINT64_C(0xd9f67b8be63459a2), UINT64_C(0x68c3d23506fbf5e7), UINT64_C(0x8049df3953da2bda),
UINT64_C(0x2b0f670603b65718), UINT64_C(0xc3856a0a56978925), UINT64_C(0xef5ab8530c60b019),
UINT64_C(0x07d0b55f59416e24), UINT64_C(0xac960d60092d12e6), UINT64_C(0x441c006c5c0cccdb),
UINT64_C(0x5c25f13667bc54e9), UINT64_C(0xb4affc3a329d8ad4), UINT64_C(0x1fe9440562f1f616),
UINT64_C(0xf763490937d0282b), UINT64_C(0xdbbc9b506d271117), UINT64_C(0x3336965c3806cf2a),
UINT64_C(0x98702e63686ab3e8), UINT64_C(0x70fa236f3d4b6dd5), UINT64_C(0xc1cf8ad1dd84c190),
UINT64_C(0x294587dd88a51fad), UINT64_C(0x82033fe2d8c9636f), UINT64_C(0x6a8932ee8de8bd52),
UINT64_C(0x4656e0b7d71f846e), UINT64_C(0xaedcedbb823e5a53), UINT64_C(0x059a5584d2522691),
UINT64_C(0xed1058888773f8ac) },
{ UINT64_C(0x0000000000000000), UINT64_C(0x4d624bbe73bbc94c), UINT64_C(0x9ac4977ce7779298),
UINT64_C(0xd7a6dcc294cc5bd4), UINT64_C(0xa75181d261e13bb5), UINT64_C(0xea33ca6c125af2f9),
UINT64_C(0x3d9516ae8696a92d), UINT64_C(0x70f75d10f52d6061), UINT64_C(0xdc7bac8f6ccc69ef),
UINT64_C(0x9119e7311f77a0a3), UINT64_C(0x46bf3bf38bbbfb77), UINT64_C(0x0bdd704df800323b),
UINT64_C(0x7b2a2d5d0d2d525a), UINT64_C(0x364866e37e969b16), UINT64_C(0xe1eeba21ea5ac0c2),
UINT64_C(0xac8cf19f99e1098e), UINT64_C(0x2a2ff6357696cd5b), UINT64_C(0x674dbd8b052d0417),
UINT64_C(0xb0eb614991e15fc3), UINT64_C(0xfd892af7e25a968f), UINT64_C(0x8d7e77e71777f6ee),
UINT64_C(0xc01c3c5964cc3fa2), UINT64_C(0x17bae09bf0006476), UINT64_C(0x5ad8ab2583bbad3a),
UINT64_C(0xf6545aba1a5aa4b4), UINT64_C(0xbb36110469e16df8), UINT64_C(0x6c90cdc6fd2d362c),
UINT64_C(0x21f286788e96ff60), UINT64_C(0x5105db687bbb9f01), UINT64_C(0x1c6790d60800564d),
UINT64_C(0xcbc14c149ccc0d99), UINT64_C(0x86a307aaef77c4d5), UINT64_C(0x545fec6aed2d9ab6),
UINT64_C(0x193da7d49e9653fa), UINT64_C(0xce9b7b160a5a082e), UINT64_C(0x83f930a879e1c162),
UINT64_C(0xf30e6db88ccca103), UINT64_C(0xbe6c2606ff77684f), UINT64_C(0x69cafac46bbb339b),
UINT64_C(0x24a8b17a1800fad7), UINT64_C(0x882440e581e1f359), UINT64_C(0xc5460b5bf25a3a15),
UINT64_C(0x12e0d799669661c1), UINT64_C(0x5f829c27152da88d), UINT64_C(0x2f75c137e000c8ec),
UINT64_C(0x62178a8993bb01a0), UINT64_C(0xb5b1564b07775a74), UINT64_C(0xf8d31df574cc9338),
UINT64_C(0x7e701a5f9bbb57ed), UINT64_C(0x331251e1e8009ea1), UINT64_C(0xe4b48d237cccc575),
UINT64_C(0xa9d6c69d0f770c39), UINT64_C(0xd9219b8dfa5a6c58), UINT64_C(0x9443d03389e1a514),
UINT64_C(0x43e50cf11d2dfec0), UINT64_C(0x0e87474f6e96378c), UINT64_C(0xa20bb6d0f7773e02),
UINT64_C(0xef69fd6e84ccf74e), UINT64_C(0x38cf21ac1000ac9a), UINT64_C(0x75ad6a1263bb65d6),
UINT64_C(0x055a3702969605b7), UINT64_C(0x48387cbce52dccfb), UINT64_C(0x9f9ea07e71e1972f),
UINT64_C(0xd2fcebc0025a5e63), UINT64_C(0xa8bfd8d5da5b356c), UINT64_C(0xe5dd936ba9e0fc20),
UINT64_C(0x327b4fa93d2ca7f4), UINT64_C(0x7f1904174e976eb8), UINT64_C(0x0fee5907bbba0ed9),
UINT64_C(0x428c12b9c801c795), UINT64_C(0x952ace7b5ccd9c41), UINT64_C(0xd84885c52f76550d),
UINT64_C(0x74c4745ab6975c83), UINT64_C(0x39a63fe4c52c95cf), UINT64_C(0xee00e32651e0ce1b),
UINT64_C(0xa362a898225b0757), UINT64_C(0xd395f588d7766736), UINT64_C(0x9ef7be36a4cdae7a),
UINT64_C(0x495162f43001f5ae), UINT64_C(0x0433294a43ba3ce2), UINT64_C(0x82902ee0accdf837),
UINT64_C(0xcff2655edf76317b), UINT64_C(0x1854b99c4bba6aaf), UINT64_C(0x5536f2223801a3e3),
UINT64_C(0x25c1af32cd2cc382), UINT64_C(0x68a3e48cbe970ace), UINT64_C(0xbf05384e2a5b511a),
UINT64_C(0xf26773f059e09856), UINT64_C(0x5eeb826fc00191d8), UINT64_C(0x1389c9d1b3ba5894),
UINT64_C(0xc42f151327760340), UINT64_C(0x894d5ead54cdca0c), UINT64_C(0xf9ba03bda1e0aa6d),
UINT64_C(0xb4d84803d25b6321), UINT64_C(0x637e94c1469738f5), UINT64_C(0x2e1cdf7f352cf1b9),
UINT64_C(0xfce034bf3776afda), UINT64_C(0xb1827f0144cd6696), UINT64_C(0x6624a3c3d0013d42),
UINT64_C(0x2b46e87da3baf40e), UINT64_C(0x5bb1b56d5697946f), UINT64_C(0x16d3fed3252c5d23),
UINT64_C(0xc1752211b1e006f7), UINT64_C(0x8c1769afc25bcfbb), UINT64_C(0x209b98305bbac635),
UINT64_C(0x6df9d38e28010f79), UINT64_C(0xba5f0f4cbccd54ad), UINT64_C(0xf73d44f2cf769de1),
UINT64_C(0x87ca19e23a5bfd80), UINT64_C(0xcaa8525c49e034cc), UINT64_C(0x1d0e8e9edd2c6f18),
UINT64_C(0x506cc520ae97a654), UINT64_C(0xd6cfc28a41e06281), UINT64_C(0x9bad8934325babcd),
UINT64_C(0x4c0b55f6a697f019), UINT64_C(0x01691e48d52c3955), UINT64_C(0x719e435820015934),
UINT64_C(0x3cfc08e653ba9078), UINT64_C(0xeb5ad424c776cbac), UINT64_C(0xa6389f9ab4cd02e0),
UINT64_C(0x0ab46e052d2c0b6e), UINT64_C(0x47d625bb5e97c222), UINT64_C(0x9070f979ca5b99f6),
UINT64_C(0xdd12b2c7b9e050ba), UINT64_C(0xade5efd74ccd30db), UINT64_C(0xe087a4693f76f997),
UINT64_C(0x372178ababbaa243), UINT64_C(0x7a433315d8016b0f), UINT64_C(0xc3a71e801bb8745d),
UINT64_C(0x8ec5553e6803bd11), UINT64_C(0x596389fcfccfe6c5), UINT64_C(0x1401c2428f742f89),
UINT64_C(0x64f69f527a594fe8), UINT64_C(0x2994d4ec09e286a4), UINT64_C(0xfe32082e9d2edd70),
UINT64_C(0xb3504390ee95143c), UINT64_C(0x1fdcb20f77741db2), UINT64_C(0x52bef9b104cfd4fe),
UINT64_C(0x8518257390038f2a), UINT64_C(0xc87a6ecde3b84666), UINT64_C(0xb88d33dd16952607),
UINT64_C(0xf5ef7863652eef4b), UINT64_C(0x2249a4a1f1e2b49f), UINT64_C(0x6f2bef1f82597dd3),
UINT64_C(0xe988e8b56d2eb906), UINT64_C(0xa4eaa30b1e95704a), UINT64_C(0x734c7fc98a592b9e),
UINT64_C(0x3e2e3477f9e2e2d2), UINT64_C(0x4ed969670ccf82b3), UINT64_C(0x03bb22d97f744bff),
UINT64_C(0xd41dfe1bebb8102b), UINT64_C(0x997fb5a59803d967), UINT64_C(0x35f3443a01e2d0e9),
UINT64_C(0x78910f84725919a5), UINT64_C(0xaf37d346e6954271), UINT64_C(0xe25598f8952e8b3d),
UINT64_C(0x92a2c5e86003eb5c), UINT64_C(0xdfc08e5613b82210), UINT64_C(0x08665294877479c4),
UINT64_C(0x4504192af4cfb088), UINT64_C(0x97f8f2eaf695eeeb), UINT64_C(0xda9ab954852e27a7),
UINT64_C(0x0d3c659611e27c73), UINT64_C(0x405e2e286259b53f), UINT64_C(0x30a973389774d55e),
UINT64_C(0x7dcb3886e4cf1c12), UINT64_C(0xaa6de444700347c6), UINT64_C(0xe70faffa03b88e8a),
UINT64_C(0x4b835e659a598704), UINT64_C(0x06e115dbe9e24e48), UINT64_C(0xd147c9197d2e159c),
UINT64_C(0x9c2582a70e95dcd0), UINT64_C(0xecd2dfb7fbb8bcb1), UINT64_C(0xa1b09409880375fd),
UINT64_C(0x761648cb1ccf2e29), UINT64_C(0x3b7403756f74e765), UINT64_C(0xbdd704df800323b0),
UINT64_C(0xf0b54f61f3b8eafc), UINT64_C(0x271393a36774b128), UINT64_C(0x6a71d81d14cf7864),
UINT64_C(0x1a86850de1e21805), UINT64_C(0x57e4ceb39259d149), UINT64_C(0x8042127106958a9d),
UINT64_C(0xcd2059cf752e43d1), UINT64_C(0x61aca850eccf4a5f), UINT64_C(0x2ccee3ee9f748313),
UINT64_C(0xfb683f2c0bb8d8c7), UINT64_C(0xb60a74927803118b), UINT64_C(0xc6fd29828d2e71ea),
UINT64_C(0x8b9f623cfe95b8a6), UINT64_C(0x5c39befe6a59e372), UINT64_C(0x115bf54019e22a3e),
UINT64_C(0x6b18c655c1e34131), UINT64_C(0x267a8debb258887d), UINT64_C(0xf1dc51292694d3a9),
UINT64_C(0xbcbe1a97552f1ae5), UINT64_C(0xcc494787a0027a84), UINT64_C(0x812b0c39d3b9b3c8),
UINT64_C(0x568dd0fb4775e81c), UINT64_C(0x1bef9b4534ce2150), UINT64_C(0xb7636adaad2f28de),
UINT64_C(0xfa012164de94e192), UINT64_C(0x2da7fda64a58ba46), UINT64_C(0x60c5b61839e3730a),
UINT64_C(0x1032eb08ccce136b), UINT64_C(0x5d50a0b6bf75da27), UINT64_C(0x8af67c742bb981f3),
UINT64_C(0xc79437ca580248bf), UINT64_C(0x41373060b7758c6a), UINT64_C(0x0c557bdec4ce4526),
UINT64_C(0xdbf3a71c50021ef2), UINT64_C(0x9691eca223b9d7be), UINT64_C(0xe666b1b2d694b7df),
UINT64_C(0xab04fa0ca52f7e93), UINT64_C(0x7ca226ce31e32547), UINT64_C(0x31c06d704258ec0b),
UINT64_C(0x9d4c9cefdbb9e585), UINT64_C(0xd02ed751a8022cc9), UINT64_C(0x07880b933cce771d),
UINT64_C(0x4aea402d4f75be51), UINT64_C(0x3a1d1d3dba58de30), UINT64_C(0x777f5683c9e3177c),
UINT64_C(0xa0d98a415d2f4ca8), UINT64_C(0xedbbc1ff2e9485e4), UINT64_C(0x3f472a3f2ccedb87),
UINT64_C(0x722561815f7512cb), UINT64_C(0xa583bd43cbb9491f), UINT64_C(0xe8e1f6fdb8028053),
UINT64_C(0x9816abed4d2fe032), UINT64_C(0xd574e0533e94297e), UINT64_C(0x02d23c91aa5872aa),
UINT64_C(0x4fb0772fd9e3bbe6), UINT64_C(0xe33c86b04002b268), UINT64_C(0xae5ecd0e33b97b24),
UINT64_C(0x79f811cca77520f0), UINT64_C(0x349a5a72d4cee9bc), UINT64_C(0x446d076221e389dd),
UINT64_C(0x090f4cdc52584091), UINT64_C(0xdea9901ec6941b45), UINT64_C(0x93cbdba0b52fd209),
UINT64_C(0x1568dc0a5a5816dc), UINT64_C(0x580a97b429e3df90), UINT64_C(0x8fac4b76bd2f8444),
UINT64_C(0xc2ce00c8ce944d08), UINT64_C(0xb2395dd83bb92d69), UINT64_C(0xff5b16664802e425),
UINT64_C(0x28fdcaa4dccebff1), UINT64_C(0x659f811aaf7576bd), UINT64_C(0xc913708536947f33),
UINT64_C(0x84713b3b452fb67f), UINT64_C(0x53d7e7f9d1e3edab), UINT64_C(0x1eb5ac47a25824e7),
UINT64_C(0x6e42f15757754486), UINT64_C(0x2320bae924ce8dca), UINT64_C(0xf486662bb002d61e),
UINT64_C(0xb9e42d95c3b91f52) },
{ UINT64_C(0x0000000000000000), UINT64_C(0x1596922b987ef63f), UINT64_C(0x2b2d245730fdec7e),
UINT64_C(0x3ebbb67ca8831a41), UINT64_C(0x565a48ae61fbd8fc), UINT64_C(0x43ccda85f9852ec3),
UINT64_C(0x7d776cf951063482), UINT64_C(0x68e1fed2c978c2bd), UINT64_C(0xacb4915cc3f7b1f8),
UINT64_C(0xb92203775b8947c7), UINT64_C(0x8799b50bf30a5d86), UINT64_C(0x920f27206b74abb9),
UINT64_C(0xfaeed9f2a20c6904), UINT64_C(0xef784bd93a729f3b), UINT64_C(0xd1c3fda592f1857a),
UINT64_C(0xc4556f8e0a8f7345), UINT64_C(0xcbb18d9228e17d75), UINT64_C(0xde271fb9b09f8b4a),
UINT64_C(0xe09ca9c5181c910b), UINT64_C(0xf50a3bee80626734), UINT64_C(0x9debc53c491aa589),
UINT64_C(0x887d5717d16453b6), UINT64_C(0xb6c6e16b79e749f7), UINT64_C(0xa3507340e199bfc8),
UINT64_C(0x67051cceeb16cc8d), UINT64_C(0x72938ee573683ab2), UINT64_C(0x4c283899dbeb20f3),
UINT64_C(0x59beaab24395d6cc), UINT64_C(0x315f54608aed1471), UINT64_C(0x24c9c64b1293e24e),
UINT64_C(0x1a727037ba10f80f), UINT64_C(0x0fe4e21c226e0e30), UINT64_C(0x05bbb40ffecce46f),
UINT64_C(0x102d262466b21250), UINT64_C(0x2e969058ce310811), UINT64_C(0x3b000273564ffe2e),
UINT64_C(0x53e1fca19f373c93), UINT64_C(0x46776e8a0749caac), UINT64_C(0x78ccd8f6afcad0ed),
UINT64_C(0x6d5a4add37b426d2), UINT64_C(0xa90f25533d3b5597), UINT64_C(0xbc99b778a545a3a8),
UINT64_C(0x822201040dc6b9e9), UINT64_C(0x97b4932f95b84fd6), UINT64_C(0xff556dfd5cc08d6b),
UINT64_C(0xeac3ffd6c4be7b54), UINT64_C(0xd47849aa6c3d6115), UINT64_C(0xc1eedb81f443972a),
UINT64_C(0xce0a399dd62d991a), UINT64_C(0xdb9cabb64e536f25), UINT64_C(0xe5271dcae6d07564),
UINT64_C(0xf0b18fe17eae835b), UINT64_C(0x98507133b7d641e6), UINT64_C(0x8dc6e3182fa8b7d9),
UINT64_C(0xb37d5564872bad98), UINT64_C(0xa6ebc74f1f555ba7), UINT64_C(0x62bea8c115da28e2),
UINT64_C(0x77283aea8da4dedd), UINT64_C(0x49938c962527c49c), UINT64_C(0x5c051ebdbd5932a3),
UINT64_C(0x34e4e06f7421f01e), UINT64_C(0x21727244ec5f0621), UINT64_C(0x1fc9c43844dc1c60),
UINT64_C(0x0a5f5613dca2ea5f), UINT64_C(0x0b77681ffd99c8de), UINT64_C(0x1ee1fa3465e73ee1),
UINT64_C(0x205a4c48cd6424a0), UINT64_C(0x35ccde63551ad29f), UINT64_C(0x5d2d20b19c621022),
UINT64_C(0x48bbb29a041ce61d), UINT64_C(0x760004e6ac9ffc5c), UINT64_C(0x639696cd34e10a63),
UINT64_C(0xa7c3f9433e6e7926), UINT64_C(0xb2556b68a6108f19), UINT64_C(0x8ceedd140e939558),
UINT64_C(0x99784f3f96ed6367), UINT64_C(0xf199b1ed5f95a1da), UINT64_C(0xe40f23c6c7eb57e5),
UINT64_C(0xdab495ba6f684da4), UINT64_C(0xcf220791f716bb9b), UINT64_C(0xc0c6e58dd578b5ab),
UINT64_C(0xd55077a64d064394), UINT64_C(0xebebc1dae58559d5), UINT64_C(0xfe7d53f17dfbafea),
UINT64_C(0x969cad23b4836d57), UINT64_C(0x830a3f082cfd9b68), UINT64_C(0xbdb18974847e8129),
UINT64_C(0xa8271b5f1c007716), UINT64_C(0x6c7274d1168f0453), UINT64_C(0x79e4e6fa8ef1f26c),
UINT64_C(0x475f50862672e82d), UINT64_C(0x52c9c2adbe0c1e12), UINT64_C(0x3a283c7f7774dcaf),
UINT64_C(0x2fbeae54ef0a2a90), UINT64_C(0x11051828478930d1), UINT64_C(0x04938a03dff7c6ee),
UINT64_C(0x0eccdc1003552cb1), UINT64_C(0x1b5a4e3b9b2bda8e), UINT64_C(0x25e1f84733a8c0cf),
UINT64_C(0x30776a6cabd636f0), UINT64_C(0x589694be62aef44d), UINT64_C(0x4d000695fad00272),
UINT64_C(0x73bbb0e952531833), UINT64_C(0x662d22c2ca2dee0c), UINT64_C(0xa2784d4cc0a29d49),
UINT64_C(0xb7eedf6758dc6b76), UINT64_C(0x8955691bf05f7137), UINT64_C(0x9cc3fb3068218708),
UINT64_C(0xf42205e2a15945b5), UINT64_C(0xe1b497c93927b38a), UINT64_C(0xdf0f21b591a4a9cb),
UINT64_C(0xca99b39e09da5ff4), UINT64_C(0xc57d51822bb451c4), UINT64_C(0xd0ebc3a9b3caa7fb),
UINT64_C(0xee5075d51b49bdba), UINT64_C(0xfbc6e7fe83374b85), UINT64_C(0x9327192c4a4f8938),
UINT64_C(0x86b18b07d2317f07), UINT64_C(0xb80a3d7b7ab26546), UINT64_C(0xad9caf50e2cc9379),
UINT64_C(0x69c9c0dee843e03c), UINT64_C(0x7c5f52f5703d1603), UINT64_C(0x42e4e489d8be0c42),
UINT64_C(0x577276a240c0fa7d), UINT64_C(0x3f93887089b838c0), UINT64_C(0x2a051a5b11c6ceff),
UINT64_C(0x14beac27b945d4be), UINT64_C(0x01283e0c213b2281), UINT64_C(0x16eed03ffb3391bc),
UINT64_C(0x03784214634d6783), UINT64_C(0x3dc3f468cbce7dc2), UINT64_C(0x2855664353b08bfd),
UINT64_C(0x40b498919ac84940), UINT64_C(0x55220aba02b6bf7f), UINT64_C(0x6b99bcc6aa35a53e),
UINT64_C(0x7e0f2eed324b5301), UINT64_C(0xba5a416338c42044), UINT64_C(0xafccd348a0bad67b),
UINT64_C(0x917765340839cc3a), UINT64_C(0x84e1f71f90473a05), UINT64_C(0xec0009cd593ff8b8),
UINT64_C(0xf9969be6c1410e87), UINT64_C(0xc72d2d9a69c214c6), UINT64_C(0xd2bbbfb1f1bce2f9),
UINT64_C(0xdd5f5dadd3d2ecc9), UINT64_C(0xc8c9cf864bac1af6), UINT64_C(0xf67279fae32f00b7),
UINT64_C(0xe3e4ebd17b51f688), UINT64_C(0x8b051503b2293435), UINT64_C(0x9e9387282a57c20a),
UINT64_C(0xa028315482d4d84b), UINT64_C(0xb5bea37f1aaa2e74), UINT64_C(0x71ebccf110255d31),
UINT64_C(0x647d5eda885bab0e), UINT64_C(0x5ac6e8a620d8b14f), UINT64_C(0x4f507a8db8a64770),
UINT64_C(0x27b1845f71de85cd), UINT64_C(0x32271674e9a073f2), UINT64_C(0x0c9ca008412369b3),
UINT64_C(0x190a3223d95d9f8c), UINT64_C(0x1355643005ff75d3), UINT64_C(0x06c3f61b9d8183ec),
UINT64_C(0x38784067350299ad), UINT64_C(0x2deed24cad7c6f92), UINT64_C(0x450f2c9e6404ad2f),
UINT64_C(0x5099beb5fc7a5b10), UINT64_C(0x6e2208c954f94151), UINT64_C(0x7bb49ae2cc87b76e),
UINT64_C(0xbfe1f56cc608c42b), UINT64_C(0xaa7767475e763214), UINT64_C(0x94ccd13bf6f52855),
UINT64_C(0x815a43106e8bde6a), UINT64_C(0xe9bbbdc2a7f31cd7), UINT64_C(0xfc2d2fe93f8deae8),
UINT64_C(0xc2969995970ef0a9), UINT64_C(0xd7000bbe0f700696), UINT64_C(0xd8e4e9a22d1e08a6),
UINT64_C(0xcd727b89b560fe99), UINT64_C(0xf3c9cdf51de3e4d8), UINT64_C(0xe65f5fde859d12e7),
UINT64_C(0x8ebea10c4ce5d05a), UINT64_C(0x9b283327d49b2665), UINT64_C(0xa593855b7c183c24),
UINT64_C(0xb0051770e466ca1b), UINT64_C(0x745078feeee9b95e), UINT64_C(0x61c6ead576974f61),
UINT64_C(0x5f7d5ca9de145520), UINT64_C(0x4aebce82466aa31f), UINT64_C(0x220a30508f1261a2),
UINT64_C(0x379ca27b176c979d), UINT64_C(0x09271407bfef8ddc), UINT64_C(0x1cb1862c27917be3),
UINT64_C(0x1d99b82006aa5962), UINT64_C(0x080f2a0b9ed4af5d), UINT64_C(0x36b49c773657b51c),
UINT64_C(0x23220e5cae294323), UINT64_C(0x4bc3f08e6751819e), UINT64_C(0x5e5562a5ff2f77a1),
UINT64_C(0x60eed4d957ac6de0), UINT64_C(0x757846f2cfd29bdf), UINT64_C(0xb12d297cc55de89a),
UINT64_C(0xa4bbbb575d231ea5), UINT64_C(0x9a000d2bf5a004e4), UINT64_C(0x8f969f006ddef2db),
UINT64_C(0xe77761d2a4a63066), UINT64_C(0xf2e1f3f93cd8c659), UINT64_C(0xcc5a4585945bdc18),
UINT64_C(0xd9ccd7ae0c252a27), UINT64_C(0xd62835b22e4b2417), UINT64_C(0xc3bea799b635d228),
UINT64_C(0xfd0511e51eb6c869), UINT64_C(0xe89383ce86c83e56), UINT64_C(0x80727d1c4fb0fceb),
UINT64_C(0x95e4ef37d7ce0ad4), UINT64_C(0xab5f594b7f4d1095), UINT64_C(0xbec9cb60e733e6aa),
UINT64_C(0x7a9ca4eeedbc95ef), UINT64_C(0x6f0a36c575c263d0), UINT64_C(0x51b180b9dd417991),
UINT64_C(0x44271292453f8fae), UINT64_C(0x2cc6ec408c474d13), UINT64_C(0x39507e6b1439bb2c),
UINT64_C(0x07ebc817bcbaa16d), UINT64_C(0x127d5a3c24c45752), UINT64_C(0x18220c2ff866bd0d),
UINT64_C(0x0db49e0460184b32), UINT64_C(0x330f2878c89b5173), UINT64_C(0x2699ba5350e5a74c),
UINT64_C(0x4e784481999d65f1), UINT64_C(0x5beed6aa01e393ce), UINT64_C(0x655560d6a960898f),
UINT64_C(0x70c3f2fd311e7fb0), UINT64_C(0xb4969d733b910cf5), UINT64_C(0xa1000f58a3effaca),
UINT64_C(0x9fbbb9240b6ce08b), UINT64_C(0x8a2d2b0f931216b4), UINT64_C(0xe2ccd5dd5a6ad409),
UINT64_C(0xf75a47f6c2142236), UINT64_C(0xc9e1f18a6a973877), UINT64_C(0xdc7763a1f2e9ce48),
UINT64_C(0xd39381bdd087c078), UINT64_C(0xc605139648f93647), UINT64_C(0xf8bea5eae07a2c06),
UINT64_C(0xed2837c17804da39), UINT64_C(0x85c9c913b17c1884), UINT64_C(0x905f5b382902eebb),
UINT64_C(0xaee4ed448181f4fa), UINT64_C(0xbb727f6f19ff02c5), UINT64_C(0x7f2710e113707180),
UINT64_C(0x6ab182ca8b0e87bf), UINT64_C(0x540a34b6238d9dfe), UINT64_C(0x419ca69dbbf36bc1),
UINT64_C(0x297d584f728ba97c), UINT64_C(0x3cebca64eaf55f43), UINT64_C(0x02507c1842764502),
UINT64_C(0x17c6ee33da08b33d) },
{ UINT64_C(0x0000000000000000), UINT64_C(0x2ddda07ff6672378), UINT64_C(0x5bbb40ffecce46f0),
UINT64_C(0x7666e0801aa96588), UINT64_C(0xb77681ffd99c8de0), UINT64_C(0x9aab21802ffbae98),
UINT64_C(0xeccdc1003552cb10), UINT64_C(0xc110617fc335e868), UINT64_C(0xfc35acd41c370545),
UINT64_C(0xd1e80cabea50263d), UINT64_C(0xa78eec2bf0f943b5), UINT64_C(0x8a534c54069e60cd),
UINT64_C(0x4b432d2bc5ab88a5), UINT64_C(0x669e8d5433ccabdd), UINT64_C(0x10f86dd42965ce55),
UINT64_C(0x3d25cdabdf02ed2d), UINT64_C(0x6ab3f6839760140f), UINT64_C(0x476e56fc61073777),
UINT64_C(0x3108b67c7bae52ff), UINT64_C(0x1cd516038dc97187), UINT64_C(0xddc5777c4efc99ef),
UINT64_C(0xf018d703b89bba97), UINT64_C(0x867e3783a232df1f), UINT64_C(0xaba397fc5455fc67),
UINT64_C(0x96865a578b57114a), UINT64_C(0xbb5bfa287d303232), UINT64_C(0xcd3d1aa8679957ba),
UINT64_C(0xe0e0bad791fe74c2), UINT64_C(0x21f0dba852cb9caa), UINT64_C(0x0c2d7bd7a4acbfd2),
UINT64_C(0x7a4b9b57be05da5a), UINT64_C(0x57963b284862f922), UINT64_C(0xd567ed072ec0281e),
UINT64_C(0xf8ba4d78d8a70b66), UINT64_C(0x8edcadf8c20e6eee), UINT64_C(0xa3010d8734694d96),
UINT64_C(0x62116cf8f75ca5fe), UINT64_C(0x4fcccc87013b8686), UINT64_C(0x39aa2c071b92e30e),
UINT64_C(0x14778c78edf5c076), UINT64_C(0x295241d332f72d5b), UINT64_C(0x048fe1acc4900e23),
UINT64_C(0x72e9012cde396bab), UINT64_C(0x5f34a153285e48d3), UINT64_C(0x9e24c02ceb6ba0bb),
UINT64_C(0xb3f960531d0c83c3), UINT64_C(0xc59f80d307a5e64b), UINT64_C(0xe84220acf1c2c533),
UINT64_C(0xbfd41b84b9a03c11), UINT64_C(0x9209bbfb4fc71f69), UINT64_C(0xe46f5b7b556e7ae1),
UINT64_C(0xc9b2fb04a3095999), UINT64_C(0x08a29a7b603cb1f1), UINT64_C(0x257f3a04965b9289),
UINT64_C(0x5319da848cf2f701), UINT64_C(0x7ec47afb7a95d479), UINT64_C(0x43e1b750a5973954),
UINT64_C(0x6e3c172f53f01a2c), UINT64_C(0x185af7af49597fa4), UINT64_C(0x358757d0bf3e5cdc),
UINT64_C(0xf49736af7c0bb4b4), UINT64_C(0xd94a96d08a6c97cc), UINT64_C(0xaf2c765090c5f244),
UINT64_C(0x82f1d62f66a2d13c), UINT64_C(0x38177525f28e4eb9), UINT64_C(0x15cad55a04e96dc1),
UINT64_C(0x63ac35da1e400849), UINT64_C(0x4e7195a5e8272b31), UINT64_C(0x8f61f4da2b12c359),
UINT64_C(0xa2bc54a5dd75e021), UINT64_C(0xd4dab425c7dc85a9), UINT64_C(0xf907145a31bba6d1),
UINT64_C(0xc422d9f1eeb94bfc), UINT64_C(0xe9ff798e18de6884), UINT64_C(0x9f99990e02770d0c),
UINT64_C(0xb2443971f4102e74), UINT64_C(0x7354580e3725c61c), UINT64_C(0x5e89f871c142e564),
UINT64_C(0x28ef18f1dbeb80ec), UINT64_C(0x0532b88e2d8ca394), UINT64_C(0x52a483a665ee5ab6),
UINT64_C(0x7f7923d9938979ce), UINT64_C(0x091fc35989201c46), UINT64_C(0x24c263267f473f3e),
UINT64_C(0xe5d20259bc72d756), UINT64_C(0xc80fa2264a15f42e), UINT64_C(0xbe6942a650bc91a6),
UINT64_C(0x93b4e2d9a6dbb2de), UINT64_C(0xae912f7279d95ff3), UINT64_C(0x834c8f0d8fbe7c8b),
UINT64_C(0xf52a6f8d95171903), UINT64_C(0xd8f7cff263703a7b), UINT64_C(0x19e7ae8da045d213),
UINT64_C(0x343a0ef25622f16b), UINT64_C(0x425cee724c8b94e3), UINT64_C(0x6f814e0dbaecb79b),
UINT64_C(0xed709822dc4e66a7), UINT64_C(0xc0ad385d2a2945df), UINT64_C(0xb6cbd8dd30802057),
UINT64_C(0x9b1678a2c6e7032f), UINT64_C(0x5a0619dd05d2eb47), UINT64_C(0x77dbb9a2f3b5c83f),
UINT64_C(0x01bd5922e91cadb7), UINT64_C(0x2c60f95d1f7b8ecf), UINT64_C(0x114534f6c07963e2),
UINT64_C(0x3c989489361e409a), UINT64_C(0x4afe74092cb72512), UINT64_C(0x6723d476dad0066a),
UINT64_C(0xa633b50919e5ee02), UINT64_C(0x8bee1576ef82cd7a), UINT64_C(0xfd88f5f6f52ba8f2),
UINT64_C(0xd0555589034c8b8a), UINT64_C(0x87c36ea14b2e72a8), UINT64_C(0xaa1ecedebd4951d0),
UINT64_C(0xdc782e5ea7e03458), UINT64_C(0xf1a58e2151871720), UINT64_C(0x30b5ef5e92b2ff48),
UINT64_C(0x1d684f2164d5dc30), UINT64_C(0x6b0eafa17e7cb9b8), UINT64_C(0x46d30fde881b9ac0),
UINT64_C(0x7bf6c275571977ed), UINT64_C(0x562b620aa17e5495), UINT64_C(0x204d828abbd7311d),
UINT64_C(0x0d9022f54db01265), UINT64_C(0xcc80438a8e85fa0d), UINT64_C(0xe15de3f578e2d975),
UINT64_C(0x973b0375624bbcfd), UINT64_C(0xbae6a30a942c9f85), UINT64_C(0x702eea4be51c9d72),
UINT64_C(0x5df34a34137bbe0a), UINT64_C(0x2b95aab409d2db82), UINT64_C(0x06480acbffb5f8fa),
UINT64_C(0xc7586bb43c801092), UINT64_C(0xea85cbcbcae733ea), UINT64_C(0x9ce32b4bd04e5662),
UINT64_C(0xb13e8b342629751a), UINT64_C(0x8c1b469ff92b9837), UINT64_C(0xa1c6e6e00f4cbb4f),
UINT64_C(0xd7a0066015e5dec7), UINT64_C(0xfa7da61fe382fdbf), UINT64_C(0x3b6dc76020b715d7),
UINT64_C(0x16b0671fd6d036af), UINT64_C(0x60d6879fcc795327), UINT64_C(0x4d0b27e03a1e705f),
UINT64_C(0x1a9d1cc8727c897d), UINT64_C(0x3740bcb7841baa05), UINT64_C(0x41265c379eb2cf8d),
UINT64_C(0x6cfbfc4868d5ecf5), UINT64_C(0xadeb9d37abe0049d), UINT64_C(0x80363d485d8727e5),
UINT64_C(0xf650ddc8472e426d), UINT64_C(0xdb8d7db7b1496115), UINT64_C(0xe6a8b01c6e4b8c38),
UINT64_C(0xcb751063982caf40), UINT64_C(0xbd13f0e38285cac8), UINT64_C(0x90ce509c74e2e9b0),
UINT64_C(0x51de31e3b7d701d8), UINT64_C(0x7c03919c41b022a0), UINT64_C(0x0a65711c5b194728),
UINT64_C(0x27b8d163ad7e6450), UINT64_C(0xa549074ccbdcb56c), UINT64_C(0x8894a7333dbb9614),
UINT64_C(0xfef247b32712f39c), UINT64_C(0xd32fe7ccd175d0e4), UINT64_C(0x123f86b31240388c),
UINT64_C(0x3fe226cce4271bf4), UINT64_C(0x4984c64cfe8e7e7c), UINT64_C(0x6459663308e95d04),
UINT64_C(0x597cab98d7ebb029), UINT64_C(0x74a10be7218c9351), UINT64_C(0x02c7eb673b25f6d9),
UINT64_C(0x2f1a4b18cd42d5a1), UINT64_C(0xee0a2a670e773dc9), UINT64_C(0xc3d78a18f8101eb1),
UINT64_C(0xb5b16a98e2b97b39), UINT64_C(0x986ccae714de5841), UINT64_C(0xcffaf1cf5cbca163),
UINT64_C(0xe22751b0aadb821b), UINT64_C(0x9441b130b072e793), UINT64_C(0xb99c114f4615c4eb),
UINT64_C(0x788c703085202c83), UINT64_C(0x5551d04f73470ffb), UINT64_C(0x233730cf69ee6a73),
UINT64_C(0x0eea90b09f89490b), UINT64_C(0x33cf5d1b408ba426), UINT64_C(0x1e12fd64b6ec875e),
UINT64_C(0x68741de4ac45e2d6), UINT64_C(0x45a9bd9b5a22c1ae), UINT64_C(0x84b9dce4991729c6),
UINT64_C(0xa9647c9b6f700abe), UINT64_C(0xdf029c1b75d96f36), UINT64_C(0xf2df3c6483be4c4e),
UINT64_C(0x48399f6e1792d3cb), UINT64_C(0x65e43f11e1f5f0b3), UINT64_C(0x1382df91fb5c953b),
UINT64_C(0x3e5f7fee0d3bb643), UINT64_C(0xff4f1e91ce0e5e2b), UINT64_C(0xd292beee38697d53),
UINT64_C(0xa4f45e6e22c018db), UINT64_C(0x8929fe11d4a73ba3), UINT64_C(0xb40c33ba0ba5d68e),
UINT64_C(0x99d193c5fdc2f5f6), UINT64_C(0xefb77345e76b907e), UINT64_C(0xc26ad33a110cb306),
UINT64_C(0x037ab245d2395b6e), UINT64_C(0x2ea7123a245e7816), UINT64_C(0x58c1f2ba3ef71d9e),
UINT64_C(0x751c52c5c8903ee6), UINT64_C(0x228a69ed80f2c7c4), UINT64_C(0x0f57c9927695e4bc),
UINT64_C(0x793129126c3c8134), UINT64_C(0x54ec896d9a5ba24c), UINT64_C(0x95fce812596e4a24),
UINT64_C(0xb821486daf09695c), UINT64_C(0xce47a8edb5a00cd4), UINT64_C(0xe39a089243c72fac),
UINT64_C(0xdebfc5399cc5c281), UINT64_C(0xf36265466aa2e1f9), UINT64_C(0x850485c6700b8471),
UINT64_C(0xa8d925b9866ca709), UINT64_C(0x69c944c645594f61), UINT64_C(0x4414e4b9b33e6c19),
UINT64_C(0x32720439a9970991), UINT64_C(0x1fafa4465ff02ae9), UINT64_C(0x9d5e72693952fbd5),
UINT64_C(0xb083d216cf35d8ad), UINT64_C(0xc6e53296d59cbd25), UINT64_C(0xeb3892e923fb9e5d),
UINT64_C(0x2a28f396e0ce7635), UINT64_C(0x07f553e916a9554d), UINT64_C(0x7193b3690c0030c5),
UINT64_C(0x5c4e1316fa6713bd), UINT64_C(0x616bdebd2565fe90), UINT64_C(0x4cb67ec2d302dde8),
UINT64_C(0x3ad09e42c9abb860), UINT64_C(0x170d3e3d3fcc9b18), UINT64_C(0xd61d5f42fcf97370),
UINT64_C(0xfbc0ff3d0a9e5008), UINT64_C(0x8da61fbd10373580), UINT64_C(0xa07bbfc2e65016f8),
UINT64_C(0xf7ed84eaae32efda), UINT64_C(0xda3024955855cca2), UINT64_C(0xac56c41542fca92a),
UINT64_C(0x818b646ab49b8a52), UINT64_C(0x409b051577ae623a), UINT64_C(0x6d46a56a81c94142),
UINT64_C(0x1b2045ea9b6024ca), UINT64_C(0x36fde5956d0707b2), UINT64_C(0x0bd8283eb205ea9f),
UINT64_C(0x260588414462c9e7), UINT64_C(0x506368c15ecbac6f), UINT64_C(0x7dbec8bea8ac8f17),
UINT64_C(0xbcaea9c16b99677f), UINT64_C(0x917309be9dfe4407), UINT64_C(0xe715e93e8757218f),
UINT64_C(0xcac84941713002f7) }
};
// Table-driven, one-byte-at-a-time CRC-64 over an arbitrary buffer.
// Slower than the interleaved variant below, but has no alignment or
// minimum-length requirements.
static inline uint64_t crc64_slow(const void* input, size_t nbytes)
{
    const unsigned char* cursor = (const unsigned char*)input;
    // Standard CRC convention: start from all-ones, invert on the way out.
    uint64_t crc = UINT64_C(0xffffffffffffffff);
    for (size_t i = 0; i < nbytes; ++i)
    {
        uint32_t slot = ((uint32_t)(crc ^ cursor[i])) & 0xff;
        crc = crc64_table[3][slot] ^ (crc >> 8);
    }
    return crc ^ UINT64_C(0xffffffffffffffff);
}
// Loads an input 32-bit word in little-endian order from a big-endian machine.
static inline uint32_t crc64_load_le32_(const uint32_t* p)
{
#ifdef __ppc__
    // See: http://hardwarebug.org/2008/10/25/gcc-inline-asm-annoyance/
    uint32_t v;
    asm("lwbrx %0, %y1" : "=r"(v) : "Z"(*p));
    return v;
#else
    // Portable fallback: reverse the four bytes of the loaded word.
    const uint32_t w = *p;
    return ((w >> 24) & 0x000000ff) | ((w >> 8) & 0x0000ff00) |
           ((w << 8) & 0x00ff0000) | ((w << 24) & 0xff000000);
#endif
}
// A parallel multiword interleaved algorithm with a word size of 4 bytes
// and a stride factor of 5.
// Computes the same CRC-64 as crc64_slow(), but consumes 20 bytes per
// iteration as five independent 4-byte streams whose partial states are
// recombined at the end.
static inline uint64_t crc64(const void* input, size_t nbytes)
{
const unsigned char* data = (const unsigned char*)input;
const unsigned char* end = data + nbytes;
// cs[0] carries the real CRC state (all-ones preset); cs[1..4] accumulate
// the interleaved partial states and start at zero.
uint64_t cs[5] = { UINT64_C(0xffffffffffffffff), 0, 0, 0, 0 };
// Process byte-by-byte until proper alignment is attained.
// In the inner loop, we process 5 4-byte words (20 bytes in total)
// per iteration. If the amount of data remaining is small,
// then we also use the slow algorithm.
while (data < end && ((((size_t)data) & 3) || (end - data < 20)))
{
uint32_t idx = ((uint32_t)(cs[0] ^ *data++)) & 0xff;
cs[0] = crc64_table[3][idx] ^ (cs[0] >> 8);
}
if (data == end)
return cs[0] ^ UINT64_C(0xffffffffffffffff);
// Runtime endianness probe: on a big-endian host the 4-byte loads below
// must be byte-swapped (crc64_load_le32_) to match the little-endian
// layout the tables assume.
const uint32_t one = 1;
bool big_endian = !(*((char*)(&one)));
uint64_t cry = 0;
uint32_t in[5];
if (!big_endian)
{
// Prime the pipeline with the first five words.
for (unsigned i = 0; i < 5; ++i)
in[i] = ((const uint32_t*)data)[i];
data += 20;
for (; end - data >= 20; data += 20)
{
// Fold the previous iteration's carry into stream 0, then push byte 0
// of each word through the interleaved tables while propagating each
// stream's high half into the next stream.
cs[0] ^= cry;
in[0] ^= (uint32_t)cs[0];
cs[1] ^= cs[0] >> 32;
cs[0] = crc64_interleaved_table[0][in[0] & 0xff];
in[0] >>= 8;
in[1] ^= (uint32_t)cs[1];
cs[2] ^= cs[1] >> 32;
cs[1] = crc64_interleaved_table[0][in[1] & 0xff];
in[1] >>= 8;
in[2] ^= (uint32_t)cs[2];
cs[3] ^= cs[2] >> 32;
cs[2] = crc64_interleaved_table[0][in[2] & 0xff];
in[2] >>= 8;
in[3] ^= (uint32_t)cs[3];
cs[4] ^= cs[3] >> 32;
cs[3] = crc64_interleaved_table[0][in[3] & 0xff];
in[3] >>= 8;
in[4] ^= (uint32_t)cs[4];
cry = cs[4] >> 32;
cs[4] = crc64_interleaved_table[0][in[4] & 0xff];
in[4] >>= 8;
// Bytes 1 and 2 of every word.
for (unsigned b = 1; b < 3; ++b)
{
cs[0] ^= crc64_interleaved_table[b][in[0] & 0xff];
in[0] >>= 8;
cs[1] ^= crc64_interleaved_table[b][in[1] & 0xff];
in[1] >>= 8;
cs[2] ^= crc64_interleaved_table[b][in[2] & 0xff];
in[2] >>= 8;
cs[3] ^= crc64_interleaved_table[b][in[3] & 0xff];
in[3] >>= 8;
cs[4] ^= crc64_interleaved_table[b][in[4] & 0xff];
in[4] >>= 8;
}
// Final byte of each word; simultaneously preload the next five words.
cs[0] ^= crc64_interleaved_table[3][in[0] & 0xff];
in[0] = ((const uint32_t*)data)[0];
cs[1] ^= crc64_interleaved_table[3][in[1] & 0xff];
in[1] = ((const uint32_t*)data)[1];
cs[2] ^= crc64_interleaved_table[3][in[2] & 0xff];
in[2] = ((const uint32_t*)data)[2];
cs[3] ^= crc64_interleaved_table[3][in[3] & 0xff];
in[3] = ((const uint32_t*)data)[3];
cs[4] ^= crc64_interleaved_table[3][in[4] & 0xff];
in[4] = ((const uint32_t*)data)[4];
}
}
else
{
// Same pipeline as the branch above, but every 4-byte load goes through
// crc64_load_le32_ to byte-swap on the big-endian host.
for (unsigned i = 0; i < 5; ++i)
{
in[i] = crc64_load_le32_(&((const uint32_t*)data)[i]);
}
data += 20;
for (; end - data >= 20; data += 20)
{
cs[0] ^= cry;
in[0] ^= (uint32_t)cs[0];
cs[1] ^= cs[0] >> 32;
cs[0] = crc64_interleaved_table[0][in[0] & 0xff];
in[0] >>= 8;
in[1] ^= (uint32_t)cs[1];
cs[2] ^= cs[1] >> 32;
cs[1] = crc64_interleaved_table[0][in[1] & 0xff];
in[1] >>= 8;
in[2] ^= (uint32_t)cs[2];
cs[3] ^= cs[2] >> 32;
cs[2] = crc64_interleaved_table[0][in[2] & 0xff];
in[2] >>= 8;
in[3] ^= (uint32_t)cs[3];
cs[4] ^= cs[3] >> 32;
cs[3] = crc64_interleaved_table[0][in[3] & 0xff];
in[3] >>= 8;
in[4] ^= (uint32_t)cs[4];
cry = cs[4] >> 32;
cs[4] = crc64_interleaved_table[0][in[4] & 0xff];
in[4] >>= 8;
for (unsigned b = 1; b < 3; ++b)
{
cs[0] ^= crc64_interleaved_table[b][in[0] & 0xff];
in[0] >>= 8;
cs[1] ^= crc64_interleaved_table[b][in[1] & 0xff];
in[1] >>= 8;
cs[2] ^= crc64_interleaved_table[b][in[2] & 0xff];
in[2] >>= 8;
cs[3] ^= crc64_interleaved_table[b][in[3] & 0xff];
in[3] >>= 8;
cs[4] ^= crc64_interleaved_table[b][in[4] & 0xff];
in[4] >>= 8;
}
cs[0] ^= crc64_interleaved_table[3][in[0] & 0xff];
in[0] = crc64_load_le32_(&((const uint32_t*)data)[0]);
cs[1] ^= crc64_interleaved_table[3][in[1] & 0xff];
in[1] = crc64_load_le32_(&((const uint32_t*)data)[1]);
cs[2] ^= crc64_interleaved_table[3][in[2] & 0xff];
in[2] = crc64_load_le32_(&((const uint32_t*)data)[2]);
cs[3] ^= crc64_interleaved_table[3][in[3] & 0xff];
in[3] = crc64_load_le32_(&((const uint32_t*)data)[3]);
cs[4] ^= crc64_interleaved_table[3][in[4] & 0xff];
in[4] = crc64_load_le32_(&((const uint32_t*)data)[4]);
}
}
// Merge the five interleaved partial states (and the pending in[] words,
// which were loaded but not yet consumed) back into a single CRC.
cs[0] ^= cry;
for (unsigned i = 0; i < 5; ++i)
{
if (i > 0)
cs[0] ^= cs[i];
in[i] ^= (uint32_t)cs[0];
cs[0] = cs[0] >> 32;
for (unsigned b = 0; b < 3; ++b)
{
cs[0] ^= crc64_table[b][in[i] & 0xff];
in[i] >>= 8;
}
cs[0] ^= crc64_table[3][in[i] & 0xff];
}
// Handle the trailing (< 20) bytes byte-by-byte.
while (data < end)
{
uint32_t idx = ((uint32_t)(cs[0] ^ *data++)) & 0xff;
cs[0] = crc64_table[3][idx] ^ (cs[0] >> 8);
}
return cs[0] ^ UINT64_C(0xffffffffffffffff);
}
// Calculate the 'check bytes' for the provided checksum. If these bytes are
// appended to the original buffer, then the new total checksum should be zero.
static inline void crc64_invert(uint64_t cs, void* buffer)
{
    unsigned char* out = (unsigned char*)buffer;
    cs ^= UINT64_C(0xffffffffffffffff);
    // The CRC is self-inverting (in big-endian, so the bit-reversed CRC is
    // self-inverting in little-endian). Emit the least-significant byte first.
    for (int i = 0; i < 8; ++i)
    {
        out[i] = (unsigned char)(cs & 0xff);
        cs >>= 8;
    }
}
// Precomputed powers of x: entry i holds x**(2**i) mod P, so that
// crc64_x_pow_n_() below can build x**n by multiplying together the entries
// selected by the set bits of n (square-and-multiply without the squaring).
static const uint64_t crc64_x_pow_2n[64] = { UINT64_C(0x4000000000000000),
UINT64_C(0x2000000000000000), UINT64_C(0x0800000000000000), UINT64_C(0x0080000000000000),
UINT64_C(0x0000800000000000), UINT64_C(0x0000000080000000), UINT64_C(0xc96c5795d7870f42),
UINT64_C(0x6d5f4ad7e3c3afa0), UINT64_C(0xd49f7e445077d8ea), UINT64_C(0x040fb02a53c216fa),
UINT64_C(0x6bec35957b9ef3a0), UINT64_C(0xb0e3bb0658964afe), UINT64_C(0x218578c7a2dff638),
UINT64_C(0x6dbb920f24dd5cf2), UINT64_C(0x7a140cfcdb4d5eb5), UINT64_C(0x41b3705ecbc4057b),
UINT64_C(0xd46ab656accac1ea), UINT64_C(0x329beda6fc34fb73), UINT64_C(0x51a4fcd4350b9797),
UINT64_C(0x314fa85637efae9d), UINT64_C(0xacf27e9a1518d512), UINT64_C(0xffe2a3388a4d8ce7),
UINT64_C(0x48b9697e60cc2e4e), UINT64_C(0xada73cb78dd62460), UINT64_C(0x3ea5454d8ce5c1bb),
UINT64_C(0x5e84e3a6c70feaf1), UINT64_C(0x90fd49b66cbd81d1), UINT64_C(0xe2943e0c1db254e8),
UINT64_C(0xecfa6adeca8834a1), UINT64_C(0xf513e212593ee321), UINT64_C(0xf36ae57331040916),
UINT64_C(0x63fbd333b87b6717), UINT64_C(0xbd60f8e152f50b8b), UINT64_C(0xa5ce4a8299c1567d),
UINT64_C(0x0bd445f0cbdb55ee), UINT64_C(0xfdd6824e20134285), UINT64_C(0xcead8b6ebda2227a),
UINT64_C(0xe44b17e4f5d4fb5c), UINT64_C(0x9b29c81ad01ca7c5), UINT64_C(0x1b4366e40fea4055),
UINT64_C(0x27bca1551aae167b), UINT64_C(0xaa57bcd1b39a5690), UINT64_C(0xd7fce83fa1234db9),
UINT64_C(0xcce4986efea3ff8e), UINT64_C(0x3602a4d9e65341f1), UINT64_C(0x722b1da2df516145),
UINT64_C(0xecfc3ddd3a08da83), UINT64_C(0x0fb96dcca83507e6), UINT64_C(0x125f2fe78d70f080),
UINT64_C(0x842f50b7651aa516), UINT64_C(0x09bc34188cd9836f), UINT64_C(0xf43666c84196d909),
UINT64_C(0xb56feb30c0df6ccb), UINT64_C(0xaa66e04ce7f30958), UINT64_C(0xb7b1187e9af29547),
UINT64_C(0x113255f8476495de), UINT64_C(0x8fb19f783095d77e), UINT64_C(0xaec4aacc7c82b133),
UINT64_C(0xf64e6d09218428cf), UINT64_C(0x036a72ea5ac258a0), UINT64_C(0x5235ef12eb7aaa6a),
UINT64_C(0x2fed7b1685657853), UINT64_C(0x8ef8951d46606fb5), UINT64_C(0x9d58c1090f034d14) };
// Compute (a*b) mod P (carry-less polynomial multiplication in GF(2)).
// See: https://code.google.com/p/crcutil/source/browse/code/gf_util.h
static inline uint64_t crc64_multiply_(uint64_t a, uint64_t b)
{
    // Put the operand with the "smaller" trailing-bit signature into 'a' so
    // the shift loop below terminates as early as possible.
    if ((a ^ (a - 1)) < (b ^ (b - 1)))
    {
        const uint64_t swap = a;
        a = b;
        b = swap;
    }
    if (a == 0)
        return 0;
    const uint64_t top_bit = UINT64_C(1) << 63;
    uint64_t product = 0;
    while (a != 0)
    {
        if (a & top_bit)
        {
            product ^= b;
            a ^= top_bit;
        }
        // Multiply b by x in the reflected representation: shift right and
        // fold the polynomial back in when a term falls off the low end.
        b = (b >> 1) ^ ((b & 1) ? crc64_poly : 0);
        a <<= 1;
    }
    return product;
}
// Compute x**n mod P via the precomputed x**(2**i) table: multiply together
// the table entries whose corresponding bit is set in n.
static inline uint64_t crc64_x_pow_n_(uint64_t n)
{
    // Multiplicative identity (x**0) in the reflected representation.
    uint64_t result = UINT64_C(1) << 63;
    size_t bit = 0;
    while (n != 0)
    {
        if (n & 1)
            result = crc64_multiply_(result, crc64_x_pow_2n[bit]);
        n >>= 1;
        ++bit;
    }
    return result;
}
// Combine the CRCs of two consecutive buffers into the CRC of their
// concatenation without re-reading any data.
// For M = CONCAT(M1, M2) => CRC(M, a) = CRC(M2, CRC(M1, a)) and:
// CRC(M, b) = CRC(M, a) + ((b-a)x^|M|) mod P.
static inline uint64_t crc64_combine(uint64_t cs1, uint64_t cs2, size_t nbytes2)
{
    const uint64_t shift = crc64_x_pow_n_(8 * nbytes2);
    return crc64_multiply_(cs1, shift) ^ cs2;
}
// Minimum number of bytes each OpenMP worker should process; crc64_omp()
// refuses to parallelize (or lowers the thread count) below this threshold.
static const size_t crc64_min_thread_bytes = 1024;
// OpenMP-parallel CRC-64: splits the buffer into per-thread chunks, CRCs them
// independently, then stitches the partial results together with
// crc64_combine(). Falls back to the serial crc64() when OpenMP is disabled
// or the buffer is too small for threading to pay off.
static inline uint64_t crc64_omp(const void* input, size_t nbytes)
{
#ifdef _OPENMP
if (nbytes > 2 * crc64_min_thread_bytes)
{
// Use fewer threads when there is not enough data to keep them all busy.
int nthreads = omp_get_max_threads();
if (nbytes < nthreads * crc64_min_thread_bytes)
nthreads = nbytes / crc64_min_thread_bytes;
// NOTE(review): variable-length arrays are a compiler extension in C++
// (accepted by GCC/Clang); standard C++ would need std::vector here.
uint64_t thread_cs[nthreads];
size_t thread_sz[nthreads];
const unsigned char* data = (const unsigned char*)input;
#pragma omp parallel num_threads(nthreads)
{
int tid = omp_get_thread_num();
size_t bpt = nbytes / nthreads;
// Each thread takes a bpt-sized slice; the last one also takes the
// remainder of the division.
const unsigned char *start = data + bpt * tid, *end;
if (tid != nthreads - 1)
end = start + bpt;
else
end = data + nbytes;
size_t sz = end - start;
thread_sz[tid] = sz;
thread_cs[tid] = crc64(start, sz);
}
// Merge per-chunk CRCs in buffer order.
uint64_t cs = thread_cs[0];
for (int i = 1; i < nthreads; ++i)
{
cs = crc64_combine(cs, thread_cs[i], thread_sz[i]);
}
return cs;
}
#endif
return crc64(input, nbytes);
}
} /* END namespace lanl */
#endif // CRC64_H
|
anti_dep_1.yes.c | int main(int argc,char *argv[])
{
double a[20][20];
/* NOTE(review): 'a' is read uninitialized below — tolerable only because this
   is a synthetic dependence-analysis sample, not production code. */
/* Iteration i reads a[i + 1][j], which iteration i + 1 writes: a loop-carried
   anti (write-after-read) dependence across the parallelized i loop, so this
   "parallel for" is intentionally unsafe (".yes" marks a dependence-present
   test case — do not "fix" the race). */
#pragma omp parallel for
for (int i = 0; i <= 19 - 1; i += 1) {
for (int j = 0; j <= 20 - 1; j += 1) {
a[i][j] += a[i + 1][j];
}
}
return 0;
}
|
spatial_index.h | /*
* Copyright (c) 2018
* Markus Goetz
*
* This software may be modified and distributed under the terms of MIT-style license.
*
 * Description: Indexes the feature space to allow fast neighborhood queries
*
* Maintainer: m.goetz
*
* Email: markus.goetz@kit.edu
*/
#ifndef SPATIAL_INDEX_H
#define SPATIAL_INDEX_H
#include <algorithm>
#include <atomic>
#include <cmath>
#include <functional>
#include <hdf5.h>
#include <limits>
#include <numeric>
#include <omp.h>
#include <parallel/algorithm>
#include <vector>
#ifdef WITH_OUTPUT
#include <iostream>
#endif
#include "constants.h"
#include "dataset.h"
#ifdef WITH_MPI
#include <mpi.h>
#include "mpi_util.h"
#endif
// Configuration of an epsilon neighborhood used to grid the feature space.
struct EPSConfig {
    // Indices of the dataset dimensions this epsilon applies to.
    std::vector<size_t> dimensions;
    // Neighborhood radius; defaults to zero (use a float literal so no
    // double-to-float conversion is involved in the initialization).
    float epsilon = 0.0f;
};
template <typename T>
class SpatialIndex {
Dataset& m_data;
std::vector<float> m_epsilon_map;
std::vector<T> m_minimums;
std::vector<T> m_maximums;
std::vector<size_t> m_cell_dimensions;
size_t m_total_cells;
Cell m_last_cell;
Cells m_cells;
CellHistogram m_cell_histogram;
CellIndex m_cell_index;
std::vector<size_t> m_swapped_dimensions;
size_t m_halo;
size_t m_global_point_offset;
std::vector<size_t> m_initial_order;
#ifdef WITH_MPI
int m_rank;
int m_size;
std::vector<CellBounds> m_cell_bounds;
std::vector<ComputeBounds> m_compute_bounds;
#endif
public:
// implementations of the custom omp reduction operations
static void vector_min(std::vector<T>& omp_in, std::vector<T>& omp_out) {
for (size_t index = 0; index < omp_out.size(); ++index) {
omp_out[index] = std::min(omp_in[index], omp_out[index]);
}
}
#pragma omp declare reduction(vector_min: std::vector<T>: vector_min(omp_in, omp_out)) initializer(omp_priv = omp_orig)
static void vector_max(std::vector<T>& omp_in, std::vector<T>& omp_out) {
for (size_t index = 0; index < omp_out.size(); ++index) {
omp_out[index] = std::max(omp_in[index], omp_out[index]);
}
}
#pragma omp declare reduction(vector_max: std::vector<T>: vector_max(omp_in, omp_out)) initializer(omp_priv = omp_orig)
// Custom OpenMP reduction body: merge one cell histogram into another by
// summing the per-cell point counts.
static void merge_histograms(CellHistogram& omp_in, CellHistogram& omp_out) {
    for (auto it = omp_in.begin(); it != omp_in.end(); ++it) {
        omp_out[it->first] += it->second;
    }
}
#pragma omp declare reduction(merge_histograms: CellHistogram: merge_histograms(omp_in, omp_out)) initializer(omp_priv = omp_orig)
private:
// Records each point's global (dataset-wide) index: its local position plus
// this chunk's offset into the full dataset.
void compute_initial_order() {
#pragma omp parallel for
for (size_t i = 0; i < m_data.m_chunk[0]; ++i) {
// NOTE(review): '+=' relies on m_initial_order being zero-initialized
// beforehand — otherwise plain '=' would be the intended operator; confirm.
m_initial_order[i] += i + m_data.m_offset[0];
}
}
// Determines the minimum and maximum coordinate of every feature dimension,
// reducing thread-locally via the custom vector_min/vector_max reductions and
// (under MPI) across all ranks afterwards.
void compute_space_dimensions() {
const size_t dimensions = m_minimums.size();
// NOTE(review): 'bytes' is actually a count of T values (points * dims),
// not bytes — the pointer arithmetic below is in units of T.
const size_t bytes = m_cells.size() * dimensions;
const T* end_point = static_cast<T*>(m_data.m_p) + bytes;
// compute the local feature space minimums and maximums in parallel
// (local references, because OpenMP reduction clauses cannot name members)
auto& minimums = m_minimums;
auto& maximums = m_maximums;
#pragma omp parallel for reduction(vector_min: minimums) reduction(vector_max: maximums)
for (T* point = static_cast<T*>(m_data.m_p); point < end_point; point += dimensions) {
for (size_t d = 0; d < dimensions; ++d) {
const T& coordinate = point[d];
minimums[d] = std::min(minimums[d], coordinate);
maximums[d] = std::max(maximums[d], coordinate);
}
}
// exchange globally, if necessary
#ifdef WITH_MPI
MPI_Allreduce(MPI_IN_PLACE, m_minimums.data(), dimensions, MPI_Types<T>::map(), MPI_MIN, MPI_COMM_WORLD);
MPI_Allreduce(MPI_IN_PLACE, m_maximums.data(), dimensions, MPI_Types<T>::map(), MPI_MAX, MPI_COMM_WORLD);
#endif
}
void compute_cell_dimensions() {
for (size_t i = 0; i < m_cell_dimensions.size(); ++i) {
size_t cells = static_cast<size_t>(std::ceil((m_maximums[i] - m_minimums[i]) / m_epsilon_map[i])) + 1;
m_cell_dimensions[i] = cells;
m_total_cells *= cells;
}
m_last_cell = m_total_cells;
}
// Reorders the dimension indices by their cell counts and derives the halo
// size from the largest dimension.
void swap_dimensions() {
// fill the dimensions with an initially correct order
std::iota(m_swapped_dimensions.begin(), m_swapped_dimensions.end(), 0);
// sort the dimensions ascending by their cell counts (the 'a < b' comparator
// puts the dimension with the fewest cells first, the largest one last)
std::sort(m_swapped_dimensions.begin(), m_swapped_dimensions.end(), [&] (size_t a, size_t b) {
return m_cell_dimensions[a] < m_cell_dimensions[b];
});
// determine the halo size: the number of cells in one slice of the largest
// (last-sorted) dimension, i.e. the product of all the other dimensions
m_halo = m_total_cells / m_cell_dimensions[m_swapped_dimensions.back()];
}
// Assigns every point to its grid cell — the cell id is the mixed-radix
// combination of the per-dimension epsilon-grid indices (in swapped dimension
// order) — and builds a points-per-cell histogram via the merge_histograms
// reduction.
void compute_cells() {
CellHistogram histogram;
const size_t dimensions = m_data.m_chunk[1];
#pragma omp parallel for reduction(merge_histograms: histogram)
for (size_t i = 0; i < m_data.m_chunk[0]; ++i) {
const T* point = static_cast<T*>(m_data.m_p) + i * dimensions;
size_t cell = 0;
size_t accumulator = 1;
for (size_t d : m_swapped_dimensions) {
// the point's grid index within dimension d, in epsilon-sized steps
size_t index = static_cast<size_t>(std::floor((point[d] - m_minimums[d]) / m_epsilon_map[d]));
cell += index * accumulator;
accumulator *= m_cell_dimensions[d];
}
m_cells[i] = cell;
++histogram[cell];
}
m_cell_histogram.swap(histogram);
}
// Builds the cell index: for every cell, the offset of its first point in the
// (cell-sorted) point array together with the number of points it contains.
void compute_cell_index() {
    size_t offset = 0;
    for (const auto& bucket : m_cell_histogram) {
        auto& entry = m_cell_index[bucket.first];
        entry.first = offset;
        entry.second = bucket.second;
        offset += bucket.second;
    }
    // terminating dummy entry marking the end of the point array
    auto& sentinel = m_cell_index[m_last_cell];
    sentinel.first = m_cells.size();
    sentinel.second = 0;
}
// Reorders points, cell ids and the initial-order bookkeeping so that all
// points of the same cell end up contiguous — an out-of-place bucket sort by
// cell, using the per-cell offsets from m_cell_index.
void sort_by_cell() {
const hsize_t items = m_data.m_chunk[0];
const hsize_t dimensions = m_data.m_chunk[1];
// initialize out-of-place buffers
Cells reordered_cells(items);
std::vector<size_t> reordered_indices(items);
std::vector<T> reordered_points(items * dimensions);
// memory for offset of already placed items
// (atomic so concurrent threads claim unique slots within the same cell)
std::unordered_map<Cell, std::atomic<size_t>> offsets;
for (const auto& cell_index : m_cell_index) {
offsets[cell_index.first].store(0);
}
// sorting the points and cells out-of-place, memorize the original order
#pragma omp parallel for
for (size_t i = 0; i < items; ++i) {
const Cell cell = m_cells[i];
const auto& locator = m_cell_index[cell];
// destination = cell's start offset + atomically claimed slot in the cell
const size_t copy_to = locator.first + (offsets[cell]++);
reordered_cells[copy_to] = m_cells[i];
reordered_indices[copy_to] = m_initial_order[i];
for (size_t d = 0; d < dimensions; ++d) {
reordered_points[copy_to * dimensions + d] = static_cast<T*>(m_data.m_p)[i * dimensions + d];
}
}
// move the out-of-place results into the correct in-place buffers
m_cells.swap(reordered_cells);
m_initial_order.swap(reordered_indices);
std::copy(reordered_points.begin(), reordered_points.end(), static_cast<T*>(m_data.m_p));
}
#ifdef WITH_MPI
// Gathers every rank's local cell histogram and sums them into one global
// histogram; also advances m_last_cell to the globally largest cell id + 1.
CellHistogram compute_global_histogram() {
// fetch cell histograms across all nodes
// NOTE(review): variable-length arrays are a compiler extension in C++.
int send_counts[m_size];
int send_displs[m_size];
int recv_counts[m_size];
int recv_displs[m_size];
// determine the number of entries in each process' histogram
// (each histogram entry is serialized as two values: cell id and count)
for (int i = 0; i < m_size; ++i) {
send_counts[i] = m_cell_histogram.size() * 2;
send_displs[i] = 0;
}
MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, MPI_COMM_WORLD);
// ... based on this information we can calculate the displacements into the buffer
size_t entries_count = 0;
for (int i = 0; i < m_size; ++i) {
recv_displs[i] = entries_count;
entries_count += recv_counts[i];
}
// serialize the local histogram into a flat buffer
std::vector<size_t> send_buffer(m_cell_histogram.size() * 2);
size_t send_buffer_index = 0;
for (const auto& item : m_cell_histogram) {
send_buffer[send_buffer_index++] = item.first;
send_buffer[send_buffer_index++] = item.second;
}
// exchange the histograms
// NOTE(review): MPI_UNSIGNED_LONG assumes size_t == unsigned long — verify
// on platforms (e.g. 64-bit Windows) where that does not hold.
std::vector<size_t> recv_buffer(entries_count);
MPI_Alltoallv(
send_buffer.data(), send_counts, send_displs, MPI_UNSIGNED_LONG,
recv_buffer.data(), recv_counts, recv_displs, MPI_UNSIGNED_LONG, MPI_COMM_WORLD
);
// sum-up the entries into a global histogram
CellHistogram global_histogram;
for (size_t i = 0; i < entries_count; i += 2) {
global_histogram[recv_buffer[i]] += recv_buffer[i + 1];
}
// remember the new globally last cell
m_last_cell = global_histogram.rbegin()->first + 1;
return global_histogram;
}
// Scores a cell as (points in the cell) * (points in the cell and all of its
// directly neighboring cells along each dimension) — an estimate of the
// pairwise comparison workload used for load balancing.
size_t compute_score(const Cell cell_id, const CellHistogram& cell_histogram) {
const hsize_t dimensions = m_data.m_chunk[1];
// allocate buffer for the dimensions steps
Cells neighboring_cells;
neighboring_cells.reserve(std::pow(3, dimensions));
neighboring_cells.push_back(cell_id);
// accumulators for sub-space traversal
size_t cells_in_lower_space = 1;
size_t cells_in_current_space = 1;
size_t points_in_cell = cell_histogram.find(cell_id)->second;
size_t number_of_points = points_in_cell;
// iterate through all neighboring cells and add up the number of points stored there
for (size_t d : m_swapped_dimensions) {
cells_in_current_space *= m_cell_dimensions[d];
for (size_t i = 0, end = neighboring_cells.size(); i < end; ++i) {
const Cell current = neighboring_cells[i];
// cell to the left (guard against stepping past the first slice of d)
const Cell left = current - cells_in_lower_space;
if (current % cells_in_current_space >= cells_in_lower_space) {
const auto& locator = cell_histogram.find(left);
number_of_points += locator != cell_histogram.end() ? locator->second : 0;
neighboring_cells.push_back(left);
}
// cell to the right (guard against stepping past the last slice of d)
const Cell right = current + cells_in_lower_space;
if (current % cells_in_current_space < cells_in_current_space - cells_in_lower_space) {
const auto& locator = cell_histogram.find(right);
number_of_points += locator != cell_histogram.end() ? locator->second : 0;
neighboring_cells.push_back(right);
}
}
cells_in_lower_space = cells_in_current_space;
}
return points_in_cell * number_of_points;
}
// Splits the score-weighted cell range into m_size chunks of roughly equal
// workload; records per rank both the compute bounds (split points inside the
// boundary cells) and the cell bounds including the halo cells.
void compute_bounds(const CellHistogram& cell_histogram) {
// make room for the values in the bounds variables
m_cell_bounds.resize(m_size);
m_compute_bounds.resize(m_size);
// compute the score value for each cell and accumulate the total score first...
std::vector<size_t> scores(cell_histogram.size(), 0);
size_t total_score = 0;
size_t score_index = 0;
for (auto& pair : cell_histogram) {
const Cell cell = pair.first;
const size_t score = compute_score(cell, cell_histogram);
scores[score_index++] = score;
total_score += score;
}
// ...to determine the actual bounds
const size_t score_per_chunk = total_score / m_size + 1;
size_t accumulator = 0;
size_t target_rank = 0;
size_t lower_split_point = 0;
size_t bound_lower_start = 0;
auto cell_buckets = cell_histogram.begin();
// iterate over the score array and find the point where the score per chunk is exceeded
for (size_t i = 0; i < scores.size(); ++ i) {
const auto& cell_bucket = cell_buckets++;
const Cell cell = cell_bucket->first;
const size_t score = scores[i];
accumulator += score;
while (accumulator > score_per_chunk) {
// interpolate where inside the current cell the chunk boundary falls
// (score / count approximates the score contribution per point)
const size_t split_point = (accumulator - score_per_chunk) / (score / cell_bucket->second);
// we have identified the bounds in which the rank needs to compute locally
m_compute_bounds[target_rank][0] = lower_split_point;
m_compute_bounds[target_rank][1] = split_point;
lower_split_point = split_point;
// determine the cell bounds, i.e. all cells that we need including halo
auto& bound = m_cell_bounds[target_rank];
const size_t cell_offset = (bound_lower_start % m_halo) + m_halo;
bound[0] = cell_offset > bound_lower_start ? 0 : bound_lower_start - cell_offset;
bound[1] = bound_lower_start;
bound[2] = cell + 1;
bound[3] = std::min((bound[2] / m_halo) * m_halo + (m_halo * 2), m_last_cell);
// update the state, a whole chunk has been assigned
bound_lower_start = bound[2];
accumulator = split_point * score / cell_bucket->second;
// start assigning to the next rank
++target_rank;
}
// the left-overs are assigned to the current rank
if (static_cast<int>(target_rank) == m_size - 1 or i == cell_histogram.size() - 1) {
// compute bounds first
m_compute_bounds[target_rank][0] = lower_split_point;
m_compute_bounds[target_rank][1] = 0;
// cell bounds including halo next
auto& bound = m_cell_bounds[target_rank];
const size_t cell_offset = (bound_lower_start % m_halo) + m_halo;
bound[0] = cell_offset > bound_lower_start ? 0 : bound_lower_start - cell_offset;
bound[1] = bound_lower_start;
bound[2] = m_last_cell;
bound[3] = m_last_cell;
// we are done here
break;
}
}
}
// Redistributes the locally stored points (and their original-order indices in
// m_initial_order) across all MPI ranks so that afterwards each rank holds the
// points of the cell range it was assigned in m_cell_bounds, including the
// halo cells [bound[0], bound[3]).
// Side effects: replaces m_data.m_p (frees the old buffer), updates
// m_data.m_chunk[0], clears m_cells/m_cell_index (they must be recomputed),
// and swaps m_initial_order with the received order indices.
// NOTE(review): m_cell_index.lower_bound(...) is dereferenced unconditionally;
// this assumes an index entry at or after every queried cell exists - confirm.
void redistribute_dataset() {
const size_t dimensions = m_data.m_chunk[1];
// calculate the send number of points to be transmitted to each rank
// (counts/displacements are in scalar values, i.e. points * dimensions)
// NOTE(review): these are variable-length arrays, a GCC/Clang extension.
int send_counts[m_size];
int send_displs[m_size];
int recv_counts[m_size];
int recv_displs[m_size];
for (int i = 0; i < m_size; ++i) {
const auto& bound = m_cell_bounds[i];
// first point of the lowest halo cell and first point past the highest one
const size_t lower = m_cell_index.lower_bound(bound[0])->second.first;
const size_t upper = m_cell_index.lower_bound(bound[3])->second.first;
send_displs[i] = lower * dimensions;
send_counts[i] = upper * dimensions - send_displs[i];
}
// exchange how much data we send/receive to and from each rank
MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, MPI_COMM_WORLD);
// receive displacements are the running prefix sum of the receive counts
for (int i = 0; i < m_size; ++i) {
recv_displs[i] = (i == 0) ? 0 : (recv_displs[i - 1] + recv_counts[i - 1]);
}
// calculate the corresponding send and receive counts for the label/order vectors
// (one entry per point, hence the division by the number of dimensions)
size_t total_recv_items = 0;
int send_counts_labels[m_size];
int send_displs_labels[m_size];
int recv_counts_labels[m_size];
int recv_displs_labels[m_size];
for (int i = 0; i < m_size; ++i) {
// total_recv_items counts received scalar values, not points
total_recv_items += recv_counts[i];
send_counts_labels[i] = send_counts[i] / dimensions;
send_displs_labels[i] = send_displs[i] / dimensions;
recv_counts_labels[i] = recv_counts[i] / dimensions;
recv_displs_labels[i] = recv_displs[i] / dimensions;
}
// allocate new buffers for the points and the order vectors
T* point_buffer = new T[total_recv_items];
std::vector<size_t> order_buffer(total_recv_items / dimensions);
// actually transmit the data
MPI_Alltoallv(
static_cast<T*>(m_data.m_p), send_counts, send_displs, MPI_Types<T>::map(),
point_buffer, recv_counts, recv_displs, MPI_Types<T>::map(), MPI_COMM_WORLD
);
MPI_Alltoallv(
m_initial_order.data(), send_counts_labels, send_displs_labels, MPI_UNSIGNED_LONG,
order_buffer.data(), recv_counts_labels, recv_displs_labels, MPI_UNSIGNED_LONG, MPI_COMM_WORLD
);
// clean up the previous data; the cell assignments and the cell index refer
// to the old point order and must be rebuilt by the caller
delete[] static_cast<T*>(m_data.m_p);
m_cells.clear();
m_cell_index.clear();
// assign the new data
const hsize_t new_item_count = total_recv_items / dimensions;
m_data.m_chunk[0] = new_item_count;
m_cells.resize(new_item_count);
m_data.m_p = point_buffer;
m_initial_order.swap(order_buffer);
}
// Computes this rank's global point offset: the exclusive prefix sum over all
// ranks of the number of points each rank owns (excluding its halo zones).
// The offset makes locally derived labels globally unique.
void compute_global_point_offset() {
// number of points this rank owns, i.e. the non-halo range
m_global_point_offset = upper_halo_bound() - lower_halo_bound();
// exclusive scan: each rank receives the sum of the counts of all lower ranks
MPI_Exscan(MPI_IN_PLACE, &m_global_point_offset, 1, MPI_UNSIGNED_LONG, MPI_SUM, MPI_COMM_WORLD);
// MPI_Exscan leaves the receive buffer undefined on rank 0 - define it as 0
if (m_rank == 0) m_global_point_offset = 0;
}
// Restores the points, their cluster labels and m_initial_order to ascending
// original-index order using an out-of-place LSD radix sort keyed on
// m_initial_order. Only the non-halo range [lower_halo_bound, upper_halo_bound)
// is sorted; the result is compacted to the front of the buffers and
// m_data.m_chunk[0] is shrunk accordingly.
// NOTE(review): assumes RADIX_POWERS[j] is the place value of digit j in base
// RADIX_BUCKETS - confirm against their definitions (not visible here).
void sort_by_order(Clusters& clusters) {
// allocate the radix buckets, one histogram per digit of the largest index
const size_t maximum_digit_count = static_cast<size_t>(std::ceil(std::log10(m_data.m_shape[0])));
std::vector<std::vector<size_t>> buckets(maximum_digit_count, std::vector<size_t>(RADIX_BUCKETS));
// count the items per bucket; all digit histograms can be built in one pass
// up front because the key multiset is identical in every radix pass
size_t lower_bound = lower_halo_bound();
size_t upper_bound = upper_halo_bound();
const size_t items = upper_bound - lower_bound;
#pragma omp parallel for schedule(static)
for (size_t i = 0; i < items; ++i) {
for (size_t j = 0; j < maximum_digit_count; ++j) {
const size_t base = RADIX_POWERS[j];
const size_t digit = m_initial_order[i + lower_bound] / base % RADIX_BUCKETS;
// concurrent histogram update - protected per element
#pragma omp atomic
++buckets[j][digit];
}
}
// accumulate the bucket entries to get the offsets (inclusive prefix sums,
// independent per digit so the outer loop can run in parallel)
#pragma omp parallel for shared(buckets)
for (size_t j = 0; j < maximum_digit_count; ++j) {
for (size_t f = 1; f < RADIX_BUCKETS; ++f) {
buckets[j][f] += buckets[j][f-1];
}
}
// actually reorder the points out-of-place
const hsize_t dimensions = m_data.m_chunk[1];
Clusters cluster_buffer(items);
std::vector<size_t> order_buffer(items);
T* point_buffer = new T[items * dimensions];
for (size_t j = 0; j < maximum_digit_count; ++j) {
const size_t base = RADIX_POWERS[j];
const size_t point_offset = lower_bound * dimensions;
// assign the number to the respective radix bucket; iterating backwards
// keeps the sort stable - the loop ends when i wraps around (unsigned)
for (size_t i = items - 1; i < items; --i) {
size_t unit = m_initial_order[i + lower_bound] / base % RADIX_BUCKETS;
size_t pos = --buckets[j][unit];
order_buffer[pos] = m_initial_order[i + lower_bound];
cluster_buffer[pos] = clusters[i + lower_bound];
for (size_t d = 0; d < dimensions; ++d) {
point_buffer[pos * dimensions + d] = static_cast<T*>(m_data.m_p)[i * dimensions + d + point_offset];
}
}
// swap the buffers
clusters.swap(cluster_buffer);
m_initial_order.swap(order_buffer);
T* temp = static_cast<T*>(m_data.m_p);
m_data.m_p = point_buffer;
point_buffer = temp;
// this is somewhat hacky, in the first round we have the original buffers including(!) halos
// after the first swap, we do not anymore, since we reduced all the elements done to the non-halo zone
// we can easily adjust to that by removing the initial halo lower_bound offset
if (j == 0) {
lower_bound = 0;
}
}
// clean up: after the final swap point_buffer holds the stale scratch copy
delete[] point_buffer;
m_data.m_chunk[0] = items;
}
#endif
public:
// SpatialIndex(Dataset& data, std::vector<float> epsilon_map)
// Builds the spatial cell index for the given dataset: determines the space
// and cell dimensions, assigns every point to a cell, indexes and sorts the
// points by cell, and - when MPI is enabled - redistributes the points across
// ranks according to a globally balanced cell histogram.
// data: the dataset to index; its chunk holds (#points, #dimensions).
// epsilon_map: search radius per feature dimension (consumed by move).
SpatialIndex(Dataset& data, std::vector<float> epsilon_map)
    : m_data(data),
      m_epsilon_map(std::move(epsilon_map)),
      m_minimums(data.m_chunk[1], std::numeric_limits<T>::max()),
      // lowest() instead of min(): for floating-point T, min() is the smallest
      // POSITIVE value, which would break maximum tracking for negative
      // coordinates; for integer T the two are identical
      m_maximums(data.m_chunk[1], std::numeric_limits<T>::lowest()),
      m_cell_dimensions(data.m_chunk[1], 0),
      m_total_cells(1),
      m_last_cell(0),
      m_cells(data.m_chunk[0], 0),
      m_swapped_dimensions(data.m_chunk[1], 0),
      m_halo(0),
      m_global_point_offset(0),
      m_initial_order(data.m_chunk[0]) {
    // determine the space dimensions, the corresponding number of cells for each feature dimension
    #ifdef WITH_MPI
    MPI_Comm_rank(MPI_COMM_WORLD, &m_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &m_size);
    #endif
    #ifdef WITH_OUTPUT
    double start = omp_get_wtime();
    #ifdef WITH_MPI
    if (m_rank == 0) {
    #endif
    std::cout << "Computing cell space" << std::endl;
    std::cout << "\tComputing dimensions..." << std::flush;
    #ifdef WITH_MPI
    }
    #endif
    #endif
    compute_initial_order();
    compute_space_dimensions();
    compute_cell_dimensions();
    swap_dimensions();
    // determine the cell for each point, compute the cell histogram and index the points as if they were sorted...
    #ifdef WITH_OUTPUT
    #ifdef WITH_MPI
    if (m_rank == 0) {
    #endif
    std::cout << "[OK] in " << omp_get_wtime() - start << std::endl;
    std::cout << "\tComputing cells... " << std::flush;
    #ifdef WITH_MPI
    }
    #endif
    start = omp_get_wtime();
    #endif
    compute_cells();
    compute_cell_index();
    // ... actually sort the points to allow for O(1) access during neighborhood queries
    #ifdef WITH_OUTPUT
    #ifdef WITH_MPI
    if (m_rank == 0) {
    #endif
    std::cout << "[OK] in " << omp_get_wtime() - start << std::endl;
    std::cout << "\tSorting points... " << std::flush;
    #ifdef WITH_MPI
    }
    #endif
    start = omp_get_wtime();
    #endif
    sort_by_cell();
    #ifdef WITH_OUTPUT
    #ifdef WITH_MPI
    if (m_rank == 0) {
    #endif
    std::cout << "[OK] in " << omp_get_wtime() - start << std::endl;
    #ifdef WITH_MPI
    }
    #endif
    #endif
    // communicate the cell histograms and redistribute the points - only necessary when MPI is turned on
    #ifdef WITH_MPI
    #ifdef WITH_OUTPUT
    start = omp_get_wtime();
    if (m_rank == 0) {
        std::cout << "\tDistributing points... " << std::flush;
    }
    #endif
    // compute a global histogram and redistribute the points based on that
    CellHistogram global_histogram = compute_global_histogram();
    compute_bounds(global_histogram);
    global_histogram.clear();
    redistribute_dataset();
    // after the redistribution we have to reindex the new data yet again
    compute_cells();
    compute_cell_index();
    compute_global_point_offset();
    sort_by_cell();
    #ifdef WITH_OUTPUT
    if (m_rank == 0) {
        std::cout << "[OK] in " << omp_get_wtime() - start << std::endl;
    }
    #endif
    #endif
}
#ifdef WITH_MPI
// Returns the local point index of the first point this rank owns, i.e. the
// index just past the lower halo zone.
size_t lower_halo_bound() const {
// first point of the lowest owned cell, minus the lower halo point count
size_t lower = m_cell_index.lower_bound(m_cell_bounds[m_rank][1])->second.first;
return lower - m_compute_bounds[m_rank][0];
}
// Returns the local point index one past the last point this rank owns, i.e.
// the index where the upper halo zone begins.
size_t upper_halo_bound() const {
// first point past the highest owned cell, minus the upper halo point count
size_t upper = m_cell_index.lower_bound(m_cell_bounds[m_rank][2])->second.first;
return upper - m_compute_bounds[m_rank][1];
}
// Determines, for every other rank, the [first, second) range of locally
// stored points that overlaps that rank's owned cell range [cell_bound[1],
// cell_bound[2]) and therefore has to be considered during the halo exchange.
// The entry for this rank itself is left as the default Locator(0, 0).
Cuts compute_cuts() const {
    Cuts cuts(m_size, Locator(0, 0));
    for (int i = 0; i < m_size; ++i) {
        // skip own rank - no cut against ourselves
        if (i == m_rank) {
            continue;
        }
        const auto& cell_bound = m_cell_bounds[i];
        const auto& compute_bound = m_compute_bounds[i];
        // lower bound of the cut, shifted down by the rank's lower halo count
        const auto& lower_cut = m_cell_index.lower_bound(cell_bound[1]);
        cuts[i].first = lower_cut->second.first;
        if (lower_cut->first != m_last_cell or m_cell_index.find(m_last_cell - 1) != m_cell_index.end()) {
            cuts[i].first = cuts[i].first < compute_bound[0] ? 0 : cuts[i].first - compute_bound[0];
        }
        // upper bound of the cut, shifted down by the rank's upper halo count
        // (condition uses `or` to match the lower-bound branch and the rest of
        // the file, which consistently uses the alternative operator spelling)
        const auto& upper_cut = m_cell_index.lower_bound(cell_bound[2]);
        cuts[i].second = upper_cut->second.first;
        if (upper_cut->first != m_last_cell or m_cell_index.find(m_last_cell - 1) != m_cell_index.end()) {
            cuts[i].second = cuts[i].second < compute_bound[1] ? 0 : cuts[i].second - compute_bound[1];
        }
    }
    return cuts;
}
#else
// Non-MPI build: there are no halos, the local data starts at index 0.
size_t lower_halo_bound() const {
return 0;
}
// Non-MPI build: there are no halos, the local data spans the whole chunk.
size_t upper_halo_bound() const {
return m_data.m_chunk[0];
}
#endif
// Returns the cell the point at the given local index was assigned to.
inline Cell cell_of(size_t index) const {
return m_cells[index];
}
// Collects the local indices of all points residing in the given cell and in
// every existing directly adjacent cell (one step along each dimension, in
// the order given by m_swapped_dimensions).
// NOTE(review): the initial m_cell_index.find(cell) is dereferenced
// unconditionally - callers must pass a non-empty cell.
std::vector<size_t> get_neighbors(const Cell cell) const {
    const hsize_t dimensions = m_data.m_chunk[1];
    // allocate some space for the neighboring cells, be pessimistic and reserve 3^dims for possibly all neighbors
    Cells neighboring_cells;
    neighboring_cells.reserve(std::pow(3, dimensions));
    neighboring_cells.push_back(cell);
    // cell accumulators
    size_t cells_in_lower_space = 1;
    size_t cells_in_current_space = 1;
    size_t number_of_points = m_cell_index.find(cell)->second.second;
    // fetch all existing neighboring cells; in each dimension, extend every
    // cell gathered so far by its lower and upper neighbor (the fixed `end`
    // keeps this pass from re-expanding the cells it just appended)
    for (size_t d : m_swapped_dimensions) {
        cells_in_current_space *= m_cell_dimensions[d];
        for (size_t i = 0, end = neighboring_cells.size(); i < end; ++i) {
            const Cell current_cell = neighboring_cells[i];
            // check "left" neighbor - a.k.a the cell in the current dimension that has a lower number;
            // only look it up in the index when it is actually inside the space
            if (current_cell % cells_in_current_space >= cells_in_lower_space) {
                const Cell left = current_cell - cells_in_lower_space;
                const auto found_left = m_cell_index.find(left);
                neighboring_cells.push_back(left);
                number_of_points += found_left != m_cell_index.end() ? found_left->second.second : 0;
            }
            // check "right" neighbor - a.k.a the cell in the current dimension that has a higher number
            if (current_cell % cells_in_current_space < cells_in_current_space - cells_in_lower_space) {
                const Cell right = current_cell + cells_in_lower_space;
                const auto found_right = m_cell_index.find(right);
                neighboring_cells.push_back(right);
                number_of_points += found_right != m_cell_index.end() ? found_right->second.second : 0;
            }
        }
        cells_in_lower_space = cells_in_current_space;
    }
    // copy the points from the neighboring cells over
    std::vector<size_t> neighboring_points;
    neighboring_points.reserve(number_of_points);
    for (size_t neighbor_cell : neighboring_cells) {
        const auto found = m_cell_index.find(neighbor_cell);
        // skip empty cells
        if (found == m_cell_index.end()) {
            continue;
        }
        // ... otherwise append the cell's contiguous point index range
        const Locator& locator = found->second;
        neighboring_points.resize(neighboring_points.size() + locator.second);
        std::iota(neighboring_points.end() - locator.second, neighboring_points.end(), locator.first);
    }
    return neighboring_points;
}
// Scans the candidate points for the point at point_index, appends every point
// within squared euclidean distance EPS2 to min_points_area and returns the
// smallest core label observed in range - or the point's own tentative label
// (its one-based global index) when no labeled core point is in range.
Cluster region_query(const size_t point_index, const std::vector<size_t>& neighboring_points, const float EPS2,
                     const Clusters& clusters, std::vector<size_t>& min_points_area) const {
    const size_t dimensions = m_data.m_chunk[1];
    const T* point = static_cast<T*>(m_data.m_p) + point_index * dimensions;
    // tentative label derived from the point's global index (one-based)
    Cluster cluster_label = m_global_point_offset + point_index + 1;
    // iterate through all neighboring points and check whether they are in range
    for (size_t neighbor: neighboring_points) {
        double offset = 0.0;
        const T* other_point = static_cast<T*>(m_data.m_p) + neighbor * dimensions;
        // determine squared euclidean distance to the other point; the
        // difference is accumulated in double - converting a negative
        // floating-point difference to size_t would be undefined behavior
        for (size_t d = 0; d < dimensions; ++d) {
            const double distance = static_cast<double>(point[d]) - static_cast<double>(other_point[d]);
            offset += distance * distance;
        }
        // .. if in range, add it to the vector with in range points
        if (offset <= EPS2) {
            const Cluster neighbor_label = clusters[neighbor];
            min_points_area.push_back(neighbor);
            // if neighbor point has an assigned label and it is a core (negative), take the smaller label
            if (neighbor_label != NOT_VISITED and neighbor_label < 0) {
                cluster_label = std::min(cluster_label, std::abs(neighbor_label));
            }
        }
    }
    return cluster_label;
}
// Returns the number of feature dimensions per point.
size_t dimension() const {
return m_data.m_chunk[1];
}
// Multi-criteria variant of region_query: a neighbor is in range only if its
// squared distance is within epsilon^2 for EVERY epsilon configuration, each
// measured over that configuration's own dimension subset. In-range neighbors
// are appended to min_points_area; returns the smallest core label in range
// or the point's own tentative label (its one-based global index).
Cluster region_query_multi_crit(const size_t point_index, const std::vector<size_t> &neighboring_points, const std::vector<EPSConfig> &all_eps,
                                const Clusters &clusters, std::vector<size_t> &min_points_area) const {
    const size_t dimensions = m_data.m_chunk[1];
    const T *point = static_cast<T *>(m_data.m_p) + point_index * dimensions;
    // tentative label derived from the point's global index (one-based)
    Cluster cluster_label = m_global_point_offset + point_index + 1;
    // one reusable buffer for the per-criterion squared distances instead of
    // allocating a fresh vector for every neighbor
    std::vector<float> offsets;
    offsets.reserve(all_eps.size());
    // iterate through all neighboring points and check whether they are in range
    for (size_t neighbor : neighboring_points) {
        const T *other_point = static_cast<T *>(m_data.m_p) + neighbor * dimensions;
        offsets.clear();
        for (auto &&eps_conf: all_eps) {
            double offset = 0.0;
            // squared euclidean distance over this criterion's dimensions; the
            // difference is accumulated in double - converting a negative
            // floating-point difference to size_t would be undefined behavior
            for (auto &&d: eps_conf.dimensions) {
                const double distance = static_cast<double>(point[d]) - static_cast<double>(other_point[d]);
                offset += distance * distance;
            }
            offsets.push_back(offset);
        }
        // .. the neighbor is in range only if every criterion is satisfied
        bool in_range = true;
        for (size_t i = 0; i < all_eps.size(); ++i) {
            if (offsets[i] > all_eps[i].epsilon * all_eps[i].epsilon) {
                in_range = false;
                break;
            }
        }
        if (in_range) {
            const Cluster neighbor_label = clusters[neighbor];
            min_points_area.push_back(neighbor);
            // if neighbor point has an assigned label and it is a core (negative), take the smaller label
            if (neighbor_label != NOT_VISITED and neighbor_label < 0) {
                cluster_label = std::min(cluster_label, std::abs(neighbor_label));
            }
        }
    }
    return cluster_label;
}
// Restores the dataset, the cluster labels and m_initial_order to the original
// global point order. With MPI, the points are first radix-sorted locally by
// original index, then exchanged back to the rank that originally loaded them
// (derived from the even chunk split of m_data.m_shape[0]); finally - in both
// build variants - the points are scattered in-place into their original local
// positions relative to m_data.m_offset[0].
void recover_initial_order(Clusters& clusters) {
const hsize_t dimensions = m_data.m_chunk[1];
#ifdef WITH_MPI
// sort locally by original index so each rank's outgoing data is contiguous
sort_by_order(clusters);
// allocate buffers to do an inverse exchange (counts in points, not scalars)
int send_counts[m_size];
int send_displs[m_size];
int recv_counts[m_size];
int recv_displs[m_size];
const size_t lower_bound = lower_halo_bound();
const size_t upper_bound = upper_halo_bound();
const size_t items = upper_bound - lower_bound;
// original even distribution: chunk_size points per rank, the first
// `remainder` ranks hold one extra point
const size_t chunk_size = m_data.m_shape[0] / m_size;
const size_t remainder = m_data.m_shape[0] % static_cast<size_t>(m_size);
size_t previous_offset = 0;
// find all the points that have a global index less than each rank's chunk end;
// m_initial_order is sorted here, so a binary search yields the split points
for (size_t i = 1; i < static_cast<size_t>(m_size) + 1; ++i) {
const size_t chunk_end = chunk_size * i + (remainder > i ? i : remainder);
const auto split_iter = std::lower_bound(m_initial_order.begin(), m_initial_order.begin() + items, chunk_end);
size_t split_index = split_iter - m_initial_order.begin();
send_counts[i - 1] = static_cast<int>(split_index - previous_offset);
send_displs[i - 1] = static_cast<int>(previous_offset);
previous_offset = split_index;
}
// exchange the resulting item counts and displacements to get the incoming items for this rank
MPI_Alltoall(send_counts, 1, MPI_INT, recv_counts, 1, MPI_INT, MPI_COMM_WORLD);
for (int i = 0; i < m_size; ++i) {
recv_displs[i] = (i == 0) ? 0 : recv_displs[i - 1] + recv_counts[i - 1];
}
// redistribute the dataset to their original owner ranks; point counts are
// the per-point counts scaled by the number of dimensions
size_t total_recv_items = 0;
int send_counts_points[m_size];
int send_displs_points[m_size];
int recv_counts_points[m_size];
int recv_displs_points[m_size];
for (int i = 0; i < m_size; ++i) {
total_recv_items += recv_counts[i];
send_counts_points[i] = send_counts[i] * dimensions;
send_displs_points[i] = send_displs[i] * dimensions;
recv_counts_points[i] = recv_counts[i] * dimensions;
recv_displs_points[i] = recv_displs[i] * dimensions;
}
// allocate new buffers for the points and the order vectors
T* point_buffer = new T[total_recv_items * dimensions];
std::vector<size_t> order_buffer(total_recv_items);
Clusters cluster_buffer(total_recv_items);
// actually transmit the data
MPI_Alltoallv(
static_cast<T*>(m_data.m_p), send_counts_points, send_displs_points, MPI_Types<T>::map(),
point_buffer, recv_counts_points, recv_displs_points, MPI_Types<T>::map(), MPI_COMM_WORLD
);
// NOTE(review): the send type is MPI_Types<size_t>::map() but the receive
// type is MPI_LONG - same size on LP64, but signedness differs; the same
// applies to the cluster exchange below (Clusters holds signed labels).
// Confirm and unify the datatypes.
MPI_Alltoallv(
m_initial_order.data(), send_counts, send_displs, MPI_Types<size_t>::map(),
order_buffer.data(), recv_counts, recv_displs, MPI_LONG, MPI_COMM_WORLD
);
MPI_Alltoallv(
clusters.data(), send_counts, send_displs, MPI_Types<size_t>::map(),
cluster_buffer.data(), recv_counts, recv_displs, MPI_LONG, MPI_COMM_WORLD
);
// assign the new data, freeing the previous point buffer
delete[] static_cast<T*>(m_data.m_p);
m_data.m_p = point_buffer;
point_buffer = nullptr;
m_data.m_chunk[0] = total_recv_items;
m_initial_order.swap(order_buffer);
order_buffer.clear();
clusters.swap(cluster_buffer);
cluster_buffer.clear();
#endif
// only reordering step needed for non-MPI implementation and final local reordering for MPI version
// out-of-place rearranging of items: each point moves to its original local
// slot, i.e. its global index minus this rank's original dataset offset
T* local_point_buffer = new T[m_initial_order.size() * dimensions];
std::vector<size_t> local_order_buffer(m_initial_order.size());
Clusters local_cluster_buffer(m_initial_order.size());
#pragma omp parallel for
for (size_t i = 0; i < m_initial_order.size(); ++i) {
const size_t copy_to = m_initial_order[i] - m_data.m_offset[0];
local_order_buffer[copy_to] = m_initial_order[i];
local_cluster_buffer[copy_to] = clusters[i];
for (size_t d = 0; d < dimensions; ++d) {
local_point_buffer[copy_to * dimensions + d] = static_cast<T*>(m_data.m_p)[i * dimensions + d];
}
}
clusters.swap(local_cluster_buffer);
m_initial_order.swap(local_order_buffer);
delete[] static_cast<T*>(m_data.m_p);
m_data.m_p = local_point_buffer;
}
};
#endif // SPATIAL_INDEX_H
|
atomic_write_codegen.c | // RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -target-cpu core2 -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -emit-pch -o %t %s
// RUN: %clang_cc1 -fopenmp-simd -x c -triple x86_64-apple-darwin10 -target-cpu core2 -include-pch %t -verify %s -emit-llvm -o - | FileCheck --check-prefix SIMD-ONLY0 %s
// SIMD-ONLY0-NOT: {{__kmpc|__tgt}}
// expected-no-diagnostics
// REQUIRES: x86-registered-target
#ifndef HEADER
#define HEADER
// Global scalar pairs: each "<t>v" (value/source) and "<t>x" (target) pair is
// used by the '#pragma omp atomic write' statements exercised in main() below.
_Bool bv, bx;
char cv, cx;
unsigned char ucv, ucx;
short sv, sx;
unsigned short usv, usx;
int iv, ix;
unsigned int uiv, uix;
long lv, lx;
unsigned long ulv, ulx;
long long llv, llx;
unsigned long long ullv, ullx;
float fv, fx;
double dv, dx;
long double ldv, ldx;
_Complex int civ, cix;
_Complex float cfv, cfx;
_Complex double cdv, cdx;
// vector type target for atomic writes to a vector element
typedef int int4 __attribute__((__vector_size__(16)));
int4 int4x;
// bit-field structs in regular and __packed__ variants with differently
// placed/sized fields, used to test atomic writes to bit-field lvalues
struct BitFields {
int : 32;
int a : 31;
} bfx;
struct BitFields_packed {
int : 32;
int a : 31;
} __attribute__ ((__packed__)) bfx_packed;
struct BitFields2 {
int : 31;
int a : 1;
} bfx2;
struct BitFields2_packed {
int : 31;
int a : 1;
} __attribute__ ((__packed__)) bfx2_packed;
struct BitFields3 {
int : 11;
int a : 14;
} bfx3;
struct BitFields3_packed {
int : 11;
int a : 14;
} __attribute__ ((__packed__)) bfx3_packed;
struct BitFields4 {
short : 16;
int a: 1;
long b : 7;
} bfx4;
struct BitFields4_packed {
short : 16;
int a: 1;
long b : 7;
} __attribute__ ((__packed__)) bfx4_packed;
typedef float float2 __attribute__((ext_vector_type(2)));
float2 float2x;
// Register "0" is currently an invalid register for global register variables.
// Use "esp" instead of "0".
// register int rix __asm__("0");
register int rix __asm__("esp");
int main() {
// CHECK: store atomic i32 1, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @civ, i32 0, i32 1) monotonic,
#pragma omp atomic write
__imag(civ) = 1;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
bx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
cx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
ucx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i16
#pragma omp atomic write
sx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i16
#pragma omp atomic write
usx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
#pragma omp atomic write
ix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
#pragma omp atomic write
uix = uiv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
lx = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
ulx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
llx = llv;
// CHECK: load i64, i64*
// CHECK: store atomic i64
#pragma omp atomic write
ullx = ullv;
// CHECK: load float, float*
// CHECK: bitcast float {{.*}} to i32
// CHECK: store atomic i32 {{.*}}, i32* bitcast (float*
#pragma omp atomic write
fx = fv;
// CHECK: load double, double*
// CHECK: bitcast double {{.*}} to i64
// CHECK: store atomic i64 {{.*}}, i64* bitcast (double*
#pragma omp atomic write
dx = dv;
// CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80*
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
// CHECK: store x86_fp80 [[LD]], x86_fp80* [[LDTEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128*
// CHECK: [[LD:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[LD]], i128* bitcast (x86_fp80*
#pragma omp atomic write
ldx = ldv;
// CHECK: [[REAL_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[REAL_VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 [[IMG_VAL]], i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cix = civ;
// CHECK: [[REAL_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1
// CHECK: store float [[REAL_VAL]], float* [[TEMP_REAL_REF]]
// CHECK: store float [[IMG_VAL]], float* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ float, float }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cfx = cfv;
// CHECK: [[REAL_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 0)
// CHECK: [[IMG_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 1)
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1
// CHECK: store double [[REAL_VAL]], double* [[TEMP_REAL_REF]]
// CHECK: store double [[IMG_VAL]], double* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 16, i8* bitcast ({ double, double }* @{{.*}} to i8*), i8* [[BITCAST]], i32 5)
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst write
cdx = cdv;
// CHECK: load i8, i8*
// CHECK: store atomic i64
#pragma omp atomic write
ulx = bv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
#pragma omp atomic write
bx = cv;
// CHECK: load i8, i8*
// CHECK: store atomic i8
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write, seq_cst
cx = ucv;
// CHECK: load i16, i16*
// CHECK: store atomic i64
#pragma omp atomic write
ulx = sv;
// CHECK: load i16, i16*
// CHECK: store atomic i64
#pragma omp atomic write
lx = usv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic seq_cst, write
uix = iv;
// CHECK: load i32, i32*
// CHECK: store atomic i32
#pragma omp atomic write
ix = uiv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = trunc i64 %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cix = lv;
// CHECK: load i64, i64*
// CHECK: store atomic i32 %{{.+}}, i32* bitcast (float*
#pragma omp atomic write
fx = ulv;
// CHECK: load i64, i64*
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double*
#pragma omp atomic write
dx = llv;
// CHECK: load i64, i64*
// CHECK: [[VAL:%.+]] = uitofp i64 %{{.+}} to x86_fp80
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8*
// CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false)
// CHECK: store x86_fp80 [[VAL]], x86_fp80* [[TEMP]]
// CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128*
// CHECK: [[VAL:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: store atomic i128 [[VAL]], i128* bitcast (x86_fp80*
#pragma omp atomic write
ldx = ullv;
// CHECK: load float, float*
// CHECK: [[VAL:%.+]] = fptosi float %{{.*}} to i32
// CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0
// CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1
// CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]]
// CHECK: store i32 0, i32* [[TEMP_IMG_REF]]
// CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8*
// CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0)
#pragma omp atomic write
cix = fv;
// CHECK: load double, double*
// CHECK: store atomic i16
#pragma omp atomic write
sx = dv;
// CHECK: load x86_fp80, x86_fp80*
// CHECK: store atomic i8
#pragma omp atomic write
bx = ldv;
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0)
// CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1)
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: icmp ne i32 %{{.+}}, 0
// CHECK: or i1
// CHECK: store atomic i8
#pragma omp atomic write
bx = civ;
// CHECK: load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0)
// CHECK: store atomic i16
#pragma omp atomic write
usx = cfv;
// CHECK: load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0)
// CHECK: store atomic i64
#pragma omp atomic write
llx = cdv;
// CHECK-DAG: [[IDX:%.+]] = load i16, i16* @{{.+}}
// CHECK-DAG: load i8, i8*
// CHECK-DAG: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32
// CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I128:%.+]] = phi i128 [ [[I128VAL]], %{{.+}} ], [ [[FAILED_I128_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128*
// CHECK: store i128 [[OLD_I128]], i128* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[VEC_ITEM_VAL]], i16 [[IDX]]
// CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[LDTEMP]]
// CHECK: [[NEW_I128:%.+]] = load i128, i128* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic
// CHECK: [[FAILED_I128_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
int4x[sv] = bv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8*
// CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST]], i32 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]],
// CHECK: store i32 [[OLD_BF_VALUE]], i32* [[LDTEMP1:%.+]],
// CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP1]],
// CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647
// CHECK: [[BF_CLEAR:%.+]] = and i32 [[OLD_BF_VALUE]], -2147483648
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP1]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP1]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, 2147483647
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx2_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11
// CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385
// CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24*
// CHECK: [[BITCAST:%.+]] = bitcast i24* %{{.+}} to i8*
// CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST]], i32 0)
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_VAL:%.+]] = load i24, i24* %{{.+}},
// CHECK: store i24 [[OLD_VAL]], i24* [[TEMP:%.+]],
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24
// CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383
// CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3
// CHECK: [[BF_CLEAR:%.+]] = and i24 %{{.+}}, -131065
// CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i24 %{{.+}}, i24* [[TEMP]]
// CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[LDTEMP]] to i8*
// CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[TEMP]] to i8*
// CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0)
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx3_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64
// CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -65537
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8
// CHECK: [[BF_VALUE:%.+]] = and i8 [[TRUNC]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, -2
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4_packed.a = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BF_AND:%.+]] = and i64 [[NEW_VAL]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 17
// CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -16646145
// CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4.b = ldv;
// CHECK: load x86_fp80, x86_fp80* @{{.+}}
// CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64
// CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8
// CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127
// CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1
// CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 1
// CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]]
// CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]]
// CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]]
// CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic
// CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
bfx4_packed.b = ldv;
// CHECK: load i64, i64*
// CHECK: [[VEC_ITEM_VAL:%.+]] = uitofp i64 %{{.+}} to float
// CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic
// CHECK: br label %[[CONT:.+]]
// CHECK: [[CONT]]
// CHECK: [[OLD_I64:%.+]] = phi i64 [ [[I64VAL]], %{{.+}} ], [ [[FAILED_I64_OLD_VAL:%.+]], %[[CONT]] ]
// CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64*
// CHECK: store i64 [[OLD_I64]], i64* [[BITCAST]],
// CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0
// CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP]]
// CHECK: [[NEW_I64:%.+]] = load i64, i64* [[BITCAST]]
// CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic
// CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0
// CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1
// CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]]
// CHECK: [[EXIT]]
#pragma omp atomic write
float2x.x = ulv;
// CHECK: call i32 @llvm.read_register.i32(
// CHECK: sitofp i32 %{{.+}} to double
// CHECK: bitcast double %{{.+}} to i64
// CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* @{{.+}} to i64*) seq_cst
// CHECK: call{{.*}} @__kmpc_flush(
#pragma omp atomic write seq_cst
dv = rix;
return 0;
}
#endif
|
GB_binop__islt_int16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__islt_int16
// A.*B function (eWiseMult): GB_AemultB__islt_int16
// A*D function (colscale): GB_AxD__islt_int16
// D*A function (rowscale): GB_DxB__islt_int16
// C+=B function (dense accum): GB_Cdense_accumB__islt_int16
// C+=b function (dense accum): GB_Cdense_accumb__islt_int16
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__islt_int16
// C=scalar+B GB_bind1st__islt_int16
// C=scalar+B' GB_bind1st_tran__islt_int16
// C=A+scalar GB_bind2nd__islt_int16
// C=A'+scalar GB_bind2nd_tran__islt_int16
// C type: int16_t
// A type: int16_t
// B,b type: int16_t
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
int16_t
#define GB_BTYPE \
int16_t
#define GB_CTYPE \
int16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int16_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int16_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x < y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_INT16 || GxB_NO_ISLT_INT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// C = A+B where C, A, and B are all dense, with no accumulator: each entry
// is combined with the ISLT operator, cij = (aij < bij).  The actual loop
// lives in the included template, driven by the GB_* macros defined above.
GrB_Info GB_Cdense_ewise3_noaccum__islt_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
// this kernel was compiled out via the GxB_NO_* control flags; the caller
// falls back to the generic (non-specialized) worker
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
// C += B: accumulate a sparse matrix B into a dense matrix C, where the
// accumulator is the ISLT operator, cij = (cij < bij).  The kfirst/klast/
// pstart slice arrays partition the entries of B across ntasks tasks; the
// loop itself is in the included template.
GrB_Info GB_Cdense_accumB__islt_int16
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
// C += b: accumulate a scalar b into a dense matrix C with the ISLT
// operator.  p_bwork points at the scalar, passed type-erased as GB_void.
GrB_Info GB_Cdense_accumb__islt_int16
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int16_t
int16_t bwork = (*((int16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
// NOTE: unreachable when the inner return above is taken; kept as-is since
// this file is auto-generated ("do not edit")
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = A*D, column scale: each column j of A is combined with the diagonal
// entry D(j,j) using the ISLT operator.  The slice arrays partition the
// entries of A across tasks; the loop is in the included template.
GrB_Info GB_AxD__islt_int16
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
// typed view of the output values, written by the template
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
// C = D*B, row scale: each row i of B is combined with the diagonal entry
// D(i,i) using the ISLT operator.  The loop is in the included template.
GrB_Info GB_DxB__islt_int16
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
// typed view of the output values, written by the template
int16_t *GB_RESTRICT Cx = (int16_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
// eWiseAdd: C = A+B or C<M> = A+B, where overlapping entries are combined
// with the ISLT operator.  The heavy lifting is in GB_add_template.c; the
// *_slice pointers below are ek_slice workspace that the template may
// allocate and that GB_FREE_ALL (defined just above) releases.
GrB_Info GB_AaddB__islt_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
// slice workspace, allocated on demand inside the template
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B, intersection of the patterns of A
// and B, entries combined with the ISLT operator.  The loop is in
// GB_emult_template.c; the *_slice pointers are workspace that the
// template may allocate and that GB_FREE_ALL (defined above) releases.
GrB_Info GB_AemultB__islt_int16
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
// slice workspace, allocated on demand inside the template
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
// Cx = op (x, Bx): apply the ISLT operator with the scalar x bound as the
// first operand, Cx [p] = (x < Bx [p]) for every entry present in B.
// Bb is B's bitmap (NULL if B is full); entries absent from the bitmap
// are skipped.  Cx and Bx may be aliased.
GrB_Info GB_bind1st__islt_int16
(
GB_void *Cx_output,         // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased input/output arrays
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Bx = (int16_t *) Bx_input ;
// the bound scalar x, the same for every entry
int16_t x = (*((int16_t *) x_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// apply the operator only where B has an entry
if (GBB (Bb, k))
{
int16_t bkj = Bx [k] ;
Cx [k] = (x < bkj) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
// Cx = op (Ax, y): apply the ISLT operator with the scalar y bound as the
// second operand, Cx [p] = (Ax [p] < y) for every entry present in A.
// Ab is A's bitmap (NULL if A is full); entries absent from the bitmap
// are skipped.  Cx and Ax may be aliased.
GrB_Info GB_bind2nd__islt_int16
(
GB_void *Cx_output,         // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
// typed views of the type-erased input/output arrays
int16_t *Cx = (int16_t *) Cx_output ;
int16_t *Ax = (int16_t *) Ax_input ;
// the bound scalar y, the same for every entry
int16_t y = (*((int16_t *) y_input)) ;
int64_t k ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (k = 0 ; k < anz ; k++)
{
// apply the operator only where A has an entry
if (GBB (Ab, k))
{
int16_t akj = Ax [k] ;
Cx [k] = (akj < y) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (x < aij) ; \
}
// C = op (x, A'): transpose A and apply z = (x < aij) with the scalar x
// bound as the first operand.  The per-entry work is done by GB_CAST_OP
// (defined just above) inside GB_unop_transpose.c.
GrB_Info GB_bind1st_tran__islt_int16
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y),
// so GB_ATYPE is temporarily redefined to the B type here
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
// the bound scalar x
int16_t x = (*((const int16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
// restore GB_ATYPE for the rest of the file (a no-op for this operator,
// since its A and B types are both int16_t)
#undef GB_ATYPE
#define GB_ATYPE \
int16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int16_t aij = Ax [pA] ; \
Cx [pC] = (aij < y) ; \
}
// C = op (A', y): transpose A and apply z = (aij < y) with the scalar y
// bound as the second operand.  The per-entry work is done by GB_CAST_OP
// (defined just above) inside GB_unop_transpose.c; no GB_ATYPE override is
// needed here since A is the first operand.
GrB_Info GB_bind2nd_tran__islt_int16
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
// kernel compiled out via GxB_NO_* flags; caller uses the generic worker
return (GrB_NO_VALUE) ;
#else
// the bound scalar y
int16_t y = (*((const int16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
static_general_builder_and_solver.h | //
// Project Name: Kratos
// Last Modified by: $Author: mpetracca $
// Date: $Date: 2013-06-06 10:37:00 $
// Revision: $Revision: 1.00 $
//
//
#if !defined(KRATOS_STATIC_GENERAL_BUILDER_AND_SOLVER )
#define KRATOS_STATIC_GENERAL_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#include <iomanip>
#ifdef _OPENMP
#define STATIC_GENERAL_BUILDER_AND_SOLVER_USES_OPENMP
#endif
#ifdef STATIC_GENERAL_BUILDER_AND_SOLVER_USES_OPENMP
#include <omp.h>
#endif
#define STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
/* External includes */
#include "boost/smart_ptr.hpp"
#include "utilities/timer.h"
/* Project includes */
#include "multiscale_application_variables.h"
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
namespace Kratos
{
/** Short class definition.
Detail class definition.
Current class provides an implementation for standard builder and solving operations.
the RHS is constituted by the unbalanced loads (residual)
Degrees of freedom are reordered putting the restrained degrees of freedom at
the end of the system ordered in reverse order with respect to the DofSet.
Imposition of the dirichlet conditions is naturally dealt with as the residual already contains
this information.
Calculation of the reactions involves a cost very similar to the calculation of the total residual
*/
template<class TSparseSpace,class TDenseSpace,class TLinearSolver>
class StaticGeneralBuilderAndSolver
: public BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >
{
public:
KRATOS_CLASS_POINTER_DEFINITION(StaticGeneralBuilderAndSolver);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
public:
StaticGeneralBuilderAndSolver(typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver< TSparseSpace, TDenseSpace, TLinearSolver >(pNewLinearSystemSolver)
{
mTotalBuildTime = 0.0;
mTotalSolutionTime = 0.0;
mTotalBuildTime_accum = 0.0;
mTotalSolutionTime_accum = 0.0;
#ifdef STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
mDoFactorization = false;
#endif // STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
}
// Virtual destructor: the class is used polymorphically through the
// BuilderAndSolver base; nothing to release here.
virtual ~StaticGeneralBuilderAndSolver()
{
}
public:
// Full build: assembles the system matrix A and the right-hand side b from
// every element and condition of r_model_part, using pScheme to compute the
// local contributions.  Compiled in one of two flavours: a serial path, or
// an OpenMP path that statically partitions the entity arrays across
// threads and guards assembly with one lock per matrix row.  Wall-clock
// time spent here is accumulated into mTotalBuildTime.
void Build(typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& b)
{
double time_begin = Timer::GetTime();
// a scheme is mandatory: it computes the local LHS/RHS contributions
if (!pScheme)
KRATOS_THROW_ERROR( std::runtime_error, "No scheme provided!", "" )
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
// assemble all elements
#ifndef STATIC_GENERAL_BUILDER_AND_SOLVER_USES_OPENMP
// ------------------------- serial assembly path -------------------------
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A, LHS_Contribution, EquationId);
AssembleRHS(b, RHS_Contribution, EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
// release the local workspace before the condition loop
LHS_Contribution.resize(0, 0, false);
RHS_Contribution.resize(0, false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it = ConditionsArray.ptr_begin(); it != ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A, LHS_Contribution, EquationId);
AssembleRHS(b, RHS_Contribution, EquationId);
}
#else
// ------------------------- OpenMP assembly path -------------------------
//creating an array of lock variables of the size of the system matrix
// (one lock per row of A; Assemble takes them to serialize row updates)
std::vector< omp_lock_t > lock_array(A.size1());
int A_size = A.size1();
for (int i = 0; i < A_size; i++)
omp_init_lock(&lock_array[i]);
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
vector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
if( this->GetEchoLevel() > 2 && r_model_part.GetCommunicator().MyPID() == 0)
{
KRATOS_WATCH( number_of_threads )
KRATOS_WATCH( element_partition )
}
double start_prod = omp_get_wtime();
#pragma omp parallel for
for (int k = 0; k < number_of_threads; k++)
{
//contributions to the system (one private workspace per thread)
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
{
//calculate elemental contribution
// NOTE(review): the contribution calculation is fully serialized here,
// presumably because the scheme is not thread-safe -- confirm before
// relaxing; only Assemble() below relies on the per-row locks
#pragma omp critical
{
pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
}
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
}
vector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
#pragma omp parallel for
for (int k = 0; k < number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator it_begin = ConditionsArray.ptr_begin() + condition_partition[k];
typename ConditionsArrayType::ptr_iterator it_end = ConditionsArray.ptr_begin() + condition_partition[k + 1];
// assemble all elements
// NOTE(review): unlike the element loop above, the condition contribution
// is computed without a critical section -- verify this asymmetry is intended
for (typename ConditionsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
//assemble the elemental contribution
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array);
}
}
double stop_prod = omp_get_wtime();
if (this->GetEchoLevel() > 2 && r_model_part.GetCommunicator().MyPID() == 0)
std::cout << "time: " << stop_prod - start_prod << std::endl;
// release the per-row locks
for (int i = 0; i < A_size; i++)
omp_destroy_lock(&lock_array[i]);
if( this->GetEchoLevel() > 2 && r_model_part.GetCommunicator().MyPID() == 0)
KRATOS_WATCH( "finished parallel building" )
#endif
// accumulate the wall-clock time spent building
mTotalBuildTime += Timer::GetTime() - time_begin;
#ifdef STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
// the system matrix changed: the next solve must factorize again
mDoFactorization = true;
#endif // STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
}
/**
 * Assemble only the left-hand-side matrix A (serial path): element and
 * condition LHS contributions are computed through the scheme and added
 * into A via AssembleLHS.  The elapsed wall time is accumulated into
 * mTotalBuildTime and, when the optimized-solution path is enabled, the
 * matrix is flagged for re-factorization.
 */
void BuildLHS(typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A)
{
    const double build_start = Timer::GetTime();

    ElementsArrayType&   rElements    = r_model_part.Elements();
    ConditionsArrayType& rConditions  = r_model_part.Conditions();
    ProcessInfo&         rProcessInfo = r_model_part.GetProcessInfo();

    // reactions are rebuilt from scratch
    TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

    // local contribution and its global equation ids, reused across entities
    LocalSystemMatrixType local_lhs(0, 0);
    Element::EquationIdVectorType equation_ids;

    // element contributions
    for (typename ElementsArrayType::ptr_iterator i_elem = rElements.ptr_begin(); i_elem != rElements.ptr_end(); ++i_elem)
    {
        pScheme->Calculate_LHS_Contribution(*i_elem, local_lhs, equation_ids, rProcessInfo);
        AssembleLHS(A, local_lhs, equation_ids);
        pScheme->CleanMemory(*i_elem); // drop per-element scratch data
    }
    local_lhs.resize(0, 0, false);

    // condition contributions
    for (typename ConditionsArrayType::ptr_iterator i_cond = rConditions.ptr_begin(); i_cond != rConditions.ptr_end(); ++i_cond)
    {
        pScheme->Condition_Calculate_LHS_Contribution(*i_cond, local_lhs, equation_ids, rProcessInfo);
        AssembleLHS(A, local_lhs, equation_ids);
    }

    mTotalBuildTime += Timer::GetTime() - build_start;
#ifdef STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
    // A changed: force a new factorization on the next solve
    mDoFactorization = true;
#endif // STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
}
void BuildLHS_CompleteOnFreeRows(typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A)
{
    // Assembles the LHS keeping complete rows for the free dofs: free rows get
    // contributions from every column, including fixed-dof columns.
    double build_start = Timer::GetTime();

    ElementsArrayType& rElements = r_model_part.Elements();
    ConditionsArrayType& rConditions = r_model_part.Conditions();
    ProcessInfo& rProcessInfo = r_model_part.GetProcessInfo();

    // Reactions become stale once the matrix is rebuilt.
    TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

    LocalSystemMatrixType local_lhs(0, 0);
    Element::EquationIdVectorType equation_ids;

    for (typename ElementsArrayType::ptr_iterator it = rElements.ptr_begin(); it != rElements.ptr_end(); ++it)
    {
        pScheme->Calculate_LHS_Contribution(*it, local_lhs, equation_ids, rProcessInfo);
        AssembleLHS_CompleteOnFreeRows(A, local_lhs, equation_ids);
        pScheme->CleanMemory(*it);
    }

    local_lhs.resize(0, 0, false);

    for (typename ConditionsArrayType::ptr_iterator it = rConditions.ptr_begin(); it != rConditions.ptr_end(); ++it)
    {
        pScheme->Condition_Calculate_LHS_Contribution(*it, local_lhs, equation_ids, rProcessInfo);
        AssembleLHS_CompleteOnFreeRows(A, local_lhs, equation_ids);
    }

    mTotalBuildTime += Timer::GetTime() - build_start;
#ifdef STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
    mDoFactorization = true;
#endif // STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
}
void SystemSolve(TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b)
{
    // Solves A * Dx = b; a (numerically) empty RHS short-circuits to Dx = 0.
    double solve_start = Timer::GetTime();

    const double norm_b = (TSparseSpace::Size(b) != 0) ? TSparseSpace::TwoNorm(b) : 0.00;

    if (norm_b != 0.00)
    {
#ifdef STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
        // Factorize only on the first solve after a matrix rebuild, then reuse.
        if (mDoFactorization)
        {
            BaseType::mpLinearSystemSolver->InitializeSolutionStep(A, Dx, b);
            mDoFactorization = false;
        }
        BaseType::mpLinearSystemSolver->PerformSolutionStep(A, Dx, b);
#else
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
#endif // STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
    }
    else
    {
        TSparseSpace::SetToZero(Dx);
    }

    mTotalSolutionTime += Timer::GetTime() - solve_start;
}
// Same as SystemSolve, but additionally lets the linear solver pull physical
// information (dof set, model part) when it declares it needs it.
void SystemSolveWithPhysics(TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b, ModelPart& r_model_part)
{
    double time_begin = Timer::GetTime();

    double norm_b;
    if (TSparseSpace::Size(b) != 0)
        norm_b = TSparseSpace::TwoNorm(b);
    else
        norm_b = 0.00;

    if (norm_b != 0.00)
    {
        // Some solvers (e.g. AMG variants) request dof/model data up front.
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, r_model_part);
#ifdef STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
        if (mDoFactorization)
        {
            // First solve after a rebuild: factorize once, then reuse.
            BaseType::mpLinearSystemSolver->InitializeSolutionStep(A, Dx, b);
            BaseType::mpLinearSystemSolver->PerformSolutionStep(A, Dx, b);
            mDoFactorization = false;
        }
        else
        {
            BaseType::mpLinearSystemSolver->PerformSolutionStep(A, Dx, b);
        }
#else
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
#endif // STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
    }
    else
    {
        TSparseSpace::SetToZero(Dx);
        // bug fix: the old message claimed "setting the RHS to zero", but it
        // is the solution increment Dx that is zeroed (b is already zero).
        if (this->GetEchoLevel() > 0 && r_model_part.GetCommunicator().MyPID() == 0)
            std::cout << "ATTENTION! RHS is zero: setting the solution Dx to zero!" << std::endl;
    }

    mTotalSolutionTime += Timer::GetTime() - time_begin;
}
void BuildAndSolve(typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b)
{
    // Build A and b, then solve. Reaction computation is suspended during the
    // build (the flag is parked and restored afterwards).
    const bool reactions_flag_backup = BaseType::mCalculateReactionsFlag;
    BaseType::mCalculateReactionsFlag = false;
    this->Build(pScheme, r_model_part, A, b);
    BaseType::mCalculateReactionsFlag = reactions_flag_backup;
    SystemSolveWithPhysics(A, Dx, b, r_model_part);
}
void BuildRHSAndSolve(typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b)
{
    // Rebuild only the RHS, then solve against the existing matrix. As in
    // BuildAndSolve, the reactions flag is parked during the build.
    const bool reactions_flag_backup = BaseType::mCalculateReactionsFlag;
    BaseType::mCalculateReactionsFlag = false;
    this->BuildRHS(pScheme, r_model_part, b);
    BaseType::mCalculateReactionsFlag = reactions_flag_backup;
    SystemSolve(A, Dx, b);
}
// Assembles only the global RHS vector b (and, when the reactions flag is on,
// the reactions vector). A serial and an OpenMP path are selected at compile
// time via STATIC_GENERAL_BUILDER_AND_SOLVER_USES_OPENMP.
void BuildRHS(typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemVectorType& b)
{
    double time_begin = Timer::GetTime();
    if (!pScheme)
        KRATOS_THROW_ERROR( std::runtime_error, "No scheme provided!", "" )
    //getting the elements from the model
    ElementsArrayType& pElements = r_model_part.Elements();
    //getting the array of the conditions
    ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
    //resetting to zero the vector of reactions
    TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
    // assemble all elements
#ifndef STATIC_GENERAL_BUILDER_AND_SOLVER_USES_OPENMP
    //contributions to the system
    // NOTE(review): LHS_Contribution is declared and resized but never
    // assembled in this routine -- apparently a leftover from Build().
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
    //vector containing the localization in the system of the different terms
    Element::EquationIdVectorType EquationId;
    // element pass: compute each local RHS and scatter it into b
    for (typename ElementsArrayType::ptr_iterator it = pElements.ptr_begin(); it != pElements.ptr_end(); ++it)
    {
        //calculate elemental Right Hand Side Contribution
        pScheme->Calculate_RHS_Contribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
        //assemble the elemental contribution
        AssembleRHS(b, RHS_Contribution, EquationId);
    }
    LHS_Contribution.resize(0, 0, false);
    RHS_Contribution.resize(0, false);
    // assemble all conditions
    for (typename ConditionsArrayType::ptr_iterator it = ConditionsArray.ptr_begin(); it != ConditionsArray.ptr_end(); ++it)
    {
        //calculate elemental contribution
        pScheme->Condition_Calculate_RHS_Contribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
        //assemble the elemental contribution
        AssembleRHS(b, RHS_Contribution, EquationId);
    }
#else
    //creating an array of lock variables of the size of the system vector
    /*std::vector< omp_lock_t > lock_array(b.size());*/
    // One lock per global row; when reactions are requested the array is
    // extended so the fixed-dof (reaction) rows are lockable as well.
    int total_size = b.size();
    if (BaseType::mCalculateReactionsFlag)
        total_size += (*BaseType::mpReactionsVector).size();
    std::vector< omp_lock_t > lock_array(total_size);
    //int b_size = b.size();
    for (int i = 0; i < total_size; i++)
        omp_init_lock(&lock_array[i]);
    //create a partition of the element array
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);
    if( this->GetEchoLevel() > 2 && r_model_part.GetCommunicator().MyPID() == 0)
    {
        KRATOS_WATCH( number_of_threads )
        KRATOS_WATCH( element_partition )
    }
    double start_prod = omp_get_wtime();
    #pragma omp parallel for
    for (int k = 0; k < number_of_threads; k++)
    {
        //contributions to the system (one private instance per thread)
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
        //vector containing the localization in the system of the different
        //terms
        Element::EquationIdVectorType EquationId;
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
        // assemble all elements
        for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
        {
            //calculate elemental contribution
            // NOTE(review): the element-side RHS computation is serialized
            // with a critical section while the condition loop below is not --
            // presumably the element scheme call is not thread-safe; confirm
            // before relaxing.
            #pragma omp critical
            {
                pScheme->Calculate_RHS_Contribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
            }
            //assemble the elemental contribution (per-row locks taken inside)
            AssembleRHS(b, RHS_Contribution, EquationId, lock_array);
            // clean local elemental memory
            pScheme->CleanMemory(*it);
        }
    }
    vector<unsigned int> condition_partition;
    CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
    #pragma omp parallel for
    for (int k = 0; k < number_of_threads; k++)
    {
        //contributions to the system
        LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
        Condition::EquationIdVectorType EquationId;
        ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
        typename ConditionsArrayType::ptr_iterator it_begin = ConditionsArray.ptr_begin() + condition_partition[k];
        typename ConditionsArrayType::ptr_iterator it_end = ConditionsArray.ptr_begin() + condition_partition[k + 1];
        // assemble all conditions of this partition
        for (typename ConditionsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
        {
            //calculate elemental contribution
            pScheme->Condition_Calculate_RHS_Contribution(*it, RHS_Contribution, EquationId, CurrentProcessInfo);
            //assemble the elemental contribution
            AssembleRHS(b, RHS_Contribution, EquationId, lock_array);
        }
    }
    double stop_prod = omp_get_wtime();
    if (this->GetEchoLevel() > 2 && r_model_part.GetCommunicator().MyPID() == 0)
        std::cout << "time: " << stop_prod - start_prod << std::endl;
    for (int i = 0; i < total_size; i++)
        omp_destroy_lock(&lock_array[i]);
    if( this->GetEchoLevel() > 2 && r_model_part.GetCommunicator().MyPID() == 0)
    {
        KRATOS_WATCH( "finished parallel building" )
    }
#endif
    mTotalBuildTime += Timer::GetTime() - time_begin;
}
void SetUpDofSet(typename TSchemeType::Pointer pScheme, ModelPart& r_model_part)
{
    KRATOS_TRY;

    // Collects the dof lists of all elements and conditions into a unique set
    // stored in BaseType::mDofSet.
    if (this->GetEchoLevel() > 2 && r_model_part.GetCommunicator().MyPID() == 0)
        std::cout << " Setting up the dofs " << std::endl;

    ProcessInfo& rProcessInfo = r_model_part.GetProcessInfo();
    Element::DofsVectorType entity_dofs;
    DofsArrayType aggregated_dofs;
    BaseType::mDofSet = DofsArrayType();

    ElementsArrayType& rElements = r_model_part.Elements();
    for (typename ElementsArrayType::ptr_iterator it = rElements.ptr_begin(); it != rElements.ptr_end(); ++it)
    {
        pScheme->GetElementalDofList(*it, entity_dofs, rProcessInfo);
        for (typename Element::DofsVectorType::iterator i_dof = entity_dofs.begin(); i_dof != entity_dofs.end(); ++i_dof)
            aggregated_dofs.push_back(i_dof->get());
    }

    ConditionsArrayType& rConditions = r_model_part.Conditions();
    for (typename ConditionsArrayType::ptr_iterator it = rConditions.ptr_begin(); it != rConditions.ptr_end(); ++it)
    {
        pScheme->GetConditionDofList(*it, entity_dofs, rProcessInfo);
        for (typename Element::DofsVectorType::iterator i_dof = entity_dofs.begin(); i_dof != entity_dofs.end(); ++i_dof)
            aggregated_dofs.push_back(i_dof->get());
    }

    // Drop duplicates: every entity sharing a node reports the same dofs.
    aggregated_dofs.Unique();
    BaseType::mDofSet = aggregated_dofs;

    if (BaseType::mDofSet.size() == 0)
        KRATOS_THROW_ERROR( std::logic_error, "No degrees of freedom!", "" )
    BaseType::mDofSetIsInitialized = true;

    if (this->GetEchoLevel() > 2 && r_model_part.GetCommunicator().MyPID() == 0)
        std::cout << "finished setting up the dofs" << std::endl;

    KRATOS_CATCH( "" )
}
void SetUpSystem(ModelPart& r_model_part)
{
    // Assigns equation ids: free dofs are numbered 0..n_free-1 from the front
    // of the system, fixed dofs fill n_free..n_total-1 from the back (in
    // reverse order). Hence EquationId >= mEquationSystemSize identifies a
    // restrained dof.
    int next_free_id = 0;
    int next_fix_id = BaseType::mDofSet.size();

    for (typename DofsArrayType::iterator i_dof = BaseType::mDofSet.begin(); i_dof != BaseType::mDofSet.end(); ++i_dof)
    {
        if (i_dof->IsFixed())
            i_dof->SetEquationId(--next_fix_id);
        else
            i_dof->SetEquationId(next_free_id++);
    }

    // After the sweep next_fix_id equals the number of free dofs, i.e. the
    // size of the solvable system.
    BaseType::mEquationSystemSize = next_fix_id;
}
// Lazily allocates the system containers (A, Dx, b, reactions vector) and
// (re)creates the sparsity pattern of A whenever its size does not match the
// current equation system.
void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme,TSystemMatrixPointerType& pA,
    TSystemVectorPointerType& pDx,
    TSystemVectorPointerType& pb,
    ModelPart& rModelPart)
{
    KRATOS_TRY
    if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
    {
        TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
        pA.swap(pNewA);
    }
    if (pDx == NULL) //if the pointer is not initialized initialize it to an empty vector
    {
        TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
        pDx.swap(pNewDx);
    }
    if (pb == NULL) //if the pointer is not initialized initialize it to an empty vector
    {
        TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
        pb.swap(pNewb);
    }
    if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty vector
    {
        TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
        BaseType::mpReactionsVector.swap(pNewReactionsVector);
    }
    TSystemMatrixType& A = *pA;
    TSystemVectorType& Dx = *pDx;
    TSystemVectorType& b = *pb;
    //resizing the system vectors and matrix
    if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
    {
        A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
        // NOTE(review): this calls a five-argument ConstructMatrixStructure
        // overload (elements/conditions/process info), not the three-argument
        // virtual defined below -- presumably declared elsewhere; confirm.
        ConstructMatrixStructure(pScheme, A, rModelPart.Elements(), rModelPart.Conditions(), rModelPart.GetProcessInfo());
    }
    else
    {
        if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
        {
            // Reshaping with data preservation is expensive; this path should
            // not be hit in normal runs.
            KRATOS_WATCH( "it should not come here!!!!!!!! ... this is SLOW" )
            A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
            ConstructMatrixStructure(pScheme, A, rModelPart.Elements(), rModelPart.Conditions(), rModelPart.GetProcessInfo());
        }
    }
    if (Dx.size() != BaseType::mEquationSystemSize)
        Dx.resize(BaseType::mEquationSystemSize, false);
    if (b.size() != BaseType::mEquationSystemSize)
        b.resize(BaseType::mEquationSystemSize, false);
    //if needed resize the vector for the calculation of reactions
    if (BaseType::mCalculateReactionsFlag == true)
    {
        // One reaction slot per fixed dof.
        unsigned int ReactionsVectorSize = BaseType::mDofSet.size() - BaseType::mEquationSystemSize;
        if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
            BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
    }
    KRATOS_CATCH( "" )
}
void InitializeSolutionStep(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b)
{
    // Reset the per-step timing counters; the running totals are updated in
    // FinalizeSolutionStep.
    mTotalSolutionTime = 0.0;
    mTotalBuildTime = 0.0;
}
void FinalizeSolutionStep(ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b)
{
    // Fold the per-step timings into the running totals and, when verbose,
    // print the statistics on the root process only.
    mTotalBuildTime_accum += mTotalBuildTime;
    mTotalSolutionTime_accum += mTotalSolutionTime;
    const bool is_root = (r_model_part.GetCommunicator().MyPID() == 0);
    if (is_root && this->GetEchoLevel() > 0)
        PromptStatistics(r_model_part.GetProcessInfo());
}
// Copies the reaction values accumulated in mpReactionsVector back onto the
// fixed dofs. Assumes the RHS was already built with the reactions flag on
// (see the commented-out refresh below).
void CalculateReactions(typename TSchemeType::Pointer pScheme, ModelPart& r_model_part, TSystemMatrixType& A, TSystemVectorType& Dx, TSystemVectorType& b)
{
    ////refresh RHS to have the correct reactions
    //this->BuildRHS(pScheme, r_model_part, b);

    // Fixed dofs carry equation ids in [systemsize, mDofSet.size()), so their
    // reaction slot is EquationId() - systemsize.
    const int systemsize = BaseType::mDofSet.size() - TSparseSpace::Size(*BaseType::mpReactionsVector);
    TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;

    for (typename DofsArrayType::ptr_iterator it = BaseType::mDofSet.ptr_begin(); it != BaseType::mDofSet.ptr_end(); ++it)
    {
        if ((*it)->IsFixed())
        {
            const int reaction_index = (*it)->EquationId() - systemsize;
            (*it)->GetSolutionStepReactionValue() = ReactionsVector[reaction_index];
        }
    }
    // NOTE(review): removed the unused counter 'num' from the original loop.
}
// Releases the dof set, the reactions vector and the linear solver's internal
// data so the builder-and-solver can be reused on a different system.
void Clear()
{
    this->mDofSet = DofsArrayType();
    if (this->mpReactionsVector != NULL)
        // NOTE(review): the *pointer* is passed to TSparseSpace::Clear -- the
        // sparse-space API apparently clears through the pointer; confirm.
        TSparseSpace::Clear((this->mpReactionsVector));
    this->mpLinearSystemSolver->Clear();
}
// Hook for pre-solution consistency checks; this builder performs none and
// always reports success (0). Derived classes may override.
virtual int Check(ModelPart& r_model_part)
{
    return 0;
}
protected:
// Builds the sparsity pattern of A from the element/condition connectivity:
// a first pass collects, per free row, the set of coupled free columns; a
// second pass sorts each row and inserts explicit zeros so later assembly
// never has to allocate.
virtual void ConstructMatrixStructure( typename TSchemeType::Pointer pScheme,TSystemMatrixType& A,
    ModelPart& rModelPart)
{
    std::size_t equation_size = A.size1();
    std::vector<std::vector<std::size_t> > indices(equation_size);
    // bug fix: CurrentProcessInfo was referenced below without ever being
    // declared in this overload; fetch it from the model part.
    ProcessInfo& CurrentProcessInfo = rModelPart.GetProcessInfo();
    Element::EquationIdVectorType ids(3, 0); // initial guess; EquationId() resizes as needed
    for (typename ElementsContainerType::iterator i_element = rModelPart.ElementsBegin(); i_element != rModelPart.ElementsEnd(); i_element++)
    {
        pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo);
        // Record the coupling of every pair of free dofs of this element.
        for (std::size_t i = 0; i < ids.size(); i++)
            if (ids[i] < equation_size)
            {
                std::vector<std::size_t>& row_indices = indices[ids[i]];
                for (std::size_t j = 0; j < ids.size(); j++)
                    if (ids[j] < equation_size)
                    {
                        AddUnique(row_indices, ids[j]);
                    }
            }
    }
    for (typename ConditionsArrayType::iterator i_condition = rModelPart.ConditionsBegin(); i_condition != rModelPart.ConditionsEnd(); i_condition++)
    {
        pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++)
            if (ids[i] < equation_size)
            {
                std::vector<std::size_t>& row_indices = indices[ids[i]];
                for (std::size_t j = 0; j < ids.size(); j++)
                    if (ids[j] < equation_size)
                    {
                        AddUnique(row_indices, ids[j]);
                    }
            }
    }
    //allocating the memory needed
    int data_size = 0;
    for (std::size_t i = 0; i < indices.size(); i++)
    {
        data_size += indices[i].size();
    }
    A.reserve(data_size, false);
    //filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");
#ifndef STATIC_GENERAL_BUILDER_AND_SOLVER_USES_OPENMP
    for (std::size_t i = 0; i < indices.size(); i++)
    {
        std::vector<std::size_t>& row_indices = indices[i];
        std::sort(row_indices.begin(), row_indices.end());
        for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++)
        {
            A.push_back(i, *it, 0.00);
        }
        row_indices.clear();
    }
#else
    int number_of_threads = omp_get_max_threads();
    vector<unsigned int> matrix_partition;
    CreatePartition(number_of_threads, indices.size(), matrix_partition);
    if (this->GetEchoLevel() > 2)
    {
        KRATOS_WATCH( matrix_partition )
    }
    // Rows must be pushed in increasing order, so thread k is released only
    // on the k-th sweep (the parallel region acts as an ordered pipeline).
    for (int k = 0; k < number_of_threads; k++)
    {
        #pragma omp parallel
        if (omp_get_thread_num() == k)
        {
            for (std::size_t i = matrix_partition[k]; i < matrix_partition[k + 1]; i++)
            {
                std::vector<std::size_t>& row_indices = indices[i];
                std::sort(row_indices.begin(), row_indices.end());
                for (std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end(); it++)
                {
                    A.push_back(i, *it, 0.00);
                }
                row_indices.clear();
            }
        }
    }
#endif
    Timer::Stop("MatrixStructure");
}
void AssembleLHS(TSystemMatrixType& A,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId)
{
    // Scatters the local matrix into A, dropping every row or column whose
    // global id belongs to a fixed dof (id >= mEquationSystemSize).
    const unsigned int local_size = LHS_Contribution.size1();
    for (unsigned int row = 0; row < local_size; ++row)
    {
        const unsigned int global_row = EquationId[row];
        if (global_row >= BaseType::mEquationSystemSize)
            continue; // fixed dof: the whole row is discarded
        for (unsigned int col = 0; col < local_size; ++col)
        {
            const unsigned int global_col = EquationId[col];
            if (global_col < BaseType::mEquationSystemSize)
                A(global_row, global_col) += LHS_Contribution(row, col);
        }
    }
}
void AssembleRHS(TSystemVectorType& b,
    LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId)
{
    // Scatters the local RHS into b. When reactions are requested, the
    // fixed-dof entries are accumulated (with opposite sign) into the
    // reactions vector instead of being dropped.
    const unsigned int local_size = RHS_Contribution.size();

    if (BaseType::mCalculateReactionsFlag == false)
    {
        for (unsigned int i_local = 0; i_local < local_size; ++i_local)
        {
            const unsigned int i_global = EquationId[i_local];
            if (i_global < BaseType::mEquationSystemSize) // free dof only
                b[i_global] += RHS_Contribution[i_local];
        }
        return;
    }

    TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
    for (unsigned int i_local = 0; i_local < local_size; ++i_local)
    {
        const unsigned int i_global = EquationId[i_local];
        if (i_global < BaseType::mEquationSystemSize) // free dof
            b[i_global] += RHS_Contribution[i_local];
        else // fixed dof: reaction accumulates the negated residual
            ReactionsVector[i_global - BaseType::mEquationSystemSize] -= RHS_Contribution[i_local];
    }
}
// Scatters the local matrix into A keeping complete rows for free dofs: the
// row must belong to a free dof, but the column may be any dof (fixed-dof
// columns are assembled too).
void AssembleLHS_CompleteOnFreeRows(TSystemMatrixType& A,
    LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId)
{
    const unsigned int local_size = LHS_Contribution.size1();
    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        const unsigned int i_global = EquationId[i_local];
        if (i_global < BaseType::mEquationSystemSize)
        {
            for (unsigned int j_local = 0; j_local < local_size; j_local++)
            {
                // bug fix: j_global was declared as (signed) int, unlike every
                // other assembly routine; equation ids are unsigned.
                const unsigned int j_global = EquationId[j_local];
                A(i_global, j_global) += LHS_Contribution(i_local, j_local);
            }
        }
    }
}
// Appends 'candidate' to 'v' only if it is not already stored. The linear
// membership scan is intentional: rows are short and stay unsorted until the
// matrix structure is finalized.
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    for (std::vector<std::size_t>::iterator it = v.begin(); it != v.end(); ++it)
    {
        if (*it == candidate)
            return; // already present, nothing to do
    }
    v.push_back(candidate);
}
// Splits [0, number_of_rows) into number_of_threads contiguous chunks stored
// as partition boundaries (partitions[0] == 0, partitions[n] == rows).
// Improvement: the remainder rows are spread over the first chunks so chunk
// sizes differ by at most one, instead of all landing in the last chunk.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, vector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    const unsigned int base_size = number_of_rows / number_of_threads;
    const unsigned int remainder = number_of_rows % number_of_threads;
    partitions[0] = 0;
    for (unsigned int i = 1; i <= number_of_threads; i++)
        partitions[i] = partitions[i - 1] + base_size + (i <= remainder ? 1 : 0);
}
private:
    // Wall-clock totals for the current solution step (reset in InitializeSolutionStep).
    double mTotalBuildTime;
    double mTotalSolutionTime;
    // Running totals since construction (accumulated in FinalizeSolutionStep).
    double mTotalBuildTime_accum;
    double mTotalSolutionTime_accum;
    // NOTE(review): appears unused within this class -- confirm before removing.
    unsigned int mNumberOfFreeDofs;
#ifdef STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
    // True when the matrix changed and the next solve must refactorize.
    bool mDoFactorization;
#endif // STATIC_GENERAL_BUILDER_AND_SOLVER_OPTIMIZE_SOLUTION
private:
inline void PromptStatistics(const ProcessInfo & rCurrentProcessInfo)
{
    // Formats and prints the timing summary for the step just finished.
    const int nit = rCurrentProcessInfo[NL_ITERATION_NUMBER];
    // Guard the averaged quantities against a zero iteration count.
    const double dnit = nit == 0.0 ? 1.0 : (double)nit;
    std::stringstream ss;
    ss << " Total Build Time: \t" << mTotalBuildTime << " seconds" << std::endl;
    ss << " Total Solution Time: \t" << mTotalSolutionTime << " seconds" << std::endl;
    ss << " Number of Iterations:\t" << nit << std::endl;
    ss << " Mean Build Time: \t" << mTotalBuildTime / dnit << " seconds" << std::endl;
    ss << " Mean Solution Time: \t" << mTotalSolutionTime / dnit << " seconds" << std::endl;
    ss << " Total Build Time from start: " << mTotalBuildTime_accum << " seconds" << std::endl;
    ss << " Total Solution Time from start: " << mTotalSolutionTime_accum << " seconds" << std::endl;
    ss << " Total Time from start: " << mTotalSolutionTime_accum + mTotalBuildTime_accum << " seconds" << std::endl;
    std::cout << ss.str();
}
#ifdef STATIC_GENERAL_BUILDER_AND_SOLVER_USES_OPENMP
void Assemble(TSystemMatrixType& A,
    TSystemVectorType& b,
    const LocalSystemMatrixType& LHS_Contribution,
    const LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId,
    std::vector< omp_lock_t >& lock_array)
{
    // Thread-safe scatter of a local (LHS, RHS) pair: the destination row's
    // lock serializes concurrent writers of that row only.
    const unsigned int local_size = RHS_Contribution.size();
    for (unsigned int row = 0; row < local_size; ++row)
    {
        const unsigned int global_row = EquationId[row];
        if (global_row >= BaseType::mEquationSystemSize)
            continue; // fixed dof: nothing assembled in this routine
        omp_set_lock(&lock_array[global_row]);
        b[global_row] += RHS_Contribution(row);
        for (unsigned int col = 0; col < local_size; ++col)
        {
            const unsigned int global_col = EquationId[col];
            if (global_col < BaseType::mEquationSystemSize)
                A(global_row, global_col) += LHS_Contribution(row, col);
        }
        omp_unset_lock(&lock_array[global_row]);
    }
}
void AssembleLHS(TSystemMatrixType& A,
    const LocalSystemMatrixType& LHS_Contribution,
    Element::EquationIdVectorType& EquationId,
    std::vector< omp_lock_t >& lock_array)
{
    // Thread-safe LHS scatter: each global row is guarded by its own lock so
    // writers of different rows never contend.
    const unsigned int local_size = LHS_Contribution.size1();
    for (unsigned int row = 0; row < local_size; ++row)
    {
        const unsigned int global_row = EquationId[row];
        if (global_row >= BaseType::mEquationSystemSize)
            continue; // fixed dof row is skipped entirely
        omp_set_lock(&lock_array[global_row]);
        for (unsigned int col = 0; col < local_size; ++col)
        {
            const unsigned int global_col = EquationId[col];
            if (global_col < BaseType::mEquationSystemSize)
                A(global_row, global_col) += LHS_Contribution(row, col);
        }
        omp_unset_lock(&lock_array[global_row]);
    }
}
// Thread-safe variant of AssembleRHS: every global slot (free dof in b, or
// fixed dof in the reactions vector) is guarded by its entry in lock_array.
void AssembleRHS(TSystemVectorType& b,
    const LocalSystemVectorType& RHS_Contribution,
    Element::EquationIdVectorType& EquationId,
    std::vector< omp_lock_t >& lock_array)
{
    unsigned int local_size = RHS_Contribution.size();
    if (BaseType::mCalculateReactionsFlag == false)
    {
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            unsigned int i_global = EquationId[i_local];
            if (i_global < BaseType::mEquationSystemSize)
            {
                omp_set_lock(&lock_array[i_global]);
                b[i_global] += RHS_Contribution(i_local);
                omp_unset_lock(&lock_array[i_global]);
            }
            // fixed dofs are simply skipped when reactions are not requested
        }
    }
    else
    {
        TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            unsigned int i_global = EquationId[i_local];
            if (i_global < BaseType::mEquationSystemSize) //on "free" DOFs
            {
                omp_set_lock(&lock_array[i_global]);
                b[i_global] += RHS_Contribution[i_local];
                omp_unset_lock(&lock_array[i_global]);
            }
            else //on "fixed" DOFs
            {
                // bug fixed: lock_array now has dimension(=num_free + num_fixed) if
                // calculate reaction is needed, so the lock is indexed with the
                // untranslated global id while the reactions vector uses the
                // offset (i_global - mEquationSystemSize).
                omp_set_lock(&lock_array[i_global/* - BaseType::mEquationSystemSize*/]);
                ReactionsVector[i_global - BaseType::mEquationSystemSize] -= RHS_Contribution[i_local];
                omp_unset_lock(&lock_array[i_global/* - BaseType::mEquationSystemSize*/]);
            }
        }
    }
}
#endif
};
}
#endif /* KRATOS_STATIC_GENERAL_BUILDER_AND_SOLVER defined */
|
DRB095-doall2-taskloop-orig-yes.c | /*
Copyright (C) 1991-2018 Free Software Foundation, Inc.
This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, see
<http://www.gnu.org/licenses/>.
*/
/*
This header is separate from features.h so that the compiler can
include it implicitly at the start of every compilation. It must
not itself include <features.h> or any other header that includes
<features.h> because the implicit include comes before any feature
test macros that may be defined in a source file before it first
explicitly includes a system header. GCC knows the name of this
header in order to preinclude it.
*/
/*
glibc's intent is to support the IEC 559 math functionality, real
and complex. If the GCC (4.9 and later) predefined macros
specifying compiler intent are available, use them to determine
whether the overall intent is to support these features; otherwise,
presume an older compiler has intent to support these features and
define these macros by default.
*/
/*
wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
synchronized with ISO/IEC 10646:2017, fifth edition, plus
the following additions from Amendment 1 to the fifth edition:
- 56 emoji characters
- 285 hentaigana
- 3 additional Zanabazar Square characters
*/
/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
Two-dimensional array computation:
Only one loop is associated with omp taskloop.
The inner loop's loop iteration variable will be shared if it is shared in the enclosing context.
Data race pairs (we allow multiple ones to preserve the pattern):
Write_set = {j@69:14, j@69:30}
Read_set = {j@69:21, j@69:30, j@70:16}
Any pair from Write_set vs. Write_set and Write_set vs. Read_set is a data race pair.
*/
#include <stdio.h>
int a[100][100];
/* Initializes a[i][j] = i + j, then increments every entry; both loop nests
 * are parallelized. NOTE(review): the file header describes DRB095, a
 * taskloop data-race benchmark, but this Cetus-translated variant uses
 * "omp parallel for" with j private, so the races listed in the header do
 * not appear in this form -- confirm against the original DRB095 source. */
int main()
{
    int i, j;
    int _ret_val_0;
    #pragma cetus private(i, j)
    #pragma loop name main#0
    #pragma cetus parallel
    #pragma omp parallel for private(i, j)
    for (i=0; i<100; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#0#0
        #pragma cetus parallel
        /* nested parallel region; inner j is explicitly private */
        #pragma omp parallel for private(j)
        for (j=0; j<100; j ++ )
        {
            a[i][j]=(i+j);
        }
    }
    #pragma cetus private(i, j)
    #pragma loop name main#1
    #pragma cetus parallel
    #pragma omp parallel for private(i, j)
    for (i=0; i<100; i ++ )
    {
        #pragma cetus private(j)
        #pragma loop name main#1#0
        #pragma cetus parallel
        #pragma omp parallel for private(j)
        for (j=0; j<100; j ++ )
        {
            a[i][j]+=1;
        }
    }
    /* expected output: a[50][50] == 50 + 50 + 1 == 101 */
    printf("a[50][50]=%d\n", a[50][50]);
    _ret_val_0=0;
    return _ret_val_0;
}
|
kdtree_ann.h | /* kdtree_ann.h
*
* Author: Fabian Meyer
* Created On: 23 Jan 2019
*/
#ifndef KDT_KDTREE_ANN_H_
#define KDT_KDTREE_ANN_H_
#include <Eigen/Geometry>
#include <ANN/ANN.h>
namespace kdt
{
class KDTreeAnn
{
public:
typedef ANNcoord Scalar;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic> Matrix;
typedef Eigen::Matrix<Scalar, Eigen::Dynamic, 1> Vector;
typedef typename Matrix::Index Index;
typedef Eigen::Matrix<int, Eigen::Dynamic, Eigen::Dynamic> MatrixI;
private:
Matrix dataCopy_;
Matrix *dataPoints_;
ANNkd_tree *kdtree_;
Scalar epsilon_;
Scalar maxDist_;
int threads_;
public:
KDTreeAnn()
: dataCopy_(), dataPoints_(nullptr), kdtree_(nullptr), epsilon_(0),
maxDist_(0), threads_(1)
{
}
KDTreeAnn(Matrix &data, const bool copy = false)
: KDTreeAnn()
{
setData(data, copy);
}
~KDTreeAnn()
{
clear();
}
void setThreads(const int threads)
{
threads_ = threads;
}
void setEpsilon(const Scalar eps)
{
epsilon_ = eps;
}
void setMaxDistance(const Scalar dist)
{
maxDist_ = dist;
}
void setData(Matrix &data, const bool copy = false)
{
if(copy)
{
dataCopy_ = data;
dataPoints_ = &dataCopy_;
}
else
{
dataPoints_ = &data;
}
clear();
}
void build()
{
if(dataPoints_ == nullptr)
throw std::runtime_error("cannot build KDTree; data not set");
if(kdtree_ != nullptr)
delete kdtree_;
ANNpointArray dataPts = dataPoints_->data();
kdtree_ = new ANNkd_tree(dataPts, dataPoints_->cols(), dataPoints_->rows());
}
void query(Matrix &queryPoints,
const size_t knn,
MatrixI &indices,
Matrix &distances)
{
if(kdtree_ == nullptr)
throw std::runtime_error("cannot query KDTree; not built yet");
if(dimension() != queryPoints.rows())
throw std::runtime_error("cannot query KDTree; KDTree has different dimension than query data");
distances.setZero(knn, queryPoints.cols());
indices.setConstant(knn, queryPoints.cols(), -1);
Scalar maxDistSq = maxDist_ * maxDist_;
#pragma omp parallel num_threads(threads_)
for(Index i = 0; i < queryPoints.cols(); ++i)
{
ANNpoint p = &queryPoints.data()[i * queryPoints.rows()];
ANNidxArray idx = &indices.data()[i * knn];
ANNdistArray dists = &distances.data()[i * knn];
if(maxDist_ > 0)
kdtree_->annkFRSearch(p, maxDistSq, knn, idx, dists, epsilon_);
else
kdtree_->annkSearch(p, knn, idx, dists, epsilon_);
}
}
Index size() const
{
return dataPoints_ == nullptr ? 0 : dataPoints_->cols();
}
Index dimension() const
{
return dataPoints_ == nullptr ? 0 : dataPoints_->rows();
}
void clear()
{
if(kdtree_ != nullptr)
{
delete kdtree_;
kdtree_ = nullptr;
}
}
};
}
#endif
|
omp_pause_resource.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include "omp_testsuite.h"
/* Checks omp_pause_resource / omp_pause_resource_all: after each pause the
 * runtime must still be able to spin up a fresh parallel region.
 * Returns nonzero (true) on success, 0 on failure. */
int test_omp_pause_resource() {
  int fails, nthreads, my_dev;
  fails = 0;
  nthreads = 0;
  my_dev = omp_get_initial_device();
  /* warm up the runtime and record the team size */
  #pragma omp parallel
  #pragma omp single
  nthreads = omp_get_num_threads();
  /* soft pause on the host device; a nonzero return code means failure */
  if (omp_pause_resource(omp_pause_soft, my_dev))
    fails++;
  /* the runtime must still create threads after the soft pause */
  #pragma omp parallel shared(nthreads)
  #pragma omp single
  nthreads = omp_get_num_threads();
  if (nthreads == 0)
    fails++;
  /* hard pause releases all resources; the runtime must reinitialize */
  if (omp_pause_resource(omp_pause_hard, my_dev))
    fails++;
  nthreads = 0;
  #pragma omp parallel shared(nthreads)
  #pragma omp single
  nthreads = omp_get_num_threads();
  if (nthreads == 0)
    fails++;
  /* soft pause across all devices */
  if (omp_pause_resource_all(omp_pause_soft))
    fails++;
  nthreads = 0;
  #pragma omp parallel shared(nthreads)
  #pragma omp single
  nthreads = omp_get_num_threads();
  if (nthreads == 0)
    fails++;
  return fails == 0;
}
int main() {
  int i;
  int num_failed = 0;
  /* Execute the test REPETITIONS times; every failing run bumps the count. */
  for (i = 0; i < REPETITIONS; i++) {
    if (!test_omp_pause_resource())
      num_failed++;
  }
  /* Exit status is the number of failed repetitions (0 == success). */
  return num_failed;
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.