source | c |
|---|---|
utils.c | #include "utils.h"
/*
void merge_scores(int * scores, char ** titles, unsigned long int size) {
unsigned long int i1 = 0;
unsigned long int i2 = size / 2;
unsigned long int it = 0;
// allocate memory for temporary buffers
char ** tmp2 = (char **) malloc(size*sizeof(char *));
int * tmp3 = (int *) malloc (size*sizeof(int));
while(i1 < size/2 && i2 < size) {
if (scores[i1] > scores[i2]) {
tmp2[it] = titles[i1];
tmp3[it] = scores[i1];
i1++;
}
else {
tmp2[it] = titles[i2];
tmp3[it] = scores[i2];
i2 ++;
}
it ++;
}
while (i1 < size/2) {
tmp2[it] = titles[i1];
tmp3[it] = scores[i1];
i1++;
it++;
}
while (i2 < size) {
tmp2[it] = titles[i2];
tmp3[it] = scores[i2];
i2++;
it++;
}
memcpy(titles, tmp2, size*sizeof(char *));
memcpy(scores, tmp3, size*sizeof(int));
free(tmp2);
free(tmp3);
}
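// Note: merge_scores expects the halves [0, size/2) and [size/2, size) to
// each be sorted in descending order and merges them into one descending
// run, keeping each title paired with its score. The malloc results are
// not checked, so an allocation failure would dereference NULL.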
void mergesort_scores_serial(int * scores, char ** titles, unsigned long int size) {
int tmp_score;
char * tmp_seq;
if (size == 2) {
if (scores[0] <= scores[1]) {
// swap scores
tmp_score = scores[0];
scores[0] = scores[1];
scores[1] = tmp_score;
// swap titles
tmp_seq = titles[0];
titles[0] = titles[1];
titles[1] = tmp_seq;
}
} else {
if (size > 2){
mergesort_scores_serial(scores, titles, size/2);
mergesort_scores_serial(scores + size/2, titles + size/2, size - size/2);
merge_scores(scores, titles, size);
}
}
}
void sort_scores (int * scores, char ** titles, unsigned long int size, int threads) {
if ( threads == 1) {
mergesort_scores_serial(scores, titles, size);
}
else if (threads > 1) {
#pragma omp parallel sections num_threads(threads)
{
#pragma omp section
sort_scores(scores, titles, size/2, threads/2);
#pragma omp section
sort_scores(scores + size/2, titles + size/2, size-size/2, threads-threads/2);
}
merge_scores(scores, titles, size);
} // threads > 1
}
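// sort_scores splits the thread budget between the two recursive halves
// (threads/2 and threads - threads/2) and falls back to the serial
// mergesort once a branch is down to a single thread, so at most
// `threads` OpenMP sections run concurrently.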
*/
// Wall time
double dwalltime()
{
double sec;
struct timeval tv;
gettimeofday(&tv,NULL);
sec = tv.tv_sec + tv.tv_usec/1000000.0;
return sec;
}
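// Timing sketch (hypothetical caller; do_work() is a placeholder, and
// utils.h is assumed to pull in <sys/time.h> for gettimeofday()):
//
//   double t0 = dwalltime();
//   do_work();
//   printf("elapsed: %f s\n", dwalltime() - t0);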
|
MatrixInverseModel.h | /*
* Copyright 2016 [See AUTHORS file for list of authors]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef _MATRIXINVERSEMODEL_
#define _MATRIXINVERSEMODEL_
#include <iomanip>
#include <sstream>
#include "Model.h"
DEFINE_int32(n_power_iterations, 10, "Number of power iterations to run to calculate lambda.");
class MatrixInverseModel : public Model {
private:
int n_coords;
double lambda;
std::vector<double> model;
std::vector<double> B;
void Initialize(const std::string &input_line) {
// Input line should have a single number containing
// number of coordinates (# of rows/columns in square matrix).
std::stringstream input(input_line);
input >> n_coords;
model.resize(n_coords);
// Set elements in model to be a random number in range.
for (int i = 0; i < n_coords; i++) {
model[i] = rand() % FLAGS_random_range;
}
}
void MatrixVectorMultiply(const std::vector<Datapoint *> &datapoints,
std::vector<double> &input_vector,
std::vector<double> &output_vector) {
// Write to temporary vector to allow for input_vector
// and output_vector referencing the same vector.
std::vector<double> temp_vector;
for (const auto &datapoint : datapoints) {
double cross_product = 0;
// Each datapoint is like a sparse row in the sparse matrix.
for (int i = 0; i < datapoint->GetWeights().size(); i++) {
int index = datapoint->GetCoordinates()[i];
double weight = datapoint->GetWeights()[i];
cross_product += input_vector[index] * weight;
}
temp_vector.push_back(cross_product);
}
// Do some basic error checking of vector lengths before copying
// (checking first avoids writing past a too-small output_vector).
if (temp_vector.size() != output_vector.size() ||
temp_vector.size() != (size_t)n_coords) {
std::cerr << "MatrixInverseModel: Wrong size after matrix vector multiply." << std::endl;
std::cerr << output_vector.size() << " " << temp_vector.size() << " " << n_coords << std::endl;
exit(EXIT_FAILURE);
}
// Copy over.
std::copy(temp_vector.begin(), temp_vector.end(), output_vector.begin());
}
void Normalize(std::vector<double> &vec) {
double norm = 0;
for (int i = 0; i < vec.size(); i++) {
norm += vec[i] * vec[i];
}
norm = sqrt(norm);
for (int i = 0; i < vec.size(); i++) {
vec[i] /= norm;
}
}
std::vector<Datapoint *> TransposeSparseMatrix(const std::vector<Datapoint *> &d) {
std::vector<Datapoint *> r;
for (int i = 0; i < d.size(); i++) {
r.push_back(new MatrixInverseDatapoint(std::to_string(i), i));
}
for (int row = 0; row < d.size(); row++) {
for (int i = 0; i < d[row]->GetWeights().size(); i++) {
int column_index = d[row]->GetCoordinates()[i];
double weight = d[row]->GetWeights()[i];
r[column_index]->GetCoordinates().push_back(row);
r[column_index]->GetWeights().push_back(weight);
}
}
return r;
}
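// Example: transposing [[0, 5], [3, 2]] (row 0 = {(col 1, 5.0)},
// row 1 = {(col 0, 3.0), (col 1, 2.0)}) yields [[0, 3], [5, 2]]:
// row 0 = {(col 1, 3.0)}, row 1 = {(col 0, 5.0), (col 1, 2.0)}.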
public:
MatrixInverseModel(const std::string &input_line) {
Initialize(input_line);
}
~MatrixInverseModel() {
}
void SetUp(const std::vector<Datapoint *> &datapoints) override {
// Normalize the rows formed by the datapoint.
for (int dp = 0; dp < datapoints.size(); dp++) {
double sum_sqr = 0;
for (const auto &w : datapoints[dp]->GetWeights()) {
sum_sqr += w*w;
}
double norm_factor = sqrt(sum_sqr);
for (auto &w : datapoints[dp]->GetWeights()) {
w /= norm_factor;
}
for (auto &m_w : ((MatrixInverseDatapoint *)datapoints[dp])->coordinate_weight_map) {
m_w.second /= norm_factor;
}
}
// Let B be normalize(A * (A * random_vector)), where A is the datapoint matrix.
B.resize(n_coords);
std::vector<double> random_vector;
for (int i = 0; i < n_coords; i++) {
random_vector.push_back(rand() % FLAGS_random_range);
}
MatrixVectorMultiply(datapoints, random_vector, B);
MatrixVectorMultiply(datapoints, B, B);
Normalize(B);
// Calculate lambda via power iteration.
std::vector<Datapoint *> transpose = TransposeSparseMatrix(datapoints);
std::vector<double> x_k, x_k_prime;
for (int i = 0; i < n_coords; i++) {
x_k.push_back(rand() % FLAGS_random_range);
x_k_prime.push_back(0);
}
for (int i = 0; i < FLAGS_n_power_iterations; i++) {
MatrixVectorMultiply(datapoints, x_k, x_k);
MatrixVectorMultiply(transpose, x_k, x_k);
Normalize(x_k);
}
MatrixVectorMultiply(datapoints, x_k, x_k_prime);
MatrixVectorMultiply(transpose, x_k_prime, x_k_prime);
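// The 1.1 factor below slightly inflates the power-iteration estimate,
// giving lambda ~= 1.1 * x_k^T (A^T A) x_k, an overestimate of the
// largest eigenvalue of A^T A.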
lambda = 0;
for (int i = 0; i < n_coords; i++) {
lambda += x_k_prime[i] * 1.1 * x_k[i];
}
// Free memory of transpose sparse matrix.
for_each(transpose.begin(), transpose.end(), std::default_delete<Datapoint>());
}
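// Loss as implemented below: the sum over rows a_i of
//   (lambda * ||x||^2 / n - (a_i^T x)^2) / 2 - (x^T B) / n,
// plus a constant 2, where x is the model and n = n_coords.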
double ComputeLoss(const std::vector<Datapoint *> &datapoints) override {
double loss = 0;
double sum_sqr = 0, second = 0;
for (int i = 0; i < n_coords; i++) {
second += model[i] * B[i];
sum_sqr += model[i] * model[i];
}
#pragma omp parallel for num_threads(FLAGS_n_threads) reduction(+:loss)
for (int i = 0; i < datapoints.size(); i++) {
double ai_t_x = 0;
double first = sum_sqr / (double)n_coords * lambda;
for (int j = 0; j < datapoints[i]->GetWeights().size(); j++) {
int index = datapoints[i]->GetCoordinates()[j];
double weight = datapoints[i]->GetWeights()[j];
ai_t_x += model[index] * weight;
}
first -= ai_t_x * ai_t_x;
loss += first / 2 - second / (double)n_coords;
}
return loss + 2;
}
int NumParameters() override {
return n_coords;
}
int CoordinateSize() override {
return 1;
}
std::vector<double> & ModelData() override {
return model;
}
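// Computes g->coeffs = (a_i^T x) * a_i over this datapoint's nonzero
// coordinates, where a_i is the sparse row and x is local_model.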
void PrecomputeCoefficients(Datapoint *datapoint, Gradient *g, std::vector<double> &local_model) override {
if (g->coeffs.size() != n_coords) g->coeffs.resize(n_coords);
const std::vector<double> &weights = datapoint->GetWeights();
const std::vector<int> &coordinates = datapoint->GetCoordinates();
double product = 0;
for (int i = 0; i < coordinates.size(); i++) {
product += local_model[coordinates[i]] * weights[i];
}
for (int i = 0; i < coordinates.size(); i++) {
int index = coordinates[i];
g->coeffs[index] = product * weights[i];
}
}
void Lambda(int coordinate, double &out, std::vector<double> &local_model) override {
out = lambda / (double)n_coords;
}
void Kappa(int coordinate, std::vector<double> &out, std::vector<double> &local_model) override {
out[0] = B[coordinate] / (double)n_coords;
}
void H_bar(int coordinate, std::vector<double> &out, Gradient *g, std::vector<double> &local_model) override {
out[0] = g->coeffs[coordinate];
}
};
#endif
|
Parser.h | //===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Parser interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_PARSE_PARSER_H
#define LLVM_CLANG_PARSE_PARSER_H
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Availability.h"
#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/OperatorPrecedence.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Lex/CodeCompletionHandler.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Sema.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/SaveAndRestore.h"
#include <memory>
#include <stack>
namespace clang {
class PragmaHandler;
class Scope;
class BalancedDelimiterTracker;
class CorrectionCandidateCallback;
class DeclGroupRef;
class DiagnosticBuilder;
struct LoopHint;
class Parser;
class ParsingDeclRAIIObject;
class ParsingDeclSpec;
class ParsingDeclarator;
class ParsingFieldDeclarator;
class ColonProtectionRAIIObject;
class InMessageExpressionRAIIObject;
class PoisonSEHIdentifiersRAIIObject;
class OMPClause;
class ObjCTypeParamList;
class ObjCTypeParameter;
/// Parser - This implements a parser for the C family of languages. After
/// parsing units of the grammar, productions are invoked to handle whatever has
/// been read.
///
class Parser : public CodeCompletionHandler {
friend class ColonProtectionRAIIObject;
friend class ParsingOpenMPDirectiveRAII;
friend class InMessageExpressionRAIIObject;
friend class PoisonSEHIdentifiersRAIIObject;
friend class ObjCDeclContextSwitch;
friend class ParenBraceBracketBalancer;
friend class BalancedDelimiterTracker;
Preprocessor &PP;
/// Tok - The current token we are peeking ahead. All parsing methods assume
/// that this is valid.
Token Tok;
// PrevTokLocation - The location of the token we previously
// consumed. This token is used for diagnostics where we expected to
// see a token following another token (e.g., the ';' at the end of
// a statement).
SourceLocation PrevTokLocation;
/// Tracks an expected type for the current token when parsing an expression.
/// Used by code completion for ranking.
PreferredTypeBuilder PreferredType;
unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0;
unsigned short MisplacedModuleBeginCount = 0;
/// Actions - These are the callbacks we invoke as we parse various constructs
/// in the file.
Sema &Actions;
DiagnosticsEngine &Diags;
/// ScopeCache - Cache scopes to reduce malloc traffic.
enum { ScopeCacheSize = 16 };
unsigned NumCachedScopes;
Scope *ScopeCache[ScopeCacheSize];
/// Identifiers used for SEH handling in Borland. These are only
/// allowed in particular circumstances.
// __except block
IdentifierInfo *Ident__exception_code,
*Ident___exception_code,
*Ident_GetExceptionCode;
// __except filter expression
IdentifierInfo *Ident__exception_info,
*Ident___exception_info,
*Ident_GetExceptionInfo;
// __finally
IdentifierInfo *Ident__abnormal_termination,
*Ident___abnormal_termination,
*Ident_AbnormalTermination;
/// Contextual keywords for Microsoft extensions.
IdentifierInfo *Ident__except;
mutable IdentifierInfo *Ident_sealed;
/// Ident_super - IdentifierInfo for "super", to support fast
/// comparison.
IdentifierInfo *Ident_super;
/// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and
/// "bool" fast comparison. Only present if AltiVec or ZVector are enabled.
IdentifierInfo *Ident_vector;
IdentifierInfo *Ident_bool;
/// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison.
/// Only present if AltiVec enabled.
IdentifierInfo *Ident_pixel;
/// Objective-C contextual keywords.
IdentifierInfo *Ident_instancetype;
/// Identifier for "introduced".
IdentifierInfo *Ident_introduced;
/// Identifier for "deprecated".
IdentifierInfo *Ident_deprecated;
/// Identifier for "obsoleted".
IdentifierInfo *Ident_obsoleted;
/// Identifier for "unavailable".
IdentifierInfo *Ident_unavailable;
/// Identifier for "message".
IdentifierInfo *Ident_message;
/// Identifier for "strict".
IdentifierInfo *Ident_strict;
/// Identifier for "replacement".
IdentifierInfo *Ident_replacement;
/// Identifiers used by the 'external_source_symbol' attribute.
IdentifierInfo *Ident_language, *Ident_defined_in,
*Ident_generated_declaration;
/// C++11 contextual keywords.
mutable IdentifierInfo *Ident_final;
mutable IdentifierInfo *Ident_GNU_final;
mutable IdentifierInfo *Ident_override;
// C++2a contextual keywords.
mutable IdentifierInfo *Ident_import;
mutable IdentifierInfo *Ident_module;
// C++ type trait keywords that can be reverted to identifiers and still be
// used as type traits.
llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits;
std::unique_ptr<PragmaHandler> AlignHandler;
std::unique_ptr<PragmaHandler> GCCVisibilityHandler;
std::unique_ptr<PragmaHandler> OptionsHandler;
std::unique_ptr<PragmaHandler> PackHandler;
std::unique_ptr<PragmaHandler> MSStructHandler;
std::unique_ptr<PragmaHandler> UnusedHandler;
std::unique_ptr<PragmaHandler> WeakHandler;
std::unique_ptr<PragmaHandler> RedefineExtnameHandler;
std::unique_ptr<PragmaHandler> FPContractHandler;
std::unique_ptr<PragmaHandler> OpenCLExtensionHandler;
std::unique_ptr<PragmaHandler> OpenMPHandler;
std::unique_ptr<PragmaHandler> PCSectionHandler;
std::unique_ptr<PragmaHandler> MSCommentHandler;
std::unique_ptr<PragmaHandler> MSDetectMismatchHandler;
std::unique_ptr<PragmaHandler> MSPointersToMembers;
std::unique_ptr<PragmaHandler> MSVtorDisp;
std::unique_ptr<PragmaHandler> MSInitSeg;
std::unique_ptr<PragmaHandler> MSDataSeg;
std::unique_ptr<PragmaHandler> MSBSSSeg;
std::unique_ptr<PragmaHandler> MSConstSeg;
std::unique_ptr<PragmaHandler> MSCodeSeg;
std::unique_ptr<PragmaHandler> MSSection;
std::unique_ptr<PragmaHandler> MSRuntimeChecks;
std::unique_ptr<PragmaHandler> MSIntrinsic;
std::unique_ptr<PragmaHandler> MSOptimize;
std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler;
std::unique_ptr<PragmaHandler> OptimizeHandler;
std::unique_ptr<PragmaHandler> LoopHintHandler;
std::unique_ptr<PragmaHandler> UnrollHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollHintHandler;
std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler;
std::unique_ptr<PragmaHandler> FPHandler;
std::unique_ptr<PragmaHandler> STDCFENVHandler;
std::unique_ptr<PragmaHandler> STDCCXLIMITHandler;
std::unique_ptr<PragmaHandler> STDCUnknownHandler;
std::unique_ptr<PragmaHandler> AttributePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensHerePragmaHandler;
std::unique_ptr<PragmaHandler> MaxTokensTotalPragmaHandler;
std::unique_ptr<CommentHandler> CommentSemaHandler;
/// Whether the '>' token acts as an operator or not. This will be
/// true except when we are parsing an expression within a C++
/// template argument list, where the '>' closes the template
/// argument list.
bool GreaterThanIsOperator;
/// ColonIsSacred - When this is false, we aggressively try to recover from
/// code like "foo : bar" as if it were a typo for "foo :: bar". This is not
/// safe in case statements and a few other things. This is managed by the
/// ColonProtectionRAIIObject RAII object.
bool ColonIsSacred;
/// Parsing OpenMP directive mode.
bool OpenMPDirectiveParsing = false;
/// When true, we are directly inside an Objective-C message
/// send expression.
///
/// This is managed by the \c InMessageExpressionRAIIObject class, and
/// should not be set directly.
bool InMessageExpression;
/// Gets set to true after calling ProduceSignatureHelp, it is for a
/// workaround to make sure ProduceSignatureHelp is only called at the deepest
/// function call.
bool CalledSignatureHelp = false;
/// The "depth" of the template parameters currently being parsed.
unsigned TemplateParameterDepth;
/// RAII class that manages the template parameter depth.
class TemplateParameterDepthRAII {
unsigned &Depth;
unsigned AddedLevels;
public:
explicit TemplateParameterDepthRAII(unsigned &Depth)
: Depth(Depth), AddedLevels(0) {}
~TemplateParameterDepthRAII() {
Depth -= AddedLevels;
}
void operator++() {
++Depth;
++AddedLevels;
}
void addDepth(unsigned D) {
Depth += D;
AddedLevels += D;
}
void setAddedDepth(unsigned D) {
Depth = Depth - AddedLevels + D;
AddedLevels = D;
}
unsigned getDepth() const { return Depth; }
unsigned getOriginalDepth() const { return Depth - AddedLevels; }
};
/// Factory object for creating ParsedAttr objects.
AttributeFactory AttrFactory;
/// Gathers and cleans up TemplateIdAnnotations when parsing of a
/// top-level declaration is finished.
SmallVector<TemplateIdAnnotation *, 16> TemplateIds;
/// Identifiers which have been declared within a tentative parse.
SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers;
/// Tracker for '<' tokens that might have been intended to be treated as an
/// angle bracket instead of a less-than comparison.
///
/// This happens when the user intends to form a template-id, but typoes the
/// template-name or forgets a 'template' keyword for a dependent template
/// name.
///
/// We track these locations from the point where we see a '<' with a
/// name-like expression on its left until we see a '>' or '>>' that might
/// match it.
struct AngleBracketTracker {
/// Flags used to rank candidate template names when there is more than one
/// '<' in a scope.
enum Priority : unsigned short {
/// A non-dependent name that is a potential typo for a template name.
PotentialTypo = 0x0,
/// A dependent name that might instantiate to a template-name.
DependentName = 0x2,
/// A space appears before the '<' token.
SpaceBeforeLess = 0x0,
/// No space appears before the '<' token.
NoSpaceBeforeLess = 0x1,
LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName)
};
struct Loc {
Expr *TemplateName;
SourceLocation LessLoc;
AngleBracketTracker::Priority Priority;
unsigned short ParenCount, BracketCount, BraceCount;
bool isActive(Parser &P) const {
return P.ParenCount == ParenCount && P.BracketCount == BracketCount &&
P.BraceCount == BraceCount;
}
bool isActiveOrNested(Parser &P) const {
return isActive(P) || P.ParenCount > ParenCount ||
P.BracketCount > BracketCount || P.BraceCount > BraceCount;
}
};
SmallVector<Loc, 8> Locs;
/// Add an expression that might have been intended to be a template name.
/// In the case of ambiguity, we arbitrarily select the innermost such
/// expression, for example in 'foo < bar < baz', 'bar' is the current
/// candidate. No attempt is made to track that 'foo' is also a candidate
/// for the case where we see a second suspicious '>' token.
void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc,
Priority Prio) {
if (!Locs.empty() && Locs.back().isActive(P)) {
if (Locs.back().Priority <= Prio) {
Locs.back().TemplateName = TemplateName;
Locs.back().LessLoc = LessLoc;
Locs.back().Priority = Prio;
}
} else {
Locs.push_back({TemplateName, LessLoc, Prio,
P.ParenCount, P.BracketCount, P.BraceCount});
}
}
/// Mark the current potential missing template location as having been
/// handled (this happens if we pass a "corresponding" '>' or '>>' token
/// or leave a bracket scope).
void clear(Parser &P) {
while (!Locs.empty() && Locs.back().isActiveOrNested(P))
Locs.pop_back();
}
/// Get the current enclosing expression that might have been intended to be
/// a template name.
Loc *getCurrent(Parser &P) {
if (!Locs.empty() && Locs.back().isActive(P))
return &Locs.back();
return nullptr;
}
};
AngleBracketTracker AngleBrackets;
IdentifierInfo *getSEHExceptKeyword();
/// True if we are within an Objective-C container while parsing C-like decls.
///
/// This is necessary because Sema thinks we have left the container
/// to parse the C-like decls, meaning Actions.getObjCDeclContext() will
/// be NULL.
bool ParsingInObjCContainer;
/// Whether to skip parsing of function bodies.
///
/// This option can be used, for example, to speed up searches for
/// declarations/definitions when indexing.
bool SkipFunctionBodies;
/// The location of the expression statement that is being parsed right now.
/// Used to determine if an expression that is being parsed is a statement or
/// just a regular sub-expression.
SourceLocation ExprStatementTokLoc;
/// Flags describing a context in which we're parsing a statement.
enum class ParsedStmtContext {
/// This context permits declarations in language modes where declarations
/// are not statements.
AllowDeclarationsInC = 0x1,
/// This context permits standalone OpenMP directives.
AllowStandaloneOpenMPDirectives = 0x2,
/// This context is at the top level of a GNU statement expression.
InStmtExpr = 0x4,
/// The context of a regular substatement.
SubStmt = 0,
/// The context of a compound-statement.
Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives,
LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr)
};
/// Act on an expression statement that might be the last statement in a
/// GNU statement expression. Checks whether we are actually at the end of
/// a statement expression and builds a suitable expression statement.
StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx);
public:
Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies);
~Parser() override;
const LangOptions &getLangOpts() const { return PP.getLangOpts(); }
const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); }
Preprocessor &getPreprocessor() const { return PP; }
Sema &getActions() const { return Actions; }
AttributeFactory &getAttrFactory() { return AttrFactory; }
const Token &getCurToken() const { return Tok; }
Scope *getCurScope() const { return Actions.getCurScope(); }
void incrementMSManglingNumber() const {
return Actions.incrementMSManglingNumber();
}
Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); }
// Type forwarding. All of these are statically 'void*', but they may all be
// different actual classes based on the actions in place.
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists;
typedef Sema::FullExprArg FullExprArg;
// Parsing methods.
/// Initialize - Warm up the parser.
///
void Initialize();
/// Parse the first top-level declaration in a translation unit.
bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result);
/// ParseTopLevelDecl - Parse one top-level declaration. Returns true if
/// the EOF was encountered.
bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false);
bool ParseTopLevelDecl() {
DeclGroupPtrTy Result;
return ParseTopLevelDecl(Result);
}
/// ConsumeToken - Consume the current 'peek token' and lex the next one.
/// This does not work with special tokens: string literals, code completion,
/// annotation tokens and balanced tokens must be handled using the specific
/// consume methods.
/// Returns the location of the consumed token.
SourceLocation ConsumeToken() {
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
bool TryConsumeToken(tok::TokenKind Expected) {
if (Tok.isNot(Expected))
return false;
assert(!isTokenSpecial() &&
"Should consume special tokens with Consume*Token");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return true;
}
bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) {
if (!TryConsumeToken(Expected))
return false;
Loc = PrevTokLocation;
return true;
}
/// ConsumeAnyToken - Dispatch to the right Consume* method based on the
/// current token type. This should only be used in cases where the type of
/// the token really isn't known, e.g. in error recovery.
SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) {
if (isTokenParen())
return ConsumeParen();
if (isTokenBracket())
return ConsumeBracket();
if (isTokenBrace())
return ConsumeBrace();
if (isTokenStringLiteral())
return ConsumeStringToken();
if (Tok.is(tok::code_completion))
return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken()
: handleUnexpectedCodeCompletionToken();
if (Tok.isAnnotation())
return ConsumeAnnotationToken();
return ConsumeToken();
}
SourceLocation getEndOfPreviousToken() {
return PP.getLocForEndOfToken(PrevTokLocation);
}
/// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds
/// to the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) {
return Actions.getNullabilityKeyword(nullability);
}
private:
//===--------------------------------------------------------------------===//
// Low-Level token peeking and consumption methods.
//
/// isTokenParen - Return true if the cur token is '(' or ')'.
bool isTokenParen() const {
return Tok.isOneOf(tok::l_paren, tok::r_paren);
}
/// isTokenBracket - Return true if the cur token is '[' or ']'.
bool isTokenBracket() const {
return Tok.isOneOf(tok::l_square, tok::r_square);
}
/// isTokenBrace - Return true if the cur token is '{' or '}'.
bool isTokenBrace() const {
return Tok.isOneOf(tok::l_brace, tok::r_brace);
}
/// isTokenStringLiteral - True if this token is a string-literal.
bool isTokenStringLiteral() const {
return tok::isStringLiteral(Tok.getKind());
}
/// isTokenSpecial - True if this token requires special consumption methods.
bool isTokenSpecial() const {
return isTokenStringLiteral() || isTokenParen() || isTokenBracket() ||
isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation();
}
/// Returns true if the current token is '=' or is a type of '='.
/// For typos, give a fixit to '='
bool isTokenEqualOrEqualTypo();
/// Return the current token to the token stream and make the given
/// token the current token.
void UnconsumeToken(Token &Consumed) {
Token Next = Tok;
PP.EnterToken(Consumed, /*IsReinject*/true);
PP.Lex(Tok);
PP.EnterToken(Next, /*IsReinject*/true);
}
SourceLocation ConsumeAnnotationToken() {
assert(Tok.isAnnotation() && "wrong consume method");
SourceLocation Loc = Tok.getLocation();
PrevTokLocation = Tok.getAnnotationEndLoc();
PP.Lex(Tok);
return Loc;
}
/// ConsumeParen - This consume method keeps the paren count up-to-date.
///
SourceLocation ConsumeParen() {
assert(isTokenParen() && "wrong consume method");
if (Tok.getKind() == tok::l_paren)
++ParenCount;
else if (ParenCount) {
AngleBrackets.clear(*this);
--ParenCount; // Don't let unbalanced )'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBracket - This consume method keeps the bracket count up-to-date.
///
SourceLocation ConsumeBracket() {
assert(isTokenBracket() && "wrong consume method");
if (Tok.getKind() == tok::l_square)
++BracketCount;
else if (BracketCount) {
AngleBrackets.clear(*this);
--BracketCount; // Don't let unbalanced ]'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeBrace - This consume method keeps the brace count up-to-date.
///
SourceLocation ConsumeBrace() {
assert(isTokenBrace() && "wrong consume method");
if (Tok.getKind() == tok::l_brace)
++BraceCount;
else if (BraceCount) {
AngleBrackets.clear(*this);
--BraceCount; // Don't let unbalanced }'s drive the count negative.
}
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// ConsumeStringToken - Consume the current 'peek token', lexing a new one
/// and returning the token kind. This method is specific to strings, as it
/// handles string literal concatenation, as per C99 5.1.1.2, translation
/// phase #6.
SourceLocation ConsumeStringToken() {
assert(isTokenStringLiteral() &&
"Should only consume string literals with this method");
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// Consume the current code-completion token.
///
/// This routine can be called to consume the code-completion token and
/// continue processing in special cases where \c cutOffParsing() isn't
/// desired, such as token caching or completion with lookahead.
SourceLocation ConsumeCodeCompletionToken() {
assert(Tok.is(tok::code_completion));
PrevTokLocation = Tok.getLocation();
PP.Lex(Tok);
return PrevTokLocation;
}
/// \brief When we are consuming a code-completion token without having
/// matched specific position in the grammar, provide code-completion results
/// based on context.
///
/// \returns the source location of the code-completion token.
SourceLocation handleUnexpectedCodeCompletionToken();
/// Abruptly cut off parsing; mainly used when we have reached the
/// code-completion point.
void cutOffParsing() {
if (PP.isCodeCompletionEnabled())
PP.setCodeCompletionReached();
// Cut off parsing by acting as if we reached the end-of-file.
Tok.setKind(tok::eof);
}
/// Determine if we're at the end of the file or at a transition
/// between modules.
bool isEofOrEom() {
tok::TokenKind Kind = Tok.getKind();
return Kind == tok::eof || Kind == tok::annot_module_begin ||
Kind == tok::annot_module_end || Kind == tok::annot_module_include;
}
/// Checks if the \p Level is valid for use in a fold expression.
bool isFoldOperator(prec::Level Level) const;
/// Checks if the \p Kind is a valid operator for fold expressions.
bool isFoldOperator(tok::TokenKind Kind) const;
/// Initialize all pragma handlers.
void initializePragmaHandlers();
/// Destroy and reset all pragma handlers.
void resetPragmaHandlers();
/// Handle the annotation token produced for #pragma unused(...)
void HandlePragmaUnused();
/// Handle the annotation token produced for
/// #pragma GCC visibility...
void HandlePragmaVisibility();
/// Handle the annotation token produced for
/// #pragma pack...
void HandlePragmaPack();
/// Handle the annotation token produced for
/// #pragma ms_struct...
void HandlePragmaMSStruct();
/// Handle the annotation token produced for
/// #pragma comment...
void HandlePragmaMSComment();
void HandlePragmaMSPointersToMembers();
void HandlePragmaMSVtorDisp();
void HandlePragmaMSPragma();
bool HandlePragmaMSSection(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSSegment(StringRef PragmaName,
SourceLocation PragmaLocation);
bool HandlePragmaMSInitSeg(StringRef PragmaName,
SourceLocation PragmaLocation);
/// Handle the annotation token produced for
/// #pragma align...
void HandlePragmaAlign();
/// Handle the annotation token produced for
/// #pragma clang __debug dump...
void HandlePragmaDump();
/// Handle the annotation token produced for
/// #pragma weak id...
void HandlePragmaWeak();
/// Handle the annotation token produced for
/// #pragma weak id = id...
void HandlePragmaWeakAlias();
/// Handle the annotation token produced for
/// #pragma redefine_extname...
void HandlePragmaRedefineExtname();
/// Handle the annotation token produced for
/// #pragma STDC FP_CONTRACT...
void HandlePragmaFPContract();
/// Handle the annotation token produced for
/// #pragma STDC FENV_ACCESS...
void HandlePragmaFEnvAccess();
/// \brief Handle the annotation token produced for
/// #pragma clang fp ...
void HandlePragmaFP();
/// Handle the annotation token produced for
/// #pragma OPENCL EXTENSION...
void HandlePragmaOpenCLExtension();
/// Handle the annotation token produced for
/// #pragma clang __debug captured
StmtResult HandlePragmaCaptured();
/// Handle the annotation token produced for
/// #pragma clang loop and #pragma unroll.
bool HandlePragmaLoopHint(LoopHint &Hint);
bool ParsePragmaAttributeSubjectMatchRuleSet(
attr::ParsedSubjectMatchRuleSet &SubjectMatchRules,
SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc);
void HandlePragmaAttribute();
/// GetLookAheadToken - This peeks ahead N tokens and returns that token
/// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1)
/// returns the token after Tok, etc.
///
/// Note that this differs from the Preprocessor's LookAhead method, because
/// the Parser always has one token lexed that the preprocessor doesn't.
///
const Token &GetLookAheadToken(unsigned N) {
if (N == 0 || Tok.is(tok::eof)) return Tok;
return PP.LookAhead(N-1);
}
public:
/// NextToken - This peeks ahead one token and returns it without
/// consuming it.
const Token &NextToken() {
return PP.LookAhead(0);
}
/// getTypeAnnotation - Read a parsed type out of an annotation token.
static ParsedType getTypeAnnotation(const Token &Tok) {
return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue());
}
private:
static void setTypeAnnotation(Token &Tok, ParsedType T) {
Tok.setAnnotationValue(T.getAsOpaquePtr());
}
static NamedDecl *getNonTypeAnnotation(const Token &Tok) {
return static_cast<NamedDecl*>(Tok.getAnnotationValue());
}
static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) {
Tok.setAnnotationValue(ND);
}
static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) {
return static_cast<IdentifierInfo*>(Tok.getAnnotationValue());
}
static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) {
Tok.setAnnotationValue(ND);
}
/// Read an already-translated primary expression out of an annotation
/// token.
static ExprResult getExprAnnotation(const Token &Tok) {
return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue());
}
/// Set the primary expression corresponding to the given annotation
/// token.
static void setExprAnnotation(Token &Tok, ExprResult ER) {
Tok.setAnnotationValue(ER.getAsOpaquePointer());
}
public:
// TryAnnotateTypeOrScopeToken will try harder to find a type name by
// attempting typo correction.
bool TryAnnotateTypeOrScopeToken();
bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS,
bool IsNewScope);
bool TryAnnotateCXXScopeToken(bool EnteringContext = false);
bool MightBeCXXScopeToken() {
return Tok.is(tok::identifier) || Tok.is(tok::coloncolon) ||
(Tok.is(tok::annot_template_id) &&
NextToken().is(tok::coloncolon)) ||
Tok.is(tok::kw_decltype) || Tok.is(tok::kw___super);
}
bool TryAnnotateOptionalCXXScopeToken(bool EnteringContext = false) {
return MightBeCXXScopeToken() && TryAnnotateCXXScopeToken(EnteringContext);
}
private:
enum AnnotatedNameKind {
/// Annotation has failed and emitted an error.
ANK_Error,
/// The identifier is a tentatively-declared name.
ANK_TentativeDecl,
/// The identifier is a template name. FIXME: Add an annotation for that.
ANK_TemplateName,
/// The identifier can't be resolved.
ANK_Unresolved,
/// Annotation was successful.
ANK_Success
};
AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr);
/// Push a tok::annot_cxxscope token onto the token stream.
void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation);
/// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens,
/// replacing them with the non-context-sensitive keywords. This returns
/// true if the token was replaced.
bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid) {
if (!getLangOpts().AltiVec && !getLangOpts().ZVector)
return false;
if (Tok.getIdentifierInfo() != Ident_vector &&
Tok.getIdentifierInfo() != Ident_bool &&
(!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel))
return false;
return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid);
}
/// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector
/// identifier token, replacing it with the non-context-sensitive __vector.
/// This returns true if the token was replaced.
bool TryAltiVecVectorToken() {
if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) ||
Tok.getIdentifierInfo() != Ident_vector) return false;
return TryAltiVecVectorTokenOutOfLine();
}
bool TryAltiVecVectorTokenOutOfLine();
bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc,
const char *&PrevSpec, unsigned &DiagID,
bool &isInvalid);
/// Returns true if the current token is the identifier 'instancetype'.
///
/// Should only be used in Objective-C language modes.
bool isObjCInstancetype() {
assert(getLangOpts().ObjC);
if (Tok.isAnnotation())
return false;
if (!Ident_instancetype)
Ident_instancetype = PP.getIdentifierInfo("instancetype");
return Tok.getIdentifierInfo() == Ident_instancetype;
}
/// TryKeywordIdentFallback - For compatibility with system headers using
/// keywords as identifiers, attempt to convert the current token to an
/// identifier and optionally disable the keyword for the remainder of the
/// translation unit. This returns false if the token was not replaced,
/// otherwise emits a diagnostic and returns true.
bool TryKeywordIdentFallback(bool DisableKeyword);
/// Get the TemplateIdAnnotation from the token.
TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok);
/// TentativeParsingAction - An object that is used as a kind of "tentative
/// parsing transaction". It gets instantiated to mark the token position and
/// after the token consumption is done, Commit() or Revert() is called to
/// either "commit the consumed tokens" or revert to the previously marked
/// token position. Example:
///
/// TentativeParsingAction TPA(*this);
/// ConsumeToken();
/// ....
/// TPA.Revert();
///
class TentativeParsingAction {
Parser &P;
PreferredTypeBuilder PrevPreferredType;
Token PrevTok;
size_t PrevTentativelyDeclaredIdentifierCount;
unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount;
bool isActive;
public:
explicit TentativeParsingAction(Parser& p) : P(p) {
PrevPreferredType = P.PreferredType;
PrevTok = P.Tok;
PrevTentativelyDeclaredIdentifierCount =
P.TentativelyDeclaredIdentifiers.size();
PrevParenCount = P.ParenCount;
PrevBracketCount = P.BracketCount;
PrevBraceCount = P.BraceCount;
P.PP.EnableBacktrackAtThisPos();
isActive = true;
}
void Commit() {
assert(isActive && "Parsing action was finished!");
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.PP.CommitBacktrackedTokens();
isActive = false;
}
void Revert() {
assert(isActive && "Parsing action was finished!");
P.PP.Backtrack();
P.PreferredType = PrevPreferredType;
P.Tok = PrevTok;
P.TentativelyDeclaredIdentifiers.resize(
PrevTentativelyDeclaredIdentifierCount);
P.ParenCount = PrevParenCount;
P.BracketCount = PrevBracketCount;
P.BraceCount = PrevBraceCount;
isActive = false;
}
~TentativeParsingAction() {
assert(!isActive && "Forgot to call Commit or Revert!");
}
};
/// A TentativeParsingAction that automatically reverts in its destructor.
/// Useful for disambiguation parses that will always be reverted.
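/// Example:
///
///   RevertingTentativeParsingAction TPA(*this);
///   ConsumeToken();
///   ....            // speculative parsing
///   // destruction of TPA reverts the consumed tokens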
class RevertingTentativeParsingAction
: private Parser::TentativeParsingAction {
public:
RevertingTentativeParsingAction(Parser &P)
: Parser::TentativeParsingAction(P) {}
~RevertingTentativeParsingAction() { Revert(); }
};
class UnannotatedTentativeParsingAction;
/// ObjCDeclContextSwitch - An object used to switch context from
/// an objective-c decl context to its enclosing decl context and
/// back.
class ObjCDeclContextSwitch {
Parser &P;
Decl *DC;
SaveAndRestore<bool> WithinObjCContainer;
public:
explicit ObjCDeclContextSwitch(Parser &p)
: P(p), DC(p.getObjCDeclContext()),
WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) {
if (DC)
P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC));
}
~ObjCDeclContextSwitch() {
if (DC)
P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC));
}
};
/// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the
/// input. If so, it is consumed and false is returned.
///
/// If a trivial punctuator misspelling is encountered, a FixIt error
/// diagnostic is issued and false is returned after recovery.
///
/// If the input is malformed, this emits the specified diagnostic and true is
/// returned.
bool ExpectAndConsume(tok::TokenKind ExpectedTok,
unsigned Diag = diag::err_expected,
StringRef DiagMsg = "");
/// The parser expects a semicolon and, if present, will consume it.
///
/// If the next token is not a semicolon, this emits the specified diagnostic,
/// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior
/// to the semicolon, consumes that extra token.
bool ExpectAndConsumeSemi(unsigned DiagID);
/// The kind of extra semi diagnostic to emit.
enum ExtraSemiKind {
OutsideFunction = 0,
InsideStruct = 1,
InstanceVariableList = 2,
AfterMemberFunctionDefinition = 3
};
/// Consume any extra semi-colons until the end of the line.
void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified);
/// Return false if the next token is an identifier. An 'expected identifier'
/// error is emitted otherwise.
///
/// The parser tries to recover from the error by checking if the next token
/// is a C++ keyword when parsing Objective-C++. Return false if the recovery
/// was successful.
bool expectIdentifier();
public:
//===--------------------------------------------------------------------===//
// Scope manipulation
/// ParseScope - Introduces a new scope for parsing. The kind of
/// scope is determined by ScopeFlags. Objects of this type should
/// be created on the stack to coincide with the position where the
/// parser enters the new scope, and this object's constructor will
/// create that new scope. Similarly, once the object is destroyed
/// the parser will exit the scope.
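///
/// Example (scope flags come from Sema/Scope.h, e.g. Scope::FnScope and
/// Scope::DeclScope):
///
///   ParseScope BodyScope(this, Scope::FnScope | Scope::DeclScope);
///   ....                 // parse the body
///   BodyScope.Exit();    // or let the destructor exit the scope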
class ParseScope {
Parser *Self;
ParseScope(const ParseScope &) = delete;
void operator=(const ParseScope &) = delete;
public:
// ParseScope - Construct a new object to manage a scope in the
// parser Self where the new Scope is created with the flags
// ScopeFlags, but only when we aren't about to enter a compound statement.
ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true,
bool BeforeCompoundStmt = false)
: Self(Self) {
if (EnteredScope && !BeforeCompoundStmt)
Self->EnterScope(ScopeFlags);
else {
if (BeforeCompoundStmt)
Self->incrementMSManglingNumber();
this->Self = nullptr;
}
}
// Exit - Exit the scope associated with this object now, rather
// than waiting until the object is destroyed.
void Exit() {
if (Self) {
Self->ExitScope();
Self = nullptr;
}
}
~ParseScope() {
Exit();
}
};
/// EnterScope - Start a new scope.
void EnterScope(unsigned ScopeFlags);
/// ExitScope - Pop a scope off the scope stack.
void ExitScope();
private:
/// RAII object used to modify the scope flags for the current scope.
class ParseScopeFlags {
Scope *CurScope;
unsigned OldFlags;
ParseScopeFlags(const ParseScopeFlags &) = delete;
void operator=(const ParseScopeFlags &) = delete;
public:
ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true);
~ParseScopeFlags();
};
//===--------------------------------------------------------------------===//
// Diagnostic Emission and Error recovery.
public:
DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID);
DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID);
DiagnosticBuilder Diag(unsigned DiagID) {
return Diag(Tok, DiagID);
}
private:
void SuggestParentheses(SourceLocation Loc, unsigned DK,
SourceRange ParenRange);
void CheckNestedObjCContexts(SourceLocation AtLoc);
public:
/// Control flags for SkipUntil functions.
enum SkipUntilFlags {
StopAtSemi = 1 << 0, ///< Stop skipping at semicolon
/// Stop skipping at specified token, but don't skip the token itself
StopBeforeMatch = 1 << 1,
StopAtCodeCompletion = 1 << 2 ///< Stop at code completion
};
friend constexpr SkipUntilFlags operator|(SkipUntilFlags L,
SkipUntilFlags R) {
return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) |
static_cast<unsigned>(R));
}
/// SkipUntil - Read tokens until we get to the specified token, then consume
/// it (unless StopBeforeMatch is specified). Because we cannot guarantee
/// that the token will ever occur, this skips to the next token, or to some
/// likely good stopping point. If Flags has StopAtSemi flag, skipping will
/// stop at a ';' character. Balances (), [], and {} delimiter tokens while
/// skipping.
///
/// If SkipUntil finds the specified token, it returns true, otherwise it
/// returns false.
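///
/// Example: skip ahead to the matching ')', but give up at a ';' if one
/// appears first:
///
///   SkipUntil(tok::r_paren, StopAtSemi);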
bool SkipUntil(tok::TokenKind T,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
return SkipUntil(llvm::makeArrayRef(T), Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) {
tok::TokenKind TokArray[] = {T1, T2, T3};
return SkipUntil(TokArray, Flags);
}
bool SkipUntil(ArrayRef<tok::TokenKind> Toks,
SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0));
/// SkipMalformedDecl - Read tokens until we get to some likely good stopping
/// point for skipping past a simple-declaration.
void SkipMalformedDecl();
/// The location of the first statement inside an else that might
/// have a misleading indentation. If there is no
/// MisleadingIndentationChecker on an else active, this location is invalid.
SourceLocation MisleadingIndentationElseLoc;
private:
//===--------------------------------------------------------------------===//
// Lexing and parsing of C++ inline methods.
struct ParsingClass;
/// [class.mem]p1: "... the class is regarded as complete within
/// - function bodies
/// - default arguments
/// - exception-specifications (TODO: C++0x)
/// - and brace-or-equal-initializers for non-static data members
/// (including such things in nested classes)."
/// LateParsedDeclarations build the tree of those elements so they can
/// be parsed after parsing the top-level class.
class LateParsedDeclaration {
public:
virtual ~LateParsedDeclaration();
virtual void ParseLexedMethodDeclarations();
virtual void ParseLexedMemberInitializers();
virtual void ParseLexedMethodDefs();
virtual void ParseLexedAttributes();
virtual void ParseLexedPragmas();
};
/// Inner node of the LateParsedDeclaration tree that parses
/// all its members recursively.
class LateParsedClass : public LateParsedDeclaration {
public:
LateParsedClass(Parser *P, ParsingClass *C);
~LateParsedClass() override;
void ParseLexedMethodDeclarations() override;
void ParseLexedMemberInitializers() override;
void ParseLexedMethodDefs() override;
void ParseLexedAttributes() override;
void ParseLexedPragmas() override;
private:
Parser *Self;
ParsingClass *Class;
};
/// Contains the lexed tokens of an attribute with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
/// FIXME: Perhaps we should change the name of LateParsedDeclaration to
/// LateParsedTokens.
struct LateParsedAttribute : public LateParsedDeclaration {
Parser *Self;
CachedTokens Toks;
IdentifierInfo &AttrName;
IdentifierInfo *MacroII = nullptr;
SourceLocation AttrNameLoc;
SmallVector<Decl*, 2> Decls;
explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name,
SourceLocation Loc)
: Self(P), AttrName(Name), AttrNameLoc(Loc) {}
void ParseLexedAttributes() override;
void addDecl(Decl *D) { Decls.push_back(D); }
};
/// Contains the lexed tokens of a pragma with arguments that
/// may reference member variables and so need to be parsed at the
/// end of the class declaration after parsing all other member
/// declarations.
class LateParsedPragma : public LateParsedDeclaration {
Parser *Self = nullptr;
AccessSpecifier AS = AS_none;
CachedTokens Toks;
public:
explicit LateParsedPragma(Parser *P, AccessSpecifier AS)
: Self(P), AS(AS) {}
void takeToks(CachedTokens &Cached) { Toks.swap(Cached); }
const CachedTokens &toks() const { return Toks; }
AccessSpecifier getAccessSpecifier() const { return AS; }
void ParseLexedPragmas() override;
};
// A list of late-parsed attributes. Used by ParseGNUAttributes.
class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> {
public:
LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { }
bool parseSoon() { return ParseSoon; }
private:
bool ParseSoon; // Are we planning to parse these shortly after creation?
};
/// Contains the lexed tokens of a member function definition
/// which needs to be parsed at the end of the class declaration
/// after parsing all other member declarations.
struct LexedMethod : public LateParsedDeclaration {
Parser *Self;
Decl *D;
CachedTokens Toks;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
explicit LexedMethod(Parser* P, Decl *MD)
: Self(P), D(MD), TemplateScope(false) {}
void ParseLexedMethodDefs() override;
};
/// LateParsedDefaultArgument - Keeps track of a parameter that may
/// have a default argument that cannot be parsed yet because it
/// occurs within a member function declaration inside the class
/// (C++ [class.mem]p2).
struct LateParsedDefaultArgument {
explicit LateParsedDefaultArgument(Decl *P,
std::unique_ptr<CachedTokens> Toks = nullptr)
: Param(P), Toks(std::move(Toks)) { }
/// Param - The parameter declaration for this parameter.
Decl *Param;
/// Toks - The sequence of tokens that comprises the default
/// argument expression, not including the '=' or the terminating
/// ')' or ','. This will be NULL for parameters that have no
/// default argument.
std::unique_ptr<CachedTokens> Toks;
};
/// LateParsedMethodDeclaration - A method declaration inside a class that
/// contains at least one entity whose parsing needs to be delayed
/// until the class itself is completely-defined, such as a default
/// argument (C++ [class.mem]p2).
struct LateParsedMethodDeclaration : public LateParsedDeclaration {
explicit LateParsedMethodDeclaration(Parser *P, Decl *M)
: Self(P), Method(M), TemplateScope(false),
ExceptionSpecTokens(nullptr) {}
void ParseLexedMethodDeclarations() override;
Parser* Self;
/// Method - The method declaration.
Decl *Method;
/// Whether this member function had an associated template
/// scope. When true, D is a template declaration;
/// otherwise, it is a member function declaration.
bool TemplateScope;
/// DefaultArgs - Contains the parameters of the function and
/// their default arguments. At least one of the parameters will
/// have a default argument, but all of the parameters of the
/// method will be stored so that they can be reintroduced into
/// scope at the appropriate times.
SmallVector<LateParsedDefaultArgument, 8> DefaultArgs;
/// The set of tokens that make up an exception-specification that
/// has not yet been parsed.
CachedTokens *ExceptionSpecTokens;
};
/// LateParsedMemberInitializer - An initializer for a non-static class data
/// member whose parsing must be delayed until the class is completely
/// defined (C++11 [class.mem]p2).
struct LateParsedMemberInitializer : public LateParsedDeclaration {
LateParsedMemberInitializer(Parser *P, Decl *FD)
: Self(P), Field(FD) { }
void ParseLexedMemberInitializers() override;
Parser *Self;
/// Field - The field declaration.
Decl *Field;
/// CachedTokens - The sequence of tokens that comprises the initializer,
/// including any leading '='.
CachedTokens Toks;
};
/// LateParsedDeclarationsContainer - During parsing of a top (non-nested)
/// C++ class, its method declarations that contain parts that won't be
/// parsed until after the definition is completed (C++ [class.mem]p2),
/// the method declarations and possibly attached inline definitions
/// will be stored here with the tokens that will be parsed to create those
/// entities.
typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer;
/// Representation of a class that has been parsed, including
/// any member function declarations or definitions that need to be
/// parsed after the corresponding top-level class is complete.
struct ParsingClass {
ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface)
: TopLevelClass(TopLevelClass), TemplateScope(false),
IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { }
/// Whether this is a "top-level" class, meaning that it is
/// not nested within another class.
bool TopLevelClass : 1;
/// Whether this class had an associated template
/// scope. When true, TagOrTemplate is a template declaration;
/// otherwise, it is a tag declaration.
bool TemplateScope : 1;
/// Whether this class is an __interface.
bool IsInterface : 1;
/// The class or class template whose definition we are parsing.
Decl *TagOrTemplate;
/// LateParsedDeclarations - Method declarations, inline definitions and
/// nested classes that contain pieces whose parsing will be delayed until
/// the top-level class is fully defined.
LateParsedDeclarationsContainer LateParsedDeclarations;
};
/// The stack of classes that is currently being
/// parsed. Nested and local classes will be pushed onto this stack
/// when they are parsed, and removed afterward.
std::stack<ParsingClass *> ClassStack;
ParsingClass &getCurrentClass() {
assert(!ClassStack.empty() && "No lexed method stacks!");
return *ClassStack.top();
}
/// RAII object used to manage the parsing of a class definition.
class ParsingClassDefinition {
Parser &P;
bool Popped;
Sema::ParsingClassState State;
public:
ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass,
bool IsInterface)
: P(P), Popped(false),
State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) {
}
/// Pop this class off the stack.
void Pop() {
assert(!Popped && "Nested class has already been popped");
Popped = true;
P.PopParsingClass(State);
}
~ParsingClassDefinition() {
if (!Popped)
P.PopParsingClass(State);
}
};
/// Contains information about any template-specific
/// information that has been parsed prior to parsing declaration
/// specifiers.
struct ParsedTemplateInfo {
ParsedTemplateInfo()
: Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { }
ParsedTemplateInfo(TemplateParameterLists *TemplateParams,
bool isSpecialization,
bool lastParameterListWasEmpty = false)
: Kind(isSpecialization? ExplicitSpecialization : Template),
TemplateParams(TemplateParams),
LastParameterListWasEmpty(lastParameterListWasEmpty) { }
explicit ParsedTemplateInfo(SourceLocation ExternLoc,
SourceLocation TemplateLoc)
: Kind(ExplicitInstantiation), TemplateParams(nullptr),
ExternLoc(ExternLoc), TemplateLoc(TemplateLoc),
LastParameterListWasEmpty(false){ }
/// The kind of template we are parsing.
enum {
/// We are not parsing a template at all.
NonTemplate = 0,
/// We are parsing a template declaration.
Template,
/// We are parsing an explicit specialization.
ExplicitSpecialization,
/// We are parsing an explicit instantiation.
ExplicitInstantiation
} Kind;
/// The template parameter lists, for template declarations
/// and explicit specializations.
TemplateParameterLists *TemplateParams;
/// The location of the 'extern' keyword, if any, for an explicit
/// instantiation
SourceLocation ExternLoc;
/// The location of the 'template' keyword, for an explicit
/// instantiation.
SourceLocation TemplateLoc;
/// Whether the last template parameter list was empty.
bool LastParameterListWasEmpty;
SourceRange getSourceRange() const LLVM_READONLY;
};
void LexTemplateFunctionForLateParsing(CachedTokens &Toks);
void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT);
static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT);
static void LateTemplateParserCleanupCallback(void *P);
Sema::ParsingClassState
PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface);
void DeallocateParsedClasses(ParsingClass *Class);
void PopParsingClass(Sema::ParsingClassState);
enum CachedInitKind {
CIK_DefaultArgument,
CIK_DefaultInitializer
};
NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS,
ParsedAttributes &AccessAttrs,
ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo,
const VirtSpecifiers &VS,
SourceLocation PureSpecLoc);
void ParseCXXNonStaticMemberInitializer(Decl *VarD);
void ParseLexedAttributes(ParsingClass &Class);
void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D,
bool EnterScope, bool OnDefinition);
void ParseLexedAttribute(LateParsedAttribute &LA,
bool EnterScope, bool OnDefinition);
void ParseLexedMethodDeclarations(ParsingClass &Class);
void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM);
void ParseLexedMethodDefs(ParsingClass &Class);
void ParseLexedMethodDef(LexedMethod &LM);
void ParseLexedMemberInitializers(ParsingClass &Class);
void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI);
void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod);
void ParseLexedPragmas(ParsingClass &Class);
void ParseLexedPragma(LateParsedPragma &LP);
bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks);
bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK);
bool ConsumeAndStoreConditional(CachedTokens &Toks);
bool ConsumeAndStoreUntil(tok::TokenKind T1,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true) {
return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken);
}
bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2,
CachedTokens &Toks,
bool StopAtSemi = true,
bool ConsumeFinalToken = true);
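/// Example (illustrative sketch): to cache the tokens of a default argument
/// up to, but not including, the closing parenthesis, a caller could write
/// \code
///   CachedTokens Toks;
///   ConsumeAndStoreUntil(tok::r_paren, Toks, /*StopAtSemi=*/true,
///                        /*ConsumeFinalToken=*/false);
/// \endcode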
//===--------------------------------------------------------------------===//
// C99 6.9: External Definitions.
struct ParsedAttributesWithRange : ParsedAttributes {
ParsedAttributesWithRange(AttributeFactory &factory)
: ParsedAttributes(factory) {}
void clear() {
ParsedAttributes::clear();
Range = SourceRange();
}
SourceRange Range;
};
struct ParsedAttributesViewWithRange : ParsedAttributesView {
ParsedAttributesViewWithRange() : ParsedAttributesView() {}
void clearListOnly() {
ParsedAttributesView::clearListOnly();
Range = SourceRange();
}
SourceRange Range;
};
DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr);
bool isDeclarationAfterDeclarator();
bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator);
DeclGroupPtrTy ParseDeclarationOrFunctionDefinition(
ParsedAttributesWithRange &attrs,
ParsingDeclSpec *DS = nullptr,
AccessSpecifier AS = AS_none);
DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
ParsingDeclSpec &DS,
AccessSpecifier AS);
void SkipFunctionBody();
Decl *ParseFunctionDefinition(ParsingDeclarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
LateParsedAttrList *LateParsedAttrs = nullptr);
void ParseKNRParamDeclarations(Declarator &D);
// EndLoc is filled with the location of the last token of the simple-asm.
ExprResult ParseSimpleAsm(bool ForAsmLabel, SourceLocation *EndLoc);
ExprResult ParseAsmStringLiteral(bool ForAsmLabel);
// Objective-C External Declarations
void MaybeSkipAttributes(tok::ObjCKeywordKind Kind);
DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs);
DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc);
Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
ParsedAttributes &prefixAttrs);
class ObjCTypeParamListScope;
ObjCTypeParamList *parseObjCTypeParamList();
ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs(
ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc,
SmallVectorImpl<IdentifierLocPair> &protocolIdents,
SourceLocation &rAngleLoc, bool mayBeProtocolList = true);
void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
BalancedDelimiterTracker &T,
SmallVectorImpl<Decl *> &AllIvarDecls,
bool RBraceMissing);
void ParseObjCClassInstanceVariables(Decl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc);
bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P,
SmallVectorImpl<SourceLocation> &PLocs,
bool WarnOnDeclarations,
bool ForObjCContainer,
SourceLocation &LAngleLoc,
SourceLocation &EndProtoLoc,
bool consumeLastToken);
/// Parse the first angle-bracket-delimited clause for an
/// Objective-C object or object pointer type, which may be either
/// type arguments or protocol qualifiers.
void parseObjCTypeArgsOrProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken,
bool warnOnIncompleteProtocols);
/// Parse either Objective-C type arguments or protocol qualifiers; if the
/// former, also parse protocol qualifiers afterward.
void parseObjCTypeArgsAndProtocolQualifiers(
ParsedType baseType,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SmallVectorImpl<SourceLocation> &protocolLocs,
SourceLocation &protocolRAngleLoc,
bool consumeLastToken);
/// Parse a protocol qualifier type such as '<NSCopying>', which is
/// an anachronistic way of writing 'id<NSCopying>'.
TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc);
/// Parse Objective-C type arguments and protocol qualifiers, extending the
/// current type with the parsed result.
TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc,
ParsedType type,
bool consumeLastToken,
SourceLocation &endLoc);
void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
Decl *CDecl);
DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc,
ParsedAttributes &prefixAttrs);
struct ObjCImplParsingDataRAII {
Parser &P;
Decl *Dcl;
bool HasCFunction;
typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer;
LateParsedObjCMethodContainer LateParsedObjCMethods;
ObjCImplParsingDataRAII(Parser &parser, Decl *D)
: P(parser), Dcl(D), HasCFunction(false) {
P.CurParsedObjCImpl = this;
Finished = false;
}
~ObjCImplParsingDataRAII();
void finish(SourceRange AtEnd);
bool isFinished() const { return Finished; }
private:
bool Finished;
};
ObjCImplParsingDataRAII *CurParsedObjCImpl;
void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl);
DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
ParsedAttributes &Attrs);
DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd);
Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc);
Decl *ParseObjCPropertySynthesize(SourceLocation atLoc);
Decl *ParseObjCPropertyDynamic(SourceLocation atLoc);
IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation);
// Definitions for Objective-c context sensitive keywords recognition.
enum ObjCTypeQual {
objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref,
objc_nonnull, objc_nullable, objc_null_unspecified,
objc_NumQuals
};
IdentifierInfo *ObjCTypeQuals[objc_NumQuals];
bool isTokIdentifier_in() const;
ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx,
ParsedAttributes *ParamAttrs);
void ParseObjCMethodRequirement();
Decl *ParseObjCMethodPrototype(
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition = true);
Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType,
tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword,
bool MethodDefinition=true);
void ParseObjCPropertyAttribute(ObjCDeclSpec &DS);
Decl *ParseObjCMethodDefinition();
public:
//===--------------------------------------------------------------------===//
// C99 6.5: Expressions.
/// TypeCastState - State whether an expression is or may be a type cast.
enum TypeCastState {
NotTypeCast = 0,
MaybeTypeCast,
IsTypeCast
};
ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpressionInExprEvalContext(
TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseCaseExpression(SourceLocation CaseLoc);
ExprResult ParseConstraintExpression();
ExprResult
ParseConstraintLogicalAndExpression(bool IsTrailingRequiresClause);
ExprResult ParseConstraintLogicalOrExpression(bool IsTrailingRequiresClause);
// Expr that doesn't include commas.
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast);
ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks,
unsigned &NumLineToksConsumed,
bool IsUnevaluated);
ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false);
private:
ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc);
ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc);
ExprResult ParseRHSOfBinaryExpression(ExprResult LHS,
prec::Level MinPrec);
/// Control what ParseCastExpression will parse.
enum CastParseKind {
AnyCastExpr = 0,
UnaryExprOnly,
PrimaryExprOnly
};
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand,
bool &NotCastExpr,
TypeCastState isTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
ExprResult ParseCastExpression(CastParseKind ParseKind,
bool isAddressOfOperand = false,
TypeCastState isTypeCast = NotTypeCast,
bool isVectorLiteral = false,
bool *NotPrimaryExpression = nullptr);
/// Returns true if the next token cannot start an expression.
bool isNotExpressionStart();
/// Returns true if the next token would start a postfix-expression
/// suffix.
bool isPostfixExpressionSuffixStart() {
tok::TokenKind K = Tok.getKind();
return (K == tok::l_square || K == tok::l_paren ||
K == tok::period || K == tok::arrow ||
K == tok::plusplus || K == tok::minusminus);
}
bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less);
void checkPotentialAngleBracket(ExprResult &PotentialTemplateName);
bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &,
const Token &OpToken);
bool checkPotentialAngleBracketDelimiter(const Token &OpToken) {
if (auto *Info = AngleBrackets.getCurrent(*this))
return checkPotentialAngleBracketDelimiter(*Info, OpToken);
return false;
}
ExprResult ParsePostfixExpressionSuffix(ExprResult LHS);
ExprResult ParseUnaryExprOrTypeTraitExpression();
ExprResult ParseBuiltinPrimaryExpression();
ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
bool &isCastExpr,
ParsedType &CastTy,
SourceRange &CastRange);
typedef SmallVector<Expr*, 20> ExprListTy;
typedef SmallVector<SourceLocation, 20> CommaLocsTy;
/// ParseExpressionList - Used for C/C++ (argument-)expression-list.
bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
llvm::function_ref<void()> ExpressionStarts =
llvm::function_ref<void()>());
/// ParseSimpleExpressionList - A simple comma-separated list of expressions,
/// used for misc language extensions.
bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs);
/// ParenParseOption - Control what ParseParenExpression will parse.
enum ParenParseOption {
SimpleExpr, // Only parse '(' expression ')'
FoldExpr, // Also allow fold-expression <anything>
CompoundStmt, // Also allow '(' compound-statement ')'
CompoundLiteral, // Also allow '(' type-name ')' '{' ... '}'
CastExpr // Also allow '(' type-name ')' <anything>
};
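/// Example (illustrative): each later option also permits the earlier
/// forms, so with ExprType == CastExpr all of the following parse:
/// \code
///   (x + y)       // SimpleExpr: plain parenthesized expression
///   (args + ...)  // FoldExpr: C++17 fold-expression
///   ({ f(); })    // CompoundStmt: GNU statement expression
///   (int){1}      // CompoundLiteral: C99 compound literal
///   (int)x        // CastExpr: C-style cast
/// \endcode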
ExprResult ParseParenExpression(ParenParseOption &ExprType,
bool stopIfCastExpr,
bool isTypeCast,
ParsedType &CastTy,
SourceLocation &RParenLoc);
ExprResult ParseCXXAmbiguousParenExpression(
ParenParseOption &ExprType, ParsedType &CastTy,
BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt);
ExprResult ParseCompoundLiteralExpression(ParsedType Ty,
SourceLocation LParenLoc,
SourceLocation RParenLoc);
ExprResult ParseGenericSelectionExpression();
ExprResult ParseObjCBoolLiteral();
ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T);
//===--------------------------------------------------------------------===//
// C++ Expressions
ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand,
Token &Replacement);
ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false);
bool areTokensAdjacent(const Token &A, const Token &B);
void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr,
bool EnteringContext, IdentifierInfo &II,
CXXScopeSpec &SS);
bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType,
bool EnteringContext,
bool *MayBePseudoDestructor = nullptr,
bool IsTypename = false,
IdentifierInfo **LastII = nullptr,
bool OnlyNamespace = false,
bool InUsingDeclaration = false);
//===--------------------------------------------------------------------===//
// C++11 5.1.2: Lambda expressions
/// Result of tentatively parsing a lambda-introducer.
enum class LambdaIntroducerTentativeParse {
/// This appears to be a lambda-introducer, which has been fully parsed.
Success,
/// This is a lambda-introducer, but has not been fully parsed, and this
/// function needs to be called again to parse it.
Incomplete,
/// This is definitely an Objective-C message send expression, rather than
/// a lambda-introducer, attribute-specifier, or array designator.
MessageSend,
/// This is not a lambda-introducer.
Invalid,
};
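/// Example (illustrative): in Objective-C++ a leading '[' is ambiguous
/// until more tokens are seen:
/// \code
///   id result = [foo bar];          // MessageSend
///   auto fn = [foo] { use(foo); };  // Success: lambda capturing 'foo'
/// \endcode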
// [...] () -> type {...}
ExprResult ParseLambdaExpression();
ExprResult TryParseLambdaExpression();
bool
ParseLambdaIntroducer(LambdaIntroducer &Intro,
LambdaIntroducerTentativeParse *Tentative = nullptr);
ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro);
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Casts
ExprResult ParseCXXCasts();
/// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast.
ExprResult ParseBuiltinBitCast();
//===--------------------------------------------------------------------===//
// C++ 5.2p1: C++ Type Identification
ExprResult ParseCXXTypeid();
//===--------------------------------------------------------------------===//
// C++ : Microsoft __uuidof Expression
ExprResult ParseCXXUuidof();
//===--------------------------------------------------------------------===//
// C++ 5.2.4: C++ Pseudo-Destructor Expressions
ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
ParsedType ObjectType);
//===--------------------------------------------------------------------===//
// C++ 9.3.2: C++ 'this' pointer
ExprResult ParseCXXThis();
//===--------------------------------------------------------------------===//
// C++ 15: C++ Throw Expression
ExprResult ParseThrowExpression();
ExceptionSpecificationType tryParseExceptionSpecification(
bool Delayed,
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &DynamicExceptions,
SmallVectorImpl<SourceRange> &DynamicExceptionRanges,
ExprResult &NoexceptExpr,
CachedTokens *&ExceptionSpecTokens);
// EndLoc is filled with the location of the last token of the specification.
ExceptionSpecificationType ParseDynamicExceptionSpecification(
SourceRange &SpecificationRange,
SmallVectorImpl<ParsedType> &Exceptions,
SmallVectorImpl<SourceRange> &Ranges);
//===--------------------------------------------------------------------===//
// C++0x 8: Function declaration trailing-return-type
TypeResult ParseTrailingReturnType(SourceRange &Range,
bool MayBeFollowedByDirectInit);
//===--------------------------------------------------------------------===//
// C++ 2.13.5: C++ Boolean Literals
ExprResult ParseCXXBoolLiteral();
//===--------------------------------------------------------------------===//
// C++ 5.2.3: Explicit type conversion (functional notation)
ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS);
/// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers.
/// This should only be called when the current token is known to be part of
/// simple-type-specifier.
void ParseCXXSimpleTypeSpecifier(DeclSpec &DS);
bool ParseCXXTypeSpecifierSeq(DeclSpec &DS);
//===--------------------------------------------------------------------===//
// C++ 5.3.4 and 5.3.5: C++ new and delete
bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs,
Declarator &D);
void ParseDirectNewDeclarator(Declarator &D);
ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start);
ExprResult ParseCXXDeleteExpression(bool UseGlobal,
SourceLocation Start);
//===--------------------------------------------------------------------===//
// C++ if/switch/while/for condition expression.
struct ForRangeInfo;
Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt,
SourceLocation Loc,
Sema::ConditionKind CK,
ForRangeInfo *FRI = nullptr);
//===--------------------------------------------------------------------===//
// C++ Coroutines
ExprResult ParseCoyieldExpression();
//===--------------------------------------------------------------------===//
// C++ Concepts
ExprResult ParseRequiresExpression();
void ParseTrailingRequiresClause(Declarator &D);
//===--------------------------------------------------------------------===//
// C99 6.7.8: Initialization.
/// ParseInitializer
/// initializer: [C99 6.7.8]
/// assignment-expression
/// '{' ...
ExprResult ParseInitializer() {
if (Tok.isNot(tok::l_brace))
return ParseAssignmentExpression();
return ParseBraceInitializer();
}
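/// Example (illustrative): both initializers below reach ParseInitializer;
/// the first takes the assignment-expression path, the second the
/// brace-initializer path:
/// \code
///   int x = f();     // assignment-expression
///   int y = {1, 2};  // '{' ... -> ParseBraceInitializer()
/// \endcode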
bool MayBeDesignationStart();
ExprResult ParseBraceInitializer();
ExprResult ParseInitializerWithPotentialDesignator(
llvm::function_ref<void(const Designation &)> CodeCompleteCB);
//===--------------------------------------------------------------------===//
// clang Expressions
ExprResult ParseBlockLiteralExpression(); // ^{...}
//===--------------------------------------------------------------------===//
// Objective-C Expressions
ExprResult ParseObjCAtExpression(SourceLocation AtLocation);
ExprResult ParseObjCStringLiteral(SourceLocation AtLoc);
ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc);
ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue);
ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc);
ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc);
ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc);
ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc);
ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc);
bool isSimpleObjCMessageExpression();
ExprResult ParseObjCMessageExpression();
ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc,
SourceLocation SuperLoc,
ParsedType ReceiverType,
Expr *ReceiverExpr);
ExprResult ParseAssignmentExprWithObjCMessageExprStart(
SourceLocation LBracloc, SourceLocation SuperLoc,
ParsedType ReceiverType, Expr *ReceiverExpr);
bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr);
//===--------------------------------------------------------------------===//
// C99 6.8: Statements and Blocks.
/// A SmallVector of statements, with stack size 32 (as that is the only
/// size used).
typedef SmallVector<Stmt*, 32> StmtVector;
/// A SmallVector of expressions, with stack size 12 (the maximum used).
typedef SmallVector<Expr*, 12> ExprVector;
/// A SmallVector of types.
typedef SmallVector<ParsedType, 12> TypeVector;
StmtResult
ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
ParsedStmtContext StmtCtx = ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
bool MissingCase = false,
ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr,
unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
Sema::ConditionResult &CondResult,
SourceLocation Loc,
Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
ParsedAttributesWithRange &Attrs);
/// Describes how an __if_exists
/// block should be handled.
enum IfExistsBehavior {
/// Parse the block; this code is always used.
IEB_Parse,
/// Skip the block entirely; this code is never used.
IEB_Skip,
/// Parse the block as a dependent block, which may be used in
/// some template instantiations but not others.
IEB_Dependent
};
/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
/// The location of the initial keyword.
SourceLocation KeywordLoc;
/// Whether this is an __if_exists block (rather than an
/// __if_not_exists block).
bool IsIfExists;
/// Nested-name-specifier preceding the name.
CXXScopeSpec SS;
/// The name we're looking for.
UnqualifiedId Name;
/// The behavior that this __if_exists or __if_not_exists block
/// should have.
IfExistsBehavior Behavior;
};
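/// Example (illustrative): for the Microsoft extension
/// \code
///   __if_exists (Foo::bar) { /* used only if Foo::bar exists */ }
/// \endcode
/// KeywordLoc is the location of '__if_exists', IsIfExists is true, SS
/// names 'Foo::', and Name is 'bar'.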
bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
ParsedAttributes &AccessAttrs,
AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
SmallVectorImpl<Expr *> &Constraints,
SmallVectorImpl<Expr *> &Exprs);
//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks
StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);
//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks
StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();
//===--------------------------------------------------------------------===//
// Objective-C Statements
StmtResult ParseObjCAtStatement(SourceLocation atLoc,
ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);
//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.
/// A context for parsing declaration specifiers. TODO: flesh this
/// out; there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
DSC_normal, // normal context
DSC_class, // class context, enables 'friend'
DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
DSC_top_level, // top-level/namespace declaration context
DSC_template_param, // template parameter context
DSC_template_type_arg, // template type argument context
DSC_objc_method_result, // ObjC method result context, enables 'instancetype'
DSC_condition // condition declaration context
};
/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_condition:
return false;
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_type_specifier:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return true;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Is this a context in which we can perform class template argument
/// deduction?
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) {
switch (DSC) {
case DeclSpecContext::DSC_normal:
case DeclSpecContext::DSC_template_param:
case DeclSpecContext::DSC_class:
case DeclSpecContext::DSC_top_level:
case DeclSpecContext::DSC_condition:
case DeclSpecContext::DSC_type_specifier:
return true;
case DeclSpecContext::DSC_objc_method_result:
case DeclSpecContext::DSC_template_type_arg:
case DeclSpecContext::DSC_trailing:
case DeclSpecContext::DSC_alias_declaration:
return false;
}
llvm_unreachable("Missing DeclSpecContext case");
}
/// Information on a C++0x for-range-initializer found while parsing a
/// declaration which turns out to be a for-range-declaration.
struct ForRangeInit {
SourceLocation ColonLoc;
ExprResult RangeExpr;
bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); }
};
struct ForRangeInfo : ForRangeInit {
StmtResult LoopVar;
};
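/// Example (illustrative): while parsing 'for (auto x : xs) ...', the
/// declaration parser discovers the ':' and records it together with the
/// range expression:
/// \code
///   for (auto x : xs) use(x);  // ColonLoc = ':' location, RangeExpr = 'xs'
/// \endcode
/// The caller then re-interprets the declaration as a
/// for-range-declaration.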
DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs,
SourceLocation *DeclSpecStart = nullptr);
DeclGroupPtrTy
ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
ParsedAttributesWithRange &attrs, bool RequireSemi,
ForRangeInit *FRI = nullptr,
SourceLocation *DeclSpecStart = nullptr);
bool MightBeDeclarator(DeclaratorContext Context);
DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context,
SourceLocation *DeclEnd = nullptr,
ForRangeInit *FRI = nullptr);
Decl *ParseDeclarationAfterDeclarator(Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo());
bool ParseAsmAttributesAfterDeclarator(Declarator &D);
Decl *ParseDeclarationAfterDeclaratorAndAttributes(
Declarator &D,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ForRangeInit *FRI = nullptr);
Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope);
Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope);
/// When in code-completion, skip parsing of the function/method body
/// unless the body contains the code-completion point.
///
/// \returns true if the function body was skipped.
bool trySkippingFunctionBody();
bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal,
LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
LateParsedAttrList *LateAttrs = nullptr);
void ParseSpecifierQualifierList(
DeclSpec &DS, AccessSpecifier AS = AS_none,
DeclSpecContext DSC = DeclSpecContext::DSC_normal);
void ParseObjCTypeQualifierList(ObjCDeclSpec &DS,
DeclaratorContext Context);
void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
Decl *TagDecl);
void ParseStructDeclaration(
ParsingDeclSpec &DS,
llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);
bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();
/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier. Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;
/// Return true if we know that we are definitely looking at a
/// decl-specifier, and that it isn't part of an expression such as a
/// function-style cast. Return false if it's not a decl-specifier, or if
/// we're not sure.
bool isKnownToBeDeclarationSpecifier() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationSpecifier() == TPResult::True;
return isDeclarationSpecifier(true);
}
/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
if (getLangOpts().CPlusPlus)
return isCXXDeclarationStatement();
return isDeclarationSpecifier(true);
}
/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
if (getLangOpts().OpenMP)
Actions.startOpenMPLoop();
if (getLangOpts().CPlusPlus)
return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
return isDeclarationSpecifier(true);
}
/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();
/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();
/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);
/// Specifies the context in which type-id/expression
/// disambiguation will occur.
enum TentativeCXXTypeIdContext {
TypeIdInParens,
TypeIdUnambiguous,
TypeIdAsTemplateArgument
};
/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.
/// Returns true for a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdInParens, isAmbiguous);
isAmbiguous = false;
return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
bool isAmbiguous;
return isTypeIdInParens(isAmbiguous);
}
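/// Example (illustrative): after the '(' has been consumed in a cast-like
/// position:
/// \code
///   (int())      // type-id: function type returning int -> true
///   (int() + 1)  // expression -> false
///   (T())        // both readings possible; may set isAmbiguous
/// \endcode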
/// Checks if the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
bool IsAmbiguous;
if (getLangOpts().CPlusPlus)
return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
return isTypeSpecifierQualifier();
}
/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();
/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);
/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);
struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
Expression, ///< Disambiguated as an expression (either kind).
ConditionDecl, ///< Disambiguated as the declaration form of condition.
InitStmtDecl, ///< Disambiguated as a simple-declaration init-statement.
ForRangeDecl, ///< Disambiguated as a for-range declaration.
Error ///< Can't be any of the above!
};
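/// Example (illustrative): after 'if (' or 'for (' each form below
/// disambiguates differently:
/// \code
///   if (x == 0)              // Expression
///   if (int y = f())         // ConditionDecl
///   if (int y = f(); y > 0)  // InitStmtDecl (C++17)
///   for (int y : ys)         // ForRangeDecl (when CanBeForRangeDecl)
/// \endcode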
/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
bool CanBeForRangeDecl);
bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
bool isAmbiguous;
return isCXXTypeId(Context, isAmbiguous);
}
/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
True, False, Ambiguous, Error
};
/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression,
/// \c TPResult::False if this token starts a type-specifier-seq, or
/// \c TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);
/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
bool *InvalidAsDeclSpec = nullptr);
/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();
/// Determine whether the current token sequence might be
/// '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);
/// Determine whether an '(' after an 'explicit' keyword is part of a C++20
/// 'explicit(bool)' declaration, in earlier language modes where that is an
/// extension.
TPResult isExplicitBool();
/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);
// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();
/// Try to skip a possibly empty sequence of 'attribute-specifier's without
/// full validation of the syntactic structure of attributes.
bool TrySkipAttributes();
public:
TypeResult ParseTypeName(SourceRange *Range = nullptr,
DeclaratorContext Context
= DeclaratorContext::TypeNameContext,
AccessSpecifier AS = AS_none,
Decl **OwnedType = nullptr,
ParsedAttributes *Attrs = nullptr);
private:
void ParseBlockId(SourceLocation CaretLoc);
/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
const LangOptions &LO = getLangOpts();
return LO.DoubleSquareBracketAttributes;
}
// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
assert(Tok.is(tok::l_square));
if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
return false;
return DiagnoseProhibitedCXX11Attribute();
}
bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation) {
if (!standardAttributesAllowed())
return;
if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
Tok.isNot(tok::kw_alignas))
return;
DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
DeclSpec &DS, Sema::TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clear();
}
void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
SourceLocation FixItLoc = SourceLocation()) {
if (Attrs.Range.isInvalid())
return;
DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
Attrs.clearListOnly();
}
void DiagnoseProhibitedAttributes(const SourceRange &Range,
SourceLocation FixItLoc);
// Forbid C++11 and C2x attributes that appear in certain syntactic locations
// which the standard permits but we don't support yet, for example,
// attributes that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
unsigned DiagID);
/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();
/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();
/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
unsigned
ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseGNUAttributes(Declarator &D,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute)) {
ParsedAttributes attrs(AttrFactory);
SourceLocation endLoc;
ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D);
D.takeAttributes(attrs, endLoc);
}
}
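/// Example (illustrative): for a declarator such as
/// \code
///   void f() __attribute__((noreturn));
/// \endcode
/// the '__attribute__' token triggers ParseGNUAttributes, and the parsed
/// attributes are moved onto the Declarator 'D'.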
void MaybeParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr) {
if (Tok.is(tok::kw___attribute))
ParseGNUAttributes(attrs, endLoc, LateAttrs);
}
void ParseGNUAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr,
LateParsedAttrList *LateAttrs = nullptr,
Declarator *D = nullptr);
void ParseGNUAttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax, Declarator *D);
IdentifierLoc *ParseIdentifierLoc();
unsigned
ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName, SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void MaybeParseCXX11Attributes(Declarator &D) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrs(AttrFactory);
SourceLocation endLoc;
ParseCXX11Attributes(attrs, &endLoc);
D.takeAttributes(attrs, endLoc);
}
}
void MaybeParseCXX11Attributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
ParsedAttributesWithRange attrsWithRange(AttrFactory);
ParseCXX11Attributes(attrsWithRange, endLoc);
attrs.takeAllFrom(attrsWithRange);
}
}
void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *endLoc = nullptr,
bool OuterMightBeMessageSend = false) {
if (standardAttributesAllowed() &&
isCXX11AttributeSpecifier(false, OuterMightBeMessageSend))
ParseCXX11Attributes(attrs, endLoc);
}
void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs,
SourceLocation *EndLoc = nullptr);
void ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
SourceLocation *EndLoc = nullptr);
/// Parses a C++11 (or C2x)-style attribute argument list. Returns true
/// if this results in adding an attribute to the ParsedAttributes list.
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc);
IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc);
void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr) {
if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square))
ParseMicrosoftAttributes(attrs, endLoc);
}
void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs);
void ParseMicrosoftAttributes(ParsedAttributes &attrs,
SourceLocation *endLoc = nullptr);
void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr) {
const auto &LO = getLangOpts();
if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec))
ParseMicrosoftDeclSpecs(Attrs, End);
}
void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
SourceLocation *End = nullptr);
bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs);
void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs);
void DiagnoseAndSkipExtendedMicrosoftTypeAttributes();
SourceLocation SkipExtendedMicrosoftTypeAttributes();
void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs);
void ParseBorlandTypeAttributes(ParsedAttributes &attrs);
void ParseOpenCLKernelAttributes(ParsedAttributes &attrs);
void ParseOpenCLQualifiers(ParsedAttributes &Attrs);
/// Parses the opencl_unroll_hint attribute if the language is OpenCL v2.0
/// or higher.
/// \return false if an error occurs.
bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) {
if (getLangOpts().OpenCL)
return ParseOpenCLUnrollHintAttribute(Attrs);
return true;
}
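/// Example (illustrative): in OpenCL this guards parsing of a loop hint
/// such as
/// \code
///   __attribute__((opencl_unroll_hint(4)))
///   for (int i = 0; i < n; ++i) { /* ... */ }
/// \endcode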
/// Parses the opencl_unroll_hint attribute.
/// \return false if an error occurs.
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs);
void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs);
VersionTuple ParseVersionTuple(SourceRange &Range);
void ParseAvailabilityAttribute(IdentifierInfo &Availability,
SourceLocation AvailabilityLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
Optional<AvailabilitySpec> ParseAvailabilitySpec();
ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc);
void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol,
SourceLocation Loc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
SourceLocation ObjCBridgeRelatedLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype,
SourceLocation SwiftNewtypeLoc,
ParsedAttributes &attrs,
SourceLocation *endLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax);
void
ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc, ParsedAttributes &Attrs,
SourceLocation *EndLoc, IdentifierInfo *ScopeName,
SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax);
void ParseTypeofSpecifier(DeclSpec &DS);
SourceLocation ParseDecltypeSpecifier(DeclSpec &DS);
void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ParseUnderlyingTypeSpecifier(DeclSpec &DS);
void ParseAtomicSpecifier(DeclSpec &DS);
ExprResult ParseAlignArgument(SourceLocation Start,
SourceLocation &EllipsisLoc);
void ParseAlignmentSpecifier(ParsedAttributes &Attrs,
SourceLocation *endLoc = nullptr);
void ParsePtrauthQualifier(ParsedAttributes &Attrs);
VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const;
VirtSpecifiers::Specifier isCXX11VirtSpecifier() const {
return isCXX11VirtSpecifier(Tok);
}
void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface,
SourceLocation FriendLoc);
bool isCXX11FinalKeyword() const;
/// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to
/// enter a new C++ declarator scope and exit it when the function is
/// finished.
class DeclaratorScopeObj {
Parser &P;
CXXScopeSpec &SS;
bool EnteredScope;
bool CreatedScope;
public:
DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss)
: P(p), SS(ss), EnteredScope(false), CreatedScope(false) {}
void EnterDeclaratorScope() {
assert(!EnteredScope && "Already entered the scope!");
assert(SS.isSet() && "C++ scope was not set!");
CreatedScope = true;
P.EnterScope(0); // Not a decl scope.
if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS))
EnteredScope = true;
}
~DeclaratorScopeObj() {
if (EnteredScope) {
assert(SS.isSet() && "C++ scope was cleared ?");
P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS);
}
if (CreatedScope)
P.ExitScope();
}
};
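/// Example (illustrative sketch): when parsing the out-of-line definition
/// 'void N::C::f() { ... }', the declarator parser can write
/// \code
///   DeclaratorScopeObj DeclScopeObj(*this, SS);
///   if (SS.isSet())
///     DeclScopeObj.EnterDeclaratorScope(); // look up names in N::C
///   // ... parse the rest of the declarator; the destructor exits the scope
/// \endcode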
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
void ParseDeclarator(Declarator &D);
/// A function that parses a variant of direct-declarator.
typedef void (Parser::*DirectDeclParseFunction)(Declarator&);
void ParseDeclaratorInternal(Declarator &D,
DirectDeclParseFunction DirectDeclParser);
enum AttrRequirements {
AR_NoAttributesParsed = 0, ///< No attributes are diagnosed.
AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes.
AR_GNUAttributesParsed = 1 << 1,
AR_CXX11AttributesParsed = 1 << 2,
AR_DeclspecAttributesParsed = 1 << 3,
AR_AllAttributesParsed = AR_GNUAttributesParsed |
AR_CXX11AttributesParsed |
AR_DeclspecAttributesParsed,
AR_VendorAttributesParsed = AR_GNUAttributesParsed |
AR_DeclspecAttributesParsed
};
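/// Example (illustrative): a caller that accepts vendor attributes but not
/// C++11 ones would pass
/// \code
///   ParseTypeQualifierListOpt(DS, AR_GNUAttributesParsed |
///                                     AR_DeclspecAttributesParsed);
/// \endcode
/// which is exactly the AR_VendorAttributesParsed combination.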
void ParseTypeQualifierListOpt(
DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed,
bool AtomicAllowed = true, bool IdentifierRequired = false,
Optional<llvm::function_ref<void()>> CodeCompletionHandler = None);
void ParseDirectDeclarator(Declarator &D);
void ParseDecompositionDeclarator(Declarator &D);
void ParseParenDeclarator(Declarator &D);
void ParseFunctionDeclarator(Declarator &D,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker,
bool IsAmbiguous,
bool RequiresArg = false);
void InitCXXThisScopeForDeclaratorIfRelevant(
const Declarator &D, const DeclSpec &DS,
llvm::Optional<Sema::CXXThisScopeRAII> &ThisScope);
bool ParseRefQualifier(bool &RefQualifierIsLValueRef,
SourceLocation &RefQualifierLoc);
bool isFunctionDeclaratorIdentifierList();
void ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo);
void ParseParameterDeclarationClause(
DeclaratorContext DeclaratorContext,
ParsedAttributes &attrs,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
SourceLocation &EllipsisLoc);
void ParseBracketDeclarator(Declarator &D);
void ParseMisplacedBracketDeclarator(Declarator &D);
//===--------------------------------------------------------------------===//
// C++ 7: Declarations [dcl.dcl]
/// The kind of attribute specifier we have found.
enum CXX11AttributeKind {
/// This is not an attribute specifier.
CAK_NotAttributeSpecifier,
/// This should be treated as an attribute-specifier.
CAK_AttributeSpecifier,
/// The next tokens are '[[', but this is not an attribute-specifier. This
/// is ill-formed by C++11 [dcl.attr.grammar]p6.
CAK_InvalidAttributeSpecifier
};
CXX11AttributeKind
isCXX11AttributeSpecifier(bool Disambiguate = false,
bool OuterMightBeMessageSend = false);
void DiagnoseUnexpectedNamespace(NamedDecl *Context);
DeclGroupPtrTy ParseNamespace(DeclaratorContext Context,
SourceLocation &DeclEnd,
SourceLocation InlineLoc = SourceLocation());
struct InnerNamespaceInfo {
SourceLocation NamespaceLoc;
SourceLocation InlineLoc;
SourceLocation IdentLoc;
IdentifierInfo *Ident;
};
using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>;
void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
unsigned int index, SourceLocation &InlineLoc,
ParsedAttributes &attrs,
BalancedDelimiterTracker &Tracker);
Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context);
Decl *ParseExportDeclaration();
DeclGroupPtrTy ParseUsingDirectiveOrDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs);
Decl *ParseUsingDirective(DeclaratorContext Context,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
ParsedAttributes &attrs);
struct UsingDeclarator {
SourceLocation TypenameLoc;
CXXScopeSpec SS;
UnqualifiedId Name;
SourceLocation EllipsisLoc;
void clear() {
TypenameLoc = EllipsisLoc = SourceLocation();
SS.clear();
Name.clear();
}
};
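/// Example (illustrative): for the C++17 pack using-declaration
/// \code
///   using typename Bases::value_type...;
/// \endcode
/// TypenameLoc marks 'typename', SS holds 'Bases::', Name is 'value_type',
/// and EllipsisLoc marks the pack-expansion '...'.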
bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D);
DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context,
const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc,
SourceLocation &DeclEnd,
AccessSpecifier AS = AS_none);
Decl *ParseAliasDeclarationAfterDeclarator(
const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc,
UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS,
ParsedAttributes &Attrs, Decl **OwnedType = nullptr);
Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd);
Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc,
SourceLocation AliasLoc, IdentifierInfo *Alias,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// C++ 9: classes [class] and C structs/unions.
bool isValidAfterTypeSpecifier(bool CouldBeBitfield);
void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc,
DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, bool EnteringContext,
DeclSpecContext DSC,
ParsedAttributesWithRange &Attributes);
void SkipCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
unsigned TagType,
Decl *TagDecl);
void ParseCXXMemberSpecification(SourceLocation StartLoc,
SourceLocation AttrFixitLoc,
ParsedAttributesWithRange &Attrs,
unsigned TagType,
Decl *TagDecl);
ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction,
SourceLocation &EqualLoc);
bool
ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo,
VirtSpecifiers &VS,
ExprResult &BitfieldSize,
LateParsedAttrList &LateAttrs);
void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D,
VirtSpecifiers &VS);
DeclGroupPtrTy ParseCXXClassMemberDeclaration(
AccessSpecifier AS, ParsedAttributes &Attr,
const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
ParsingDeclRAIIObject *DiagsFromTParams = nullptr);
DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas(
AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
DeclSpec::TST TagType, Decl *Tag);
void ParseConstructorInitializer(Decl *ConstructorDecl);
MemInitResult ParseMemInitializer(Decl *ConstructorDecl);
void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo,
Decl *ThisDecl);
//===--------------------------------------------------------------------===//
// C++ 10: Derived classes [class.derived]
TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
SourceLocation &EndLocation);
void ParseBaseClause(Decl *ClassDecl);
BaseResult ParseBaseSpecifier(Decl *ClassDecl);
AccessSpecifier getAccessSpecifierIfPresent() const;
bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Id,
bool AssumeTemplateId);
bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
ParsedType ObjectType,
UnqualifiedId &Result);
//===--------------------------------------------------------------------===//
// OpenMP: Directives and clauses.
/// Parse clauses for '#pragma omp declare simd'.
DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr,
CachedTokens &Toks,
SourceLocation Loc);
/// Parse a property kind into \p TIProperty for the selector set \p Set and
/// selector \p Selector.
void parseOMPTraitPropertyKind(OMPTraitInfo::OMPTraitProperty &TIProperty,
llvm::omp::TraitSet Set,
llvm::omp::TraitSelector Selector,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector kind into \p TISelector for the selector set \p Set.
void parseOMPTraitSelectorKind(OMPTraitInfo::OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parse a selector set kind into \p TISet.
void parseOMPTraitSetKind(OMPTraitInfo::OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context property.
void parseOMPContextProperty(OMPTraitInfo::OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &Seen);
/// Parses an OpenMP context selector.
void parseOMPContextSelector(OMPTraitInfo::OMPTraitSelector &TISelector,
llvm::omp::TraitSet Set,
llvm::StringMap<SourceLocation> &SeenSelectors);
/// Parses an OpenMP context selector set.
void parseOMPContextSelectorSet(OMPTraitInfo::OMPTraitSet &TISet,
llvm::StringMap<SourceLocation> &SeenSets);
/// Parses OpenMP context selectors.
bool parseOMPContextSelectors(SourceLocation Loc, OMPTraitInfo &TI);
/// Parse clauses for '#pragma omp declare variant'.
void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks,
SourceLocation Loc);
/// Parse clauses for '#pragma omp declare target'.
DeclGroupPtrTy ParseOMPDeclareTargetClauses();
/// Parse '#pragma omp end declare target'.
void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind,
SourceLocation Loc);
/// Parses declarative OpenMP directives.
DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl(
AccessSpecifier &AS, ParsedAttributesWithRange &Attrs,
bool Delayed = false, DeclSpec::TST TagType = DeclSpec::TST_unspecified,
Decl *TagDecl = nullptr);
/// Parse 'omp declare reduction' construct.
DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS);
/// Parses the initializer for the provided omp_priv declaration inside the
/// reduction initializer.
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm);
/// Parses 'omp declare mapper' directive.
DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS);
/// Parses variable declaration in 'omp declare mapper' directive.
TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
DeclarationName &Name,
AccessSpecifier AS = AS_none);
/// Parses simple list of variables.
///
/// \param Kind Kind of the directive.
/// \param Callback Callback function to be called for the list elements.
/// \param AllowScopeSpecifier true, if the variables can have fully
/// qualified names.
///
bool ParseOpenMPSimpleVarList(
OpenMPDirectiveKind Kind,
const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> &
Callback,
bool AllowScopeSpecifier);
/// Parses declarative or executable directive.
///
/// \param StmtCtx The context in which we're parsing the directive.
StmtResult
ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx);
/// Parses clause of kind \a CKind for directive of a kind \a Kind.
///
/// \param DKind Kind of current directive.
/// \param CKind Kind of current clause.
/// \param FirstClause true, if this is the first clause of a kind \a CKind
/// in current directive.
///
OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause);
/// Parses clause with a single expression of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses simple clause of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly);
/// Parses clause with a single expression and an additional argument
/// of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind,
bool ParseOnly);
/// Parses clause without any additional arguments.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false);
/// Parses clause with the list of variables of a kind \a Kind.
///
/// \param Kind Kind of current clause.
/// \param ParseOnly true to skip the clause's semantic actions and return
/// nullptr.
///
OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind, bool ParseOnly);
public:
/// Parses simple expression in parens for single-expression clauses of OpenMP
/// constructs.
/// \param RLoc Returned location of right paren.
ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc,
bool IsAddressOfOperand = false);
/// Data used for parsing list of variables in OpenMP clauses.
struct OpenMPVarListDataTy {
Expr *TailExpr = nullptr;
SourceLocation ColonLoc;
SourceLocation RLoc;
CXXScopeSpec ReductionOrMapperIdScopeSpec;
DeclarationNameInfo ReductionOrMapperId;
int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or
///< lastprivate clause.
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers>
MapTypeModifiers;
SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers>
MapTypeModifiersLoc;
bool IsMapTypeImplicit = false;
SourceLocation DepLinMapLastLoc;
};
/// Parses clauses with list.
bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
OpenMPVarListDataTy &Data);
bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext,
bool AllowDestructorName,
bool AllowConstructorName,
bool AllowDeductionGuide,
ParsedType ObjectType,
SourceLocation *TemplateKWLoc,
UnqualifiedId &Result);
/// Parses the mapper modifier in map, to, and from clauses.
bool parseMapperModifier(OpenMPVarListDataTy &Data);
/// Parses map-type-modifiers in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier)
bool parseMapTypeModifiers(OpenMPVarListDataTy &Data);
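  // For instance, given 'map(always, close, mapper(m), tofrom : a[0:n])',
  // the modifiers 'always', 'close' and 'mapper(m)' are consumed by
  // parseMapTypeModifiers(), 'tofrom' is the map-type and 'a[0:n]' is the
  // list (illustrative example only).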
private:
//===--------------------------------------------------------------------===//
// C++ 14: Templates [temp]
// C++ 14.1: Template Parameters [temp.param]
Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS);
Decl *ParseSingleDeclarationAfterTemplate(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none);
bool ParseTemplateParameters(unsigned Depth,
SmallVectorImpl<NamedDecl *> &TemplateParams,
SourceLocation &LAngleLoc,
SourceLocation &RAngleLoc);
bool ParseTemplateParameterList(unsigned Depth,
SmallVectorImpl<NamedDecl*> &TemplateParams);
TPResult isStartOfTemplateTypeParameter();
NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position);
NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position);
bool isTypeConstraintAnnotation();
bool TryAnnotateTypeConstraint();
NamedDecl *
ParseConstrainedTemplateTypeParameter(unsigned Depth, unsigned Position);
void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc,
SourceLocation CorrectLoc,
bool AlreadyHasEllipsis,
bool IdentifierHasName);
void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc,
Declarator &D);
// C++ 14.3: Template arguments [temp.arg]
typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList;
bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc,
bool ConsumeLastToken,
bool ObjCGenericList);
bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
SourceLocation &RAngleLoc);
bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &TemplateName,
bool AllowTypeAnnotation = true,
bool TypeConstraint = false);
void AnnotateTemplateIdTokenAsType(CXXScopeSpec &SS,
bool IsClassName = false);
bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs);
ParsedTemplateArgument ParseTemplateTemplateArgument();
ParsedTemplateArgument ParseTemplateArgument();
Decl *ParseExplicitInstantiation(DeclaratorContext Context,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
SourceLocation &DeclEnd,
ParsedAttributes &AccessAttrs,
AccessSpecifier AS = AS_none);
// C++2a: Template, concept definition [temp]
Decl *
ParseConceptDefinition(const ParsedTemplateInfo &TemplateInfo,
SourceLocation &DeclEnd);
//===--------------------------------------------------------------------===//
// Modules
DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl);
Decl *ParseModuleImport(SourceLocation AtLoc);
bool parseMisplacedModuleImport();
bool tryParseMisplacedModuleImport() {
tok::TokenKind Kind = Tok.getKind();
if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end ||
Kind == tok::annot_module_include)
return parseMisplacedModuleImport();
return false;
}
bool ParseModuleName(
SourceLocation UseLoc,
SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path,
bool IsImport);
//===--------------------------------------------------------------------===//
// C++11/G++: Type Traits [Type-Traits.html in the GCC manual]
ExprResult ParseTypeTrait();
/// Parse the given string as a type.
///
/// This is a dangerous utility function currently employed only by API notes.
/// It is not a general entry-point for safely parsing types from strings.
///
/// \param typeStr The string to be parsed as a type.
/// \param context The name of the context in which this string is being
/// parsed, which will be used in diagnostics.
/// \param includeLoc The location at which this parse was triggered.
TypeResult parseTypeFromString(StringRef typeStr, StringRef context,
SourceLocation includeLoc);
//===--------------------------------------------------------------------===//
  // Embarcadero: Array and Expression Traits
ExprResult ParseArrayTypeTrait();
ExprResult ParseExpressionTrait();
ExprResult ParseBuiltinPtrauthTypeDiscriminator();
//===--------------------------------------------------------------------===//
// Preprocessor code-completion pass-through
void CodeCompleteDirective(bool InConditional) override;
void CodeCompleteInConditionalExclusion() override;
void CodeCompleteMacroName(bool IsDefinition) override;
void CodeCompletePreprocessorExpression() override;
void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo,
unsigned ArgumentIndex) override;
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override;
void CodeCompleteNaturalLanguage() override;
};
} // end namespace clang
#endif
|
c_fft.c | /* ***********************************************************************
This program is part of the
OpenMP Source Code Repository
http://www.pcg.ull.es/ompscr/
e-mail: ompscr@etsii.ull.es
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
(LICENSE file) along with this program; if not, write to
the Free Software Foundation, Inc., 59 Temple Place, Suite 330,
Boston, MA 02111-1307 USA
FILE: c_fft.c
VERSION: 1.0
DATE: May 2004
AUTHOR: F. de Sande
COMMENTS TO: sande@csi.ull.es
DESCRIPTION: This program computes the Fast Fourier Transform
on an input signal
COMMENTS: The algorithm uses a divide and conquer strategy
and the transform is computed as a combination of the
transforms of the even and odd terms of the original signal.
                The code requires nested parallelism.
                Function write_array() is provided only for debugging purposes
                (use a small size signal if you want to write it).
REFERENCES: James W. Cooley and John W. Tukey,
An Algorithm for the Machine Calculation of Complex Fourier Series,
Mathematics of Computation, 1965, vol. 19, no. 90, pg 297-301
http://en.wikipedia.org/wiki/Cooley-Tukey_FFT_algorithm
BASIC PRAGMAS: parallel for
USAGE: ./c_fft.par 8192
INPUT: The size of the input signal
OUTPUT: The code tests the correctness of the result for the input
FILE FORMATS: -
RESTRICTIONS: The size of the input signal MUST be a power of 2
REVISION HISTORY:
**************************************************************************/
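/* -----------------------------------------------------------------------
   Sketch of the recurrence (standard Cooley-Tukey, cf. the references
   above): let E[k] and O[k] be the size-N/2 transforms of the even and
   odd samples, and let W be a primitive N-th root of unity (precomputed
   by Roots()). Then, for 0 <= k < N/2:
       A[k]       = E[k] + W^k * O[k]
       A[k + N/2] = E[k] - W^k * O[k]
   This is exactly the butterfly computed by the combination stage of
   FFT() below, with B holding E, C holding O and pW pointing at W^k.
 * ----------------------------------------------------------------------- */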
#include "OmpSCR.h"
#include <math.h>
#define KILO (1024)
#define DEFAULT_SIZE_IN_KB (64)
#define NUM_ARGS 1
#define NUM_TIMERS 1
typedef double doubleType;
typedef struct {
doubleType re;
doubleType im;
} Complex;
/* -----------------------------------------------------------------------
PROTOTYPES
* ----------------------------------------------------------------------- */
void initialize(unsigned Size, Complex *a);
void write_array(unsigned Size, Complex *a);
int test_array(unsigned Size, Complex *a);
void FFT(Complex *A, Complex *a, Complex *W, unsigned N, unsigned stride, Complex *D);
void Roots(unsigned Size, Complex *W);
unsigned get_params(int argc, char *argv[]);
/* -----------------------------------------------------------------------
IMPLEMENTATION
* ----------------------------------------------------------------------- */
/* -----------------------------------------------------------------------
Routine: initialize
Description: Initialise a vector of complex numbers
Comment: all numbers have real part 1.0 and imaginary part 0.0
* ----------------------------------------------------------------------- */
void initialize(unsigned Size, Complex *a) {
unsigned i;
for(i = 0; i < Size; i++) {
a[i].re = 1.0;
a[i].im = 0.0;
}
}
/* -----------------------------------------------------------------------
Routine: write_array
Description: Display a vector of complex numbers
* ----------------------------------------------------------------------- */
void write_array(unsigned Size, Complex *a) {
unsigned i;
for(i = 0; i < Size; i++)
printf("a[%2u] = [%.8lf,%.8lf]\n", i, a[i].re, a[i].im);
}
/* -----------------------------------------------------------------------
Routine: test_array
   Description: Returns 1 (true) if the complex vector is of the form
[(Size,0),(0,0),...,(0,0)]
* ----------------------------------------------------------------------- */
int test_array(unsigned Size, Complex *a) {
register unsigned i;
unsigned OK = 1;
if((a[0].re == Size) && (a[0].im == 0)) {
for(i = 1; i < Size; i++)
if (a[i].re != 0.0 || a[i].im != 0.0) {
OK = 0;
break;
}
}
else OK = 0;
return OK;
}
/* -----------------------------------------------------------------------
Procedure: Roots
   Description: Computes the complex roots of unity used by the FFT
Parameters:
unsigned Size, number of roots to compute
Complex *W, vector containing the roots
* ----------------------------------------------------------------------- */
void Roots(unsigned Size, Complex *W) {
register unsigned i;
double phi;
Complex Omega;
phi = 4 * atan(1.0) / (double)Size; /* PI/Size */
Omega.re = cos(phi);
Omega.im = sin(phi);
W[0].re = 1.0;
W[0].im = 0.0;
for(i = 1; i < Size; i++) {
W[i].re = W[i-1].re * Omega.re - W[i-1].im * Omega.im;
W[i].im = W[i-1].re * Omega.im + W[i-1].im * Omega.re;
}
}
/* -----------------------------------------------------------------------
Procedure: FFT
Description: Recursive (divide and conquer) Fast Fourier Transform
Parameters:
Complex *A, transformed output signal
Complex *a, input signal
Complex *W, vector containing the roots
unsigned N, number of elements in a
     unsigned stride, distance between consecutive elements of a to be considered
     Complex *D, auxiliary vector used in the combination stage
* ----------------------------------------------------------------------- */
void FFT(Complex *A, Complex *a, Complex *W, unsigned N,
unsigned stride, Complex *D) {
Complex *B, *C;
Complex Aux, *pW;
unsigned n;
int i;
if (N == 1) {
A[0].re = a[0].re;
A[0].im = a[0].im;
}
else {
/* Division stage without copying input data */
n = (N >> 1); /* N = N div 2 */
/* Subproblems resolution stage */
#pragma omp parallel for schedule(dynamic)
for(i = 0; i <= 1; i++) {
FFT(D + i * n, a + i * stride, W, n, stride << 1, A + i * n);
}
/* Combination stage */
B = D;
C = D + n;
#pragma omp parallel for default(none) private(i, Aux, pW) shared(stride, n, A, B, C, W) schedule(dynamic)
for(i = 0; i <= n - 1; i++) {
pW = W + i * stride;
Aux.re = pW->re * C[i].re - pW->im * C[i].im;
Aux.im = pW->re * C[i].im + pW->im * C[i].re;
A[i].re = B[i].re + Aux.re;
A[i].im = B[i].im + Aux.im;
A[i+n].re = B[i].re - Aux.re;
A[i+n].im = B[i].im - Aux.im;
}
}
}
/* ----------------------------------------------------------------------- */
unsigned get_params(int argc, char *argv[]) {
char usage_str[] = "<size_in_Kb>";
unsigned sizeInKb;
if (argc == 2)
sizeInKb = atoi(argv[1]);
else
if (argc == 1)
sizeInKb = DEFAULT_SIZE_IN_KB;
else {
printf("\nUse: %s %s\n", argv[0], usage_str);
exit(-1);
}
printf("\nUse: %s %s\n", argv[0], usage_str);
printf("Running with Size: %d K\n", sizeInKb);
return sizeInKb;
}
/* ----------------------------------------------------------------------- */
int main(int argc, char *argv[]) {
unsigned N;
Complex *a, *A, *W, *D;
int NUMTHREADS;
char *PARAM_NAMES[NUM_ARGS] = {"Size of the input signal (in Kb)"};
char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time" };
char *DEFAULT_VALUES[NUM_ARGS] = {"64"};
NUMTHREADS = omp_get_max_threads();
OSCR_init (NUMTHREADS, "Divide and Conquer Fast Fourier Transform.", "Use 'fft' <size (in K)>", NUM_ARGS,
PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES,
argc, argv);
N = KILO * OSCR_getarg_int(1);
/* N = KILO * get_params(argc, argv); */
/* Memory allocation */
a = (Complex*)calloc(N, sizeof(Complex));
A = (Complex*)calloc(N, sizeof(Complex));
D = (Complex*)calloc(N, sizeof(Complex));
W = (Complex*)calloc(N>>1, sizeof(Complex));
if((a==NULL) || (A==NULL) || (D==NULL) || (W==NULL)) {
printf("Not enough memory initializing arrays\n");
exit(1);
}
initialize(N, a); /* Generate test input signal */
/* write_array(N, a); */
Roots(N >> 1, W); /* Initialise the vector of imaginary roots */
OSCR_timer_start(0);
FFT(A, a, W, N, 1, D);
OSCR_timer_stop(0);
/* write_array(N, A); */
/* Display results and time */
printf("Test array: ");
if (test_array(N, A))
printf("Ok\n");
else
printf("Fails\n");
OSCR_report(1, TIMERS_NAMES);
free(W);
free(D);
free(A);
free(a);
return 0;
}
/*
* vim:ts=2:sw=2:
*/
|
GB_binop__rminus_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__rminus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_08__rminus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_04__rminus_uint8)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint8)
// A*D function (colscale): GB (_AxD__rminus_uint8)
// D*A function (rowscale): GB (_DxB__rminus_uint8)
// C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint8)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint8)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint8)
// C=scalar+B GB (_bind1st__rminus_uint8)
// C=scalar+B' GB (_bind1st_tran__rminus_uint8)
// C=A+scalar GB (_bind2nd__rminus_uint8)
// C=A'+scalar GB (_bind2nd_tran__rminus_uint8)
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (bij - aij)
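// Note: rminus is "reverse minus": GB_BINOP(z,x,y,i,j) below expands to
// z = (y - x), so e.g. x = 3, y = 10 gives z = 7, whereas the minus
// operator would compute x - y (wrapping modulo 256 for uint8_t).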
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint8_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint8_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (y - x) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_RMINUS || GxB_NO_UINT8 || GxB_NO_RMINUS_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__rminus_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__rminus_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__rminus_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__rminus_uint8)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__rminus_uint8)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = GBX (Bx, p, false) ;
Cx [p] = (bij - x) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__rminus_uint8)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = GBX (Ax, p, false) ;
Cx [p] = (y - aij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij - x) ; \
}
GrB_Info GB (_bind1st_tran__rminus_uint8)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (y - aij) ; \
}
GrB_Info GB (_bind2nd_tran__rminus_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mhpTest3.c | void foo() {
int x;
x = 10;
#pragma omp barrier
int y;
y = 10;
}
void func1() {
int f11;
foo();
int f12;
}
void func2() {
int f21;
foo();
int f22;
}
int main() {
#pragma omp parallel
{
int a = 10;
if(a > 12)
func1();
else
func2();
}
}
|
nl_matrix.c | /*
* Copyright (c) 2004-2010, Bruno Levy
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* * Neither the name of the ALICE Project-Team nor the names of its
* contributors may be used to endorse or promote products derived from this
* software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
* If you modify this software, you should include a notice giving the
* name of the person performing the modification, the date of modification,
* and the reason for such modification.
*
* Contact: Bruno Levy
*
* levy@loria.fr
*
* ALICE Project
* LORIA, INRIA Lorraine,
* Campus Scientifique, BP 239
* 54506 VANDOEUVRE LES NANCY CEDEX
* FRANCE
*
*/
#include "nl_matrix.h"
#include "nl_superlu.h"
#include "nl_cholmod.h"
#include "nl_mkl.h"
#include "nl_context.h"
#include "nl_blas.h"
/*
 Silence warnings about the const cast in the comparison
 callback passed to qsort().
*/
#ifdef __clang__
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
/************************************************************************/
void nlDeleteMatrix(NLMatrix M) {
if(M == NULL) {
return;
}
M->destroy_func(M);
NL_DELETE(M);
}
void nlMultMatrixVector(
NLMatrix M, const double* x, double* y
) {
M->mult_func(M,x,y);
}
/************************************************************************/
void nlRowColumnConstruct(NLRowColumn* c) {
c->size = 0;
c->capacity = 0;
c->coeff = NULL;
}
void nlRowColumnDestroy(NLRowColumn* c) {
NL_DELETE_ARRAY(c->coeff);
c->size = 0;
c->capacity = 0;
}
void nlRowColumnGrow(NLRowColumn* c) {
if(c->capacity != 0) {
c->capacity = 2 * c->capacity;
c->coeff = NL_RENEW_ARRAY(NLCoeff, c->coeff, c->capacity);
} else {
c->capacity = 4;
c->coeff = NL_NEW_ARRAY(NLCoeff, c->capacity);
}
}
void nlRowColumnAdd(NLRowColumn* c, NLuint index, NLdouble value) {
NLuint i;
for(i=0; i<c->size; i++) {
if(c->coeff[i].index == index) {
c->coeff[i].value += value;
return;
}
}
if(c->size == c->capacity) {
nlRowColumnGrow(c);
}
c->coeff[c->size].index = index;
c->coeff[c->size].value = value;
c->size++;
}
/* Does not check whether the index already exists */
void nlRowColumnAppend(NLRowColumn* c, NLuint index, NLdouble value) {
if(c->size == c->capacity) {
nlRowColumnGrow(c);
}
c->coeff[c->size].index = index;
c->coeff[c->size].value = value;
c->size++;
}
void nlRowColumnZero(NLRowColumn* c) {
c->size = 0;
}
void nlRowColumnClear(NLRowColumn* c) {
c->size = 0;
c->capacity = 0;
NL_DELETE_ARRAY(c->coeff);
}
static int nlCoeffCompare(const void* p1, const void* p2) {
    /* qsort() expects a negative/zero/positive result, not a boolean */
    NLuint i1 = ((const NLCoeff*)(p1))->index;
    NLuint i2 = ((const NLCoeff*)(p2))->index;
    return (i1 > i2) - (i1 < i2);
}
void nlRowColumnSort(NLRowColumn* c) {
qsort(c->coeff, c->size, sizeof(NLCoeff), nlCoeffCompare);
}
/******************************************************************************/
/* CRSMatrix data structure */
/**
 * \brief Destroys an NLCRSMatrix
 * \details Only the memory allocated by the NLCRSMatrix is freed;
 *  the NLCRSMatrix structure itself is not freed.
* \param[in,out] M pointer to an NLCRSMatrix
* \relates NLCRSMatrix
*/
static void nlCRSMatrixDestroy(NLCRSMatrix* M) {
NL_DELETE_ARRAY(M->val);
NL_DELETE_ARRAY(M->rowptr);
NL_DELETE_ARRAY(M->colind);
NL_DELETE_ARRAY(M->sliceptr);
M->m = 0;
M->n = 0;
M->nslices = 0;
}
NLboolean nlCRSMatrixSave(NLCRSMatrix* M, const char* filename) {
#ifdef GARGANTUA
nl_arg_used(M);
nl_arg_used(filename);
nl_assert_not_reached; /* not implemented yet ! */
return NL_FALSE;
#else
NLuint nnz = M->rowptr[M->m];
    FILE* f = fopen(filename, "wb");
if(f == NULL) {
nlError("nlCRSMatrixSave", "Could not open file");
return NL_FALSE;
}
fwrite(&M->m, sizeof(NLuint), 1, f);
fwrite(&M->n, sizeof(NLuint), 1, f);
fwrite(&nnz, sizeof(NLuint), 1, f);
fwrite(M->rowptr, sizeof(NLuint), M->m+1, f);
fwrite(M->colind, sizeof(NLuint), nnz, f);
fwrite(M->val, sizeof(double), nnz, f);
    fclose(f);
    return NL_TRUE;
#endif
}
NLboolean nlCRSMatrixLoad(NLCRSMatrix* M, const char* filename) {
#ifdef GARGANTUA
nl_arg_used(M);
nl_arg_used(filename);
nl_assert_not_reached; /* not implemented yet ! */
return NL_FALSE;
#else
NLuint nnz = 0;
FILE* f = fopen(filename, "rb");
NLboolean truncated = NL_FALSE;
if(f == NULL) {
nlError("nlCRSMatrixLoad", "Could not open file");
return NL_FALSE;
}
truncated = truncated || (
fread(&M->m, sizeof(NLuint), 1, f) != 1 ||
fread(&M->n, sizeof(NLuint), 1, f) != 1 ||
fread(&nnz, sizeof(NLuint), 1, f) != 1
);
if(truncated) {
M->rowptr = NULL;
M->colind = NULL;
M->val = NULL;
} else {
M->rowptr = NL_NEW_ARRAY(NLuint, M->m+1);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
M->val = NL_NEW_ARRAY(double, nnz);
truncated = truncated || (
fread(M->rowptr, sizeof(NLuint), M->m+1, f) != M->m+1 ||
fread(M->colind, sizeof(NLuint), nnz, f) != nnz ||
fread(M->val, sizeof(double), nnz, f) != nnz
);
}
    if(truncated) {
        nlError("nlCRSMatrixLoad", "File appears to be truncated");
        NL_DELETE_ARRAY(M->rowptr);
        NL_DELETE_ARRAY(M->colind);
        NL_DELETE_ARRAY(M->val);
        fclose(f);
        return NL_FALSE;
} else {
M->nslices = 1;
M->sliceptr = NL_NEW_ARRAY(NLuint, M->nslices+1);
M->sliceptr[0] = 0;
M->sliceptr[1] = M->m;
}
fclose(f);
return NL_TRUE;
#endif
}
NLuint_big nlCRSMatrixNNZ(NLCRSMatrix* M) {
return M->rowptr[M->m];
}
static void nlCRSMatrixMultSlice(
NLCRSMatrix* M, const double* x, double* y, NLuint Ibegin, NLuint Iend
) {
NLuint i;
NLuint_big j;
for(i=Ibegin; i<Iend; ++i) {
double sum=0.0;
for(j=M->rowptr[i]; j<M->rowptr[i+1]; ++j) {
sum += M->val[j] * x[M->colind[j]];
}
y[i] = sum;
}
}
/**
* \brief Computes a matrix-vector product
* \param[in] M a pointer to the matrix
 * \param[in] x the vector to be multiplied, size = M->n
 * \param[out] y where to store the result, size = M->m
 * \relates NLCRSMatrix
*/
static void nlCRSMatrixMult(
NLCRSMatrix* M, const double* x, double* y
) {
int slice;
int nslices = (int)(M->nslices);
NLuint i,j;
NLuint_big jj;
NLdouble a;
if(M->symmetric_storage) {
for(i=0; i<M->m; ++i) {
y[i] = 0.0;
}
for(i=0; i<M->m; ++i) {
for(jj=M->rowptr[i]; jj<M->rowptr[i+1]; ++jj) {
a = M->val[jj];
j = M->colind[jj];
y[i] += a * x[j];
if(j != i) {
y[j] += a * x[i];
}
}
}
} else {
#if defined(_OPENMP)
#pragma omp parallel for private(slice)
#endif
for(slice=0; slice<nslices; ++slice) {
nlCRSMatrixMultSlice(
M,x,y,M->sliceptr[slice],M->sliceptr[slice+1]
);
}
}
nlHostBlas()->flops += (NLulong)(2*nlCRSMatrixNNZ(M));
}
void nlCRSMatrixConstruct(
NLCRSMatrix* M, NLuint m, NLuint n, NLuint_big nnz, NLuint nslices
) {
M->m = m;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
if(NLMultMatrixVector_MKL != NULL) {
M->mult_func = (NLMultMatrixVectorFunc)NLMultMatrixVector_MKL;
} else {
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
}
M->nslices = nslices;
M->val = NL_NEW_ARRAY(double, nnz);
M->rowptr = NL_NEW_ARRAY(NLuint_big, m+1);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
M->sliceptr = NL_NEW_ARRAY(NLuint, nslices+1);
M->symmetric_storage = NL_FALSE;
}
void nlCRSMatrixConstructSymmetric(
NLCRSMatrix* M, NLuint n, NLuint_big nnz
) {
M->m = n;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
M->nslices = 0;
M->val = NL_NEW_ARRAY(double, nnz);
M->rowptr = NL_NEW_ARRAY(NLuint_big, n+1);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
M->sliceptr = NULL;
M->symmetric_storage = NL_TRUE;
}
void nlCRSMatrixConstructPattern(
NLCRSMatrix* M, NLuint m, NLuint n
) {
M->m = m;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
if(NLMultMatrixVector_MKL != NULL) {
M->mult_func = (NLMultMatrixVectorFunc)NLMultMatrixVector_MKL;
} else {
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
}
M->nslices = 0;
M->val = NULL;
M->rowptr = NL_NEW_ARRAY(NLuint_big, m+1);
M->colind = NULL;
M->sliceptr = NULL;
M->symmetric_storage = NL_FALSE;
}
void nlCRSMatrixConstructPatternSymmetric(
NLCRSMatrix* M, NLuint n
) {
M->m = n;
M->n = n;
M->type = NL_MATRIX_CRS;
M->destroy_func = (NLDestroyMatrixFunc)nlCRSMatrixDestroy;
M->mult_func = (NLMultMatrixVectorFunc)nlCRSMatrixMult;
M->nslices = 0;
M->val = NULL;
M->rowptr = NL_NEW_ARRAY(NLuint_big, n+1);
M->colind = NULL;
M->sliceptr = NULL;
M->symmetric_storage = NL_TRUE;
}
void nlCRSMatrixPatternSetRowLength(
NLCRSMatrix* M, NLuint i, NLuint n
) {
nl_assert(i < M->m);
nl_assert(n <= M->n);
/* Test that matrix is in 'pattern' state */
nl_assert(M->colind == NULL);
nl_assert(M->val == NULL);
/* Store row length in rowptr */
M->rowptr[i+1] = (NLuint_big)(n);
}
void nlCRSMatrixComputeSlices(NLCRSMatrix* CRS);
void nlCRSMatrixComputeSlices(NLCRSMatrix* CRS) {
    NLuint_big slice_size;
    NLuint slice, cur_row;
    NLuint_big cur_bound, cur_NNZ;
    /* Create "slices" to be used by the parallel sparse matrix-vector
     * product: each slice receives approximately the same number of
     * non-zero coefficients. */
    if(CRS->sliceptr != NULL && CRS->nslices != 0) {
        slice_size = CRS->rowptr[CRS->m] / (NLuint_big)(CRS->nslices);
        cur_bound = slice_size;
        cur_NNZ = 0;
        cur_row = 0;
        CRS->sliceptr[0]=0;
        for(slice=1; slice<CRS->nslices; ++slice) {
            /* Invariant: cur_NNZ == rowptr[cur_row], the number of
             * non-zero coefficients in rows 0..cur_row-1. */
            while(cur_NNZ < cur_bound && cur_row < CRS->m) {
                cur_NNZ += CRS->rowptr[cur_row+1] - CRS->rowptr[cur_row];
                ++cur_row;
            }
            CRS->sliceptr[slice] = cur_row;
            cur_bound += slice_size;
        }
        CRS->sliceptr[CRS->nslices]=CRS->m;
    }
}
void nlCRSMatrixPatternCompile(NLCRSMatrix* M) {
NLuint nslices = nlGetNumThreads();
NLuint i;
NLuint_big nnz;
NLuint k;
/* Test that matrix is in 'pattern' state */
nl_assert(M->colind == NULL);
nl_assert(M->val == NULL);
for(i=0; i<M->m; ++i) {
M->rowptr[i+1] += M->rowptr[i];
}
nnz = M->rowptr[M->m];
M->val = NL_NEW_ARRAY(double, nnz);
M->colind = NL_NEW_ARRAY(NLuint, nnz);
for(k=0; k<nnz; ++k) {
M->colind[k] = (NLuint)(-1);
}
M->sliceptr = NL_NEW_ARRAY(NLuint, nslices+1);
M->nslices = nslices;
nlCRSMatrixComputeSlices(M);
}
void nlCRSMatrixAdd(
NLCRSMatrix* M, NLuint i, NLuint j, NLdouble value
) {
NLuint_big jj;
/* Test that matrix is in 'compiled' state */
nl_assert(M->colind != NULL);
nl_assert(M->val != NULL);
nl_assert(i < M->m);
nl_assert(j < M->n);
if(M->symmetric_storage && j > i) {
return;
}
for(jj=M->rowptr[i]; jj<M->rowptr[i+1]; ++jj) {
if(M->colind[jj] == j) {
M->val[jj] += value;
return;
} else if(M->colind[jj] == (NLuint)(-1)) {
M->colind[jj] = j;
M->val[jj] += value;
return;
}
}
    /* If this line is reached, it means that too many coefficients
     * were added to row i, i.e. a number of coefficients larger than
     * the row length previously declared with nlCRSMatrixPatternSetRowLength().
     */
nl_assert_not_reached;
}
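/*
 * Minimal usage sketch for the two-phase CRS pattern API above
 * (illustrative only; the function name and the NL_MATRIX_EXAMPLES guard
 * are hypothetical, everything else is declared in this file).
 * Builds the 2x2 matrix [[2, 1], [0, 3]].
 */
#ifdef NL_MATRIX_EXAMPLES
static void example_build_crs_pattern(void) {
    NLCRSMatrix* M = NL_NEW(NLCRSMatrix);
    nlCRSMatrixConstructPattern(M, 2, 2);
    /* Phase 1: declare the number of coefficients of each row */
    nlCRSMatrixPatternSetRowLength(M, 0, 2);
    nlCRSMatrixPatternSetRowLength(M, 1, 1);
    /* Phase 2: prefix-sum the row lengths and allocate val/colind */
    nlCRSMatrixPatternCompile(M);
    /* Coefficients can now be set (at most as many as declared per row) */
    nlCRSMatrixAdd(M, 0, 0, 2.0);
    nlCRSMatrixAdd(M, 0, 1, 1.0);
    nlCRSMatrixAdd(M, 1, 1, 3.0);
    nlDeleteMatrix((NLMatrix)M);
}
#endif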
/******************************************************************************/
/* SparseMatrix data structure */
static void nlSparseMatrixDestroyRowColumns(NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<M->m; i++) {
nlRowColumnDestroy(&(M->row[i]));
}
NL_DELETE_ARRAY(M->row);
}
M->storage = (NLenum)((int)(M->storage) & ~NL_MATRIX_STORE_ROWS);
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i=0; i<M->n; i++) {
nlRowColumnDestroy(&(M->column[i]));
}
NL_DELETE_ARRAY(M->column);
}
M->storage = (NLenum)((int)(M->storage) & ~NL_MATRIX_STORE_COLUMNS);
}
void nlSparseMatrixDestroy(NLSparseMatrix* M) {
nl_assert(M->type == NL_MATRIX_SPARSE_DYNAMIC);
nlSparseMatrixDestroyRowColumns(M);
NL_DELETE_ARRAY(M->diag);
#ifdef NL_PARANOID
NL_CLEAR(NLSparseMatrix,M);
#endif
}
void nlSparseMatrixAdd(NLSparseMatrix* M, NLuint i, NLuint j, NLdouble value) {
nl_parano_range_assert(i, 0, M->m - 1);
nl_parano_range_assert(j, 0, M->n - 1);
if((M->storage & NL_MATRIX_STORE_SYMMETRIC) && (j > i)) {
return;
}
if(i == j) {
M->diag[i] += value;
}
if(M->storage & NL_MATRIX_STORE_ROWS) {
nlRowColumnAdd(&(M->row[i]), j, value);
}
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
nlRowColumnAdd(&(M->column[j]), i, value);
}
}
static void nlSparseMatrixAddSparseMatrix(
NLSparseMatrix* M, double mul, const NLSparseMatrix* N
) {
NLuint i,j,ii,jj;
nl_assert(M->m == N->m);
nl_assert(M->n == N->n);
if(N->storage & NL_MATRIX_STORE_SYMMETRIC) {
nl_assert(M->storage & NL_MATRIX_STORE_SYMMETRIC);
}
if(N->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<N->m; ++i) {
for(jj=0; jj<N->row[i].size; ++jj) {
nlSparseMatrixAdd(
M,
i, N->row[i].coeff[jj].index,
mul*N->row[i].coeff[jj].value
);
}
}
} else {
nl_assert(N->storage & NL_MATRIX_STORE_COLUMNS);
for(j=0; j<N->n; ++j) {
for(ii=0; ii<N->column[j].size; ++ii) {
nlSparseMatrixAdd(
M,
N->column[j].coeff[ii].index, j,
mul*N->column[j].coeff[ii].value
);
}
}
}
}
static void nlSparseMatrixAddCRSMatrix(
NLSparseMatrix* M, double mul, const NLCRSMatrix* N
) {
NLuint i;
NLuint_big jj;
nl_assert(M->m == N->m);
nl_assert(M->n == N->n);
for(i=0; i<M->m; ++i) {
for(jj=N->rowptr[i]; jj<N->rowptr[i+1]; ++jj) {
nlSparseMatrixAdd(
M,
i,
N->colind[jj],
mul*N->val[jj]
);
}
}
}
void nlSparseMatrixAddMatrix(
NLSparseMatrix* M, double mul, const NLMatrix N
) {
nl_assert(M->m == N->m);
nl_assert(M->n == N->n);
if(N->type == NL_MATRIX_SPARSE_DYNAMIC) {
nlSparseMatrixAddSparseMatrix(M, mul, (const NLSparseMatrix*)N);
} else if(N->type == NL_MATRIX_CRS) {
nlSparseMatrixAddCRSMatrix(M, mul, (const NLCRSMatrix*)N);
} else {
nl_assert_not_reached;
}
}
void nlSparseMatrixZero( NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<M->m; i++) {
nlRowColumnZero(&(M->row[i]));
}
}
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i=0; i<M->n; i++) {
nlRowColumnZero(&(M->column[i]));
}
}
NL_CLEAR_ARRAY(NLdouble, M->diag, M->diag_size);
}
void nlSparseMatrixClear( NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i=0; i<M->m; i++) {
nlRowColumnClear(&(M->row[i]));
}
}
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i=0; i<M->n; i++) {
nlRowColumnClear(&(M->column[i]));
}
}
NL_CLEAR_ARRAY(NLdouble, M->diag, M->diag_size);
}
/* Returns the number of non-zero coefficients */
NLuint_big nlSparseMatrixNNZ( NLSparseMatrix* M) {
NLuint_big nnz = 0;
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i = 0; i<M->m; i++) {
nnz += (NLuint_big)(M->row[i].size);
}
} else if (M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i = 0; i<M->n; i++) {
nnz += (NLuint_big)(M->column[i].size);
}
} else {
nl_assert_not_reached;
}
return nnz;
}
void nlSparseMatrixSort( NLSparseMatrix* M) {
NLuint i;
if(M->storage & NL_MATRIX_STORE_ROWS) {
for(i = 0; i<M->m; i++) {
nlRowColumnSort(&(M->row[i]));
}
}
if (M->storage & NL_MATRIX_STORE_COLUMNS) {
for(i = 0; i<M->n; i++) {
nlRowColumnSort(&(M->column[i]));
}
}
}
void nlSparseMatrixMAddRow(
NLSparseMatrix* M, NLuint i1, double s, NLuint i2
) {
NLuint jj;
NLRowColumn* Ri2 = &(M->row[i2]);
NLCoeff* c = NULL;
nl_debug_assert(i1 < M->m);
nl_debug_assert(i2 < M->m);
for(jj=0; jj<Ri2->size; ++jj) {
c = &(Ri2->coeff[jj]);
nlSparseMatrixAdd(M, i1, c->index, s*c->value);
}
}
void nlSparseMatrixScaleRow(
NLSparseMatrix* M, NLuint i, double s
) {
NLuint jj;
NLRowColumn* Ri = &(M->row[i]);
NLCoeff* c = NULL;
nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
nl_assert(!(M->storage & NL_MATRIX_STORE_COLUMNS));
nl_debug_assert(i < M->m);
for(jj=0; jj<Ri->size; ++jj) {
c = &(Ri->coeff[jj]);
c->value *= s;
}
if(i < M->diag_size) {
M->diag[i] *= s;
}
}
void nlSparseMatrixZeroRow(
NLSparseMatrix* M, NLuint i
) {
NLRowColumn* Ri = &(M->row[i]);
nl_debug_assert(i < M->m);
Ri->size = 0;
if(i < M->diag_size) {
M->diag[i] = 0.0;
}
}
/*****************************************************************************/
/* SparseMatrix x Vector routines, internal helper routines */
static void nlSparseMatrix_mult_rows_symmetric(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
NLuint m = A->m;
NLuint i,ij;
NLCoeff* c = NULL;
for(i=0; i<m; i++) {
NLRowColumn* Ri = &(A->row[i]);
y[i] = 0;
for(ij=0; ij<Ri->size; ++ij) {
c = &(Ri->coeff[ij]);
y[i] += c->value * x[c->index];
if(i != c->index) {
y[c->index] += c->value * x[i];
}
}
}
}
static void nlSparseMatrix_mult_rows(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
/*
* Note: OpenMP does not like unsigned ints
* (causes some floating point exceptions),
* therefore I use here signed ints for all
* indices.
*/
int m = (int)(A->m);
int i,ij;
NLCoeff* c = NULL;
NLRowColumn* Ri = NULL;
#if defined(_OPENMP)
#pragma omp parallel for private(i,ij,c,Ri)
#endif
for(i=0; i<m; i++) {
Ri = &(A->row[i]);
y[i] = 0;
for(ij=0; ij<(int)(Ri->size); ij++) {
c = &(Ri->coeff[ij]);
y[i] += c->value * x[c->index];
}
}
}
static void nlSparseMatrix_mult_cols_symmetric(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
NLuint n = A->n;
NLuint j,ii;
NLCoeff* c = NULL;
for(j=0; j<n; j++) {
NLRowColumn* Cj = &(A->column[j]);
y[j] = 0;
for(ii=0; ii<Cj->size; ii++) {
c = &(Cj->coeff[ii]);
y[c->index] += c->value * x[j];
if(j != c->index) {
y[j] += c->value * x[c->index];
}
}
}
}
static void nlSparseMatrix_mult_cols(
NLSparseMatrix* A,
const NLdouble* x,
NLdouble* y
) {
NLuint n = A->n;
NLuint j,ii;
NLCoeff* c = NULL;
NL_CLEAR_ARRAY(NLdouble, y, A->m);
for(j=0; j<n; j++) {
NLRowColumn* Cj = &(A->column[j]);
for(ii=0; ii<Cj->size; ii++) {
c = &(Cj->coeff[ii]);
y[c->index] += c->value * x[j];
}
}
}
void nlSparseMatrixMult(
NLSparseMatrix* A, const NLdouble* x, NLdouble* y
) {
nl_assert(A->type == NL_MATRIX_SPARSE_DYNAMIC);
if(A->storage & NL_MATRIX_STORE_ROWS) {
if(A->storage & NL_MATRIX_STORE_SYMMETRIC) {
nlSparseMatrix_mult_rows_symmetric(A, x, y);
} else {
nlSparseMatrix_mult_rows(A, x, y);
}
} else {
if(A->storage & NL_MATRIX_STORE_SYMMETRIC) {
nlSparseMatrix_mult_cols_symmetric(A, x, y);
} else {
nlSparseMatrix_mult_cols(A, x, y);
}
}
nlHostBlas()->flops += (NLulong)(2*nlSparseMatrixNNZ(A));
}
NLMatrix nlSparseMatrixNew(
NLuint m, NLuint n, NLenum storage
) {
NLSparseMatrix* result = NL_NEW(NLSparseMatrix);
nlSparseMatrixConstruct(result, m, n, storage);
return (NLMatrix)result;
}
void nlSparseMatrixConstruct(
NLSparseMatrix* M, NLuint m, NLuint n, NLenum storage
) {
NLuint i;
M->m = m;
M->n = n;
M->type = NL_MATRIX_SPARSE_DYNAMIC;
M->destroy_func = (NLDestroyMatrixFunc)nlSparseMatrixDestroy;
M->mult_func = (NLMultMatrixVectorFunc)nlSparseMatrixMult;
M->storage = storage;
if(storage & NL_MATRIX_STORE_ROWS) {
M->row = NL_NEW_ARRAY(NLRowColumn, m);
M->row_capacity = m;
for(i=0; i<n; i++) {
nlRowColumnConstruct(&(M->row[i]));
}
} else {
M->row = NULL;
M->row_capacity = 0;
}
if(storage & NL_MATRIX_STORE_COLUMNS) {
M->column = NL_NEW_ARRAY(NLRowColumn, n);
M->column_capacity = n;
for(i=0; i<n; i++) {
nlRowColumnConstruct(&(M->column[i]));
}
} else {
M->column = NULL;
M->column_capacity = 0;
}
M->diag_size = MIN(m,n);
M->diag_capacity = M->diag_size;
M->diag = NL_NEW_ARRAY(NLdouble, M->diag_size);
}
/**
* \brief Adjusts the size of the diagonal of
 *  an NLSparseMatrix after the number of rows or
 *  columns has changed.
* \param[in,out] M a pointer to the sparse matrix.
*/
static void adjust_diag(NLSparseMatrix* M) {
    NLuint new_diag_size = MIN(M->m, M->n);
    NLuint i;
    if(new_diag_size > M->diag_size) {
        if(new_diag_size > M->diag_capacity) {
            M->diag_capacity *= 2;
            if(M->diag_capacity == 0) {
                M->diag_capacity = 16;
            }
            if(M->diag_capacity < new_diag_size) {
                M->diag_capacity = new_diag_size;
            }
            M->diag = NL_RENEW_ARRAY(double, M->diag, M->diag_capacity);
        }
        /* Zero the newly exposed entries (realloc does not initialize). */
        for(i=M->diag_size; i<new_diag_size; ++i) {
            M->diag[i] = 0.0;
        }
        M->diag_size = new_diag_size;
    }
}
void nlSparseMatrixAddRow( NLSparseMatrix* M) {
++M->m;
if(M->storage & NL_MATRIX_STORE_ROWS) {
if(M->m > M->row_capacity) {
M->row_capacity *= 2;
if(M->row_capacity == 0) {
M->row_capacity = 16;
}
M->row = NL_RENEW_ARRAY(
NLRowColumn, M->row, M->row_capacity
);
}
nlRowColumnConstruct(&(M->row[M->m-1]));
}
adjust_diag(M);
}
void nlSparseMatrixAddColumn( NLSparseMatrix* M) {
++M->n;
if(M->storage & NL_MATRIX_STORE_COLUMNS) {
if(M->n > M->column_capacity) {
M->column_capacity *= 2;
if(M->column_capacity == 0) {
M->column_capacity = 16;
}
M->column = NL_RENEW_ARRAY(
NLRowColumn, M->column, M->column_capacity
);
}
nlRowColumnConstruct(&(M->column[M->n-1]));
}
adjust_diag(M);
}
/*****************************************************************/
NLMatrix nlCRSMatrixNewFromSparseMatrix(NLSparseMatrix* M) {
NLuint_big nnz = nlSparseMatrixNNZ(M);
NLuint nslices = nlGetNumThreads();
NLuint i,ij,k;
NLCRSMatrix* CRS = NL_NEW(NLCRSMatrix);
nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
if(M->storage & NL_MATRIX_STORE_SYMMETRIC) {
nl_assert(M->m == M->n);
nlCRSMatrixConstructSymmetric(CRS, M->n, nnz);
} else {
nlCRSMatrixConstruct(CRS, M->m, M->n, nnz, nslices);
}
nlSparseMatrixSort(M);
/* Convert matrix to CRS format */
k=0;
for(i=0; i<M->m; ++i) {
NLRowColumn* Ri = &(M->row[i]);
CRS->rowptr[i] = k;
for(ij=0; ij<Ri->size; ij++) {
NLCoeff* c = &(Ri->coeff[ij]);
CRS->val[k] = c->value;
CRS->colind[k] = c->index;
++k;
}
}
CRS->rowptr[M->m] = k;
nlCRSMatrixComputeSlices(CRS);
return (NLMatrix)CRS;
}
NLMatrix nlCRSMatrixNewFromSparseMatrixSymmetric(NLSparseMatrix* M) {
NLuint_big nnz;
NLuint i,j,jj,k;
NLCRSMatrix* CRS = NL_NEW(NLCRSMatrix);
nl_assert(M->storage & NL_MATRIX_STORE_ROWS);
nl_assert(M->m == M->n);
nlSparseMatrixSort(M);
if(M->storage & NL_MATRIX_STORE_SYMMETRIC) {
nnz = nlSparseMatrixNNZ(M);
} else {
nnz = 0;
for(i=0; i<M->n; ++i) {
NLRowColumn* Ri = &M->row[i];
for(jj=0; jj<Ri->size; ++jj) {
j = Ri->coeff[jj].index;
if(j <= i) {
++nnz;
}
}
}
}
nlCRSMatrixConstructSymmetric(CRS, M->n, nnz);
k=0;
for(i=0; i<M->m; ++i) {
NLRowColumn* Ri = &(M->row[i]);
CRS->rowptr[i] = k;
for(jj=0; jj<Ri->size; ++jj) {
j = Ri->coeff[jj].index;
if((M->storage & NL_MATRIX_STORE_SYMMETRIC)) {
nl_debug_assert(j <= i);
}
if(j <= i) {
CRS->val[k] = Ri->coeff[jj].value;
CRS->colind[k] = j;
++k;
}
}
}
CRS->rowptr[M->m] = k;
return (NLMatrix)CRS;
}
void nlMatrixCompress(NLMatrix* M) {
NLMatrix result = NULL;
if(
(*M)->type == NL_MATRIX_CRS &&
nlExtensionIsInitialized_MKL()
) {
result = nlMKLMatrixNewFromCRSMatrix((NLCRSMatrix*)*M);
nlDeleteMatrix(*M);
*M = result;
return;
}
if((*M)->type != NL_MATRIX_SPARSE_DYNAMIC) {
return;
}
if(nlExtensionIsInitialized_MKL()) {
result = nlMKLMatrixNewFromSparseMatrix((NLSparseMatrix*)*M);
} else {
result = nlCRSMatrixNewFromSparseMatrix((NLSparseMatrix*)*M);
}
nlDeleteMatrix(*M);
*M = result;
}
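/*
 * Usage sketch (illustrative only; the function name and the
 * NL_MATRIX_EXAMPLES guard are hypothetical): assembles a matrix in the
 * dynamic format, compresses it to CRS, then applies it to a vector.
 */
#ifdef NL_MATRIX_EXAMPLES
static void example_assemble_compress_mult(void) {
    double x[3] = {1.0, 2.0, 3.0};
    double y[3];
    NLMatrix M = nlSparseMatrixNew(3, 3, NL_MATRIX_STORE_ROWS);
    nlSparseMatrixAdd((NLSparseMatrix*)M, 0, 0, 4.0);
    nlSparseMatrixAdd((NLSparseMatrix*)M, 1, 2, -1.0);
    nlSparseMatrixAdd((NLSparseMatrix*)M, 2, 1, 0.5);
    nlMatrixCompress(&M); /* NL_MATRIX_SPARSE_DYNAMIC -> CRS (or MKL) */
    nlMultMatrixVector(M, x, y); /* y = M x */
    nlDeleteMatrix(M);
}
#endif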
NLuint_big nlMatrixNNZ(NLMatrix M) {
if(M->type == NL_MATRIX_SPARSE_DYNAMIC) {
return nlSparseMatrixNNZ((NLSparseMatrix*)M);
} else if(M->type == NL_MATRIX_CRS) {
return nlCRSMatrixNNZ((NLCRSMatrix*)M);
}
return (NLuint_big)(M->m) * (NLuint_big)(M->n);
}
NLMatrix nlMatrixFactorize(NLMatrix M, NLenum solver) {
NLMatrix result = NULL;
switch(solver) {
case NL_SUPERLU_EXT:
case NL_PERM_SUPERLU_EXT:
case NL_SYMMETRIC_SUPERLU_EXT:
result = nlMatrixFactorize_SUPERLU(M,solver);
break;
case NL_CHOLMOD_EXT:
result = nlMatrixFactorize_CHOLMOD(M,solver);
break;
default:
nlError("nlMatrixFactorize","unknown solver");
}
return result;
}
/*****************************************************************/
/**
* \brief A matrix class implemented by a function.
*/
typedef struct {
/**
* \brief number of rows
*/
NLuint m;
/**
* \brief number of columns
*/
NLuint n;
/**
* \brief Matrix type
* \details One of NL_MATRIX_SPARSE_DYNAMIC,
* NL_MATRIX_CRS, NL_MATRIX_SUPERLU_EXT,
* NL_MATRIX_CHOLDMOD_EXT, NL_MATRIX_FUNCTION,
* NL_MATRIX_OTHER
*/
NLenum type;
/**
* \brief Destructor
*/
NLDestroyMatrixFunc destroy_func;
/**
* \brief Matrix x vector product (abstract matrix API,
* takes matrix, rhs and lhs)
*/
NLMultMatrixVectorFunc mult_func;
/**
* \brief Matrix x vector product (user API, only takes
* rhs and lhs).
*/
NLMatrixFunc matrix_func;
} NLFunctionMatrix;
static void nlFunctionMatrixDestroy(NLFunctionMatrix* M) {
(void)M; /* to avoid 'unused parameter' warning */
/*
* Nothing special to do,
* there is no dynamic allocated mem.
*/
}
static void nlFunctionMatrixMult(
NLFunctionMatrix* M, const NLdouble* x, NLdouble* y
) {
M->matrix_func(x,y);
}
NLMatrix nlMatrixNewFromFunction(NLuint m, NLuint n, NLMatrixFunc func) {
NLFunctionMatrix* result = NL_NEW(NLFunctionMatrix);
result->m = m;
result->n = n;
result->type = NL_MATRIX_FUNCTION;
result->destroy_func = (NLDestroyMatrixFunc)nlFunctionMatrixDestroy;
result->mult_func = (NLMultMatrixVectorFunc)nlFunctionMatrixMult;
result->matrix_func = func;
return (NLMatrix)result;
}
NLMatrixFunc nlMatrixGetFunction(NLMatrix M) {
if(M == NULL) {
return NULL;
}
if(M->type != NL_MATRIX_FUNCTION) {
return NULL;
}
return ((NLFunctionMatrix*)M)->matrix_func;
}
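/*
 * Usage sketch (illustrative only; names are hypothetical and the exact
 * NLMatrixFunc typedef is assumed from nl_matrix.h): wraps a matrix-free
 * operator, here the 3x3 identity, as an NLMatrix.
 */
#ifdef NL_MATRIX_EXAMPLES
static void example_identity_op(const double* x, double* y) {
    NLuint i;
    for(i=0; i<3; ++i) {
        y[i] = x[i];
    }
}
static void example_use_function_matrix(void) {
    double x[3] = {1.0, 2.0, 3.0};
    double y[3];
    NLMatrix M = nlMatrixNewFromFunction(3, 3, example_identity_op);
    nlMultMatrixVector(M, x, y); /* calls example_identity_op(x, y) */
    nlDeleteMatrix(M);
}
#endif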
/******************************************************************************/
/**
* \brief A matrix class that implements the product between two matrices.
*/
typedef struct {
/**
* \brief number of rows
*/
NLuint m;
/**
* \brief number of columns
*/
NLuint n;
/**
* \brief matrix type, NL_MATRIX_OTHER
*/
NLenum type;
/**
* \brief Destructor
*/
NLDestroyMatrixFunc destroy_func;
/**
* \brief Matrix x vector product (abstract matrix API,
* takes matrix, rhs and lhs)
*/
NLMultMatrixVectorFunc mult_func;
/**
* \brief Matrix x vector product (user API, only takes
* rhs and lhs).
*/
NLMatrixFunc matrix_func;
/**
* \brief First matrix of the product.
*/
NLMatrix M;
/**
* \brief NL_TRUE if memory ownership was transferred,
* NL_FALSE otherwise.
*/
NLboolean owns_M;
/**
* \brief Second matrix of the product.
*/
NLMatrix N;
/**
* \brief NL_TRUE if memory ownership was transferred,
* NL_FALSE otherwise.
*/
NLboolean owns_N;
/**
* \brief A temporary vector of dimension N->m (= M->n)
*/
NLdouble* work;
} NLMatrixProduct;
static void nlMatrixProductDestroy(NLMatrixProduct* P) {
NL_DELETE_ARRAY(P->work);
if(P->owns_M) {
nlDeleteMatrix(P->M); P->M = NULL;
}
if(P->owns_N) {
nlDeleteMatrix(P->N); P->N = NULL;
}
}
static void nlMatrixProductMult(
NLMatrixProduct* P, const NLdouble* x, NLdouble* y
) {
nlMultMatrixVector(P->N, x, P->work);
nlMultMatrixVector(P->M, P->work, y);
}
NLMatrix nlMatrixNewFromProduct(
NLMatrix M, NLboolean owns_M, NLMatrix N, NLboolean owns_N
) {
NLMatrixProduct* result = NL_NEW(NLMatrixProduct);
nl_assert(M->n == N->m);
result->m = M->m;
result->n = N->n;
result->type = NL_MATRIX_OTHER;
result->work = NL_NEW_ARRAY(NLdouble,N->m);
result->destroy_func = (NLDestroyMatrixFunc)nlMatrixProductDestroy;
result->mult_func = (NLMultMatrixVectorFunc)nlMatrixProductMult;
result->M = M;
result->owns_M = owns_M;
result->N = N;
result->owns_N = owns_N;
return (NLMatrix)result;
}
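/*
 * Usage sketch (illustrative only; names are hypothetical): composes two
 * operators as P = M * N without forming the product explicitly;
 * multiplying by P computes M (N x) through the 'work' vector.
 */
#ifdef NL_MATRIX_EXAMPLES
static void example_compose(NLMatrix M, NLMatrix N,
                            const double* x, double* y) {
    /* P does not take ownership: deleting P leaves M and N alive. */
    NLMatrix P = nlMatrixNewFromProduct(M, NL_FALSE, N, NL_FALSE);
    nlMultMatrixVector(P, x, y); /* y = M (N x) */
    nlDeleteMatrix(P);
}
#endif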
/******************************************************************************/
|
GB_binop__iseq_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__iseq_int64
// A.*B function (eWiseMult): GB_AemultB__iseq_int64
// A*D function (colscale): GB_AxD__iseq_int64
// D*A function (rowscale): GB_DxB__iseq_int64
// C+=B function (dense accum): GB_Cdense_accumB__iseq_int64
// C+=b function (dense accum): GB_Cdense_accumb__iseq_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_int64
// C=scalar+B GB_bind1st__iseq_int64
// C=scalar+B' GB_bind1st_tran__iseq_int64
// C=A+scalar GB_bind2nd__iseq_int64
// C=A'+scalar GB_bind2nd_tran__iseq_int64
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = (aij == bij)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int64_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x == y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISEQ || GxB_NO_INT64 || GxB_NO_ISEQ_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__iseq_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__iseq_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__iseq_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
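//------------------------------------------------------------------------------
// editorial note: eWiseAdd vs eWiseMult (illustrative sketch, not generated)
//------------------------------------------------------------------------------
// GB_AaddB applies the operator over the *union* of the patterns of A and B,
// while GB_AemultB applies it over the *intersection*. For two sorted sparse
// index lists, the emult case with the ISEQ operator reduces to the merge
// below, which keeps only positions present in both inputs; all names in
// this sketch are hypothetical.
#ifdef GB_EXAMPLE_EMULT_SKETCH
static void example_emult_iseq_int64
(
    const int64_t *Ai, const int64_t *Ax, int64_t anz,   // pattern+values of A
    const int64_t *Bi, const int64_t *Bx, int64_t bnz,   // pattern+values of B
    int64_t *Ci, int64_t *Cx, int64_t *cnz               // output C = A.*B
)
{
    int64_t pa = 0, pb = 0, pc = 0 ;
    while (pa < anz && pb < bnz)
    {
        if      (Ai [pa] < Bi [pb]) { pa++ ; }   // entry only in A: dropped
        else if (Ai [pa] > Bi [pb]) { pb++ ; }   // entry only in B: dropped
        else
        {
            Ci [pc] = Ai [pa] ;                  // in both: apply the op
            Cx [pc] = (int64_t) (Ax [pa] == Bx [pb]) ;
            pc++ ; pa++ ; pb++ ;
        }
    }
    (*cnz) = pc ;
}
#endif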
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__iseq_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = Bx [p] ;
Cx [p] = (x == bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__iseq_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = (aij == y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (x == aij) ; \
}
GrB_Info GB_bind1st_tran__iseq_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = (aij == y) ; \
}
GrB_Info GB_bind2nd_tran__iseq_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
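//------------------------------------------------------------------------------
// editorial note: ISEQ vs EQ (illustrative sketch, not generated code)
//------------------------------------------------------------------------------
// Unlike the EQ operator, whose result type is bool, ISEQ_INT64 returns the
// comparison as an int64_t 0/1, so C has the same type as A and B (see
// GB_CTYPE above). A scalar-bound application such as GB_bind1st__iseq_int64
// therefore reduces to the loop below; the function name is hypothetical.
#ifdef GB_EXAMPLE_ISEQ_SKETCH
static void example_iseq_bind1st (int64_t x, const int64_t *Bx,
    int64_t *Cx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++)
    {
        Cx [p] = (int64_t) (x == Bx [p]) ;  // 1 if equal, else 0, as int64_t
    }
}
#endif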
|
GB_unaryop__abs_int16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_int16_uint64
// op(A') function: GB_tran__abs_int16_uint64
// C type: int16_t
// A type: uint64_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = GB_IABS (aij)
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_IABS (x) ;
// casting
#define GB_CASTING(z, x) \
int16_t z = (int16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_int16_uint64
(
int16_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_int16_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
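//------------------------------------------------------------------------------
// editorial note: cast-then-op order (illustrative sketch, not generated code)
//------------------------------------------------------------------------------
// GB_CAST_OP above typecasts first and applies the op second, i.e.
// cij = GB_IABS ((int16_t) aij). The order matters: aij = 65535 (uint64_t)
// casts to int16_t -1 (an implementation-defined wrap on most platforms),
// and the abs then yields 1, not 65535. A standalone equivalent:
#ifdef GB_EXAMPLE_ABS_CAST_SKETCH
static inline int16_t example_abs_int16_uint64 (uint64_t aij)
{
    int16_t z = (int16_t) aij ;             // typecast first (may wrap)
    return (int16_t) ((z >= 0) ? z : -z) ;  // GB_IABS on the cast value
}
#endif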
|
GB_unaryop__ainv_uint16_uint64.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint16_uint64
// op(A') function: GB_tran__ainv_uint16_uint64
// C type: uint16_t
// A type: uint64_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint64_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, x) \
uint16_t z = (uint16_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT16 || GxB_NO_UINT64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint16_uint64
(
uint16_t *restrict Cx,
const uint64_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint16_uint64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
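//------------------------------------------------------------------------------
// editorial note: AINV on unsigned types (illustrative, not generated code)
//------------------------------------------------------------------------------
// The additive inverse z = -x is well-defined but modular for unsigned types:
// for uint16_t it computes (65536 - x) mod 65536, e.g. x = 1 gives z = 65535.
// The kernel above casts to uint16_t first, then negates, equivalent to:
#ifdef GB_EXAMPLE_AINV_SKETCH
static inline uint16_t example_ainv_uint16_uint64 (uint64_t aij)
{
    uint16_t z = (uint16_t) aij ;   // typecast first (truncates to 16 bits)
    return (uint16_t) (-z) ;        // modular negation: (65536 - z) mod 65536
}
#endif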
|
GB_unaryop__ainv_uint8_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__ainv_uint8_uint16
// op(A') function: GB_tran__ainv_uint8_uint16
// C type: uint8_t
// A type: uint16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = -aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = -x ;
// casting
#define GB_CASTING(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_AINV || GxB_NO_UINT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__ainv_uint8_uint16
(
uint8_t *Cx, // Cx and Ax may be aliased
uint16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__ainv_uint8_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
host_as_target.c | // Check that specifying device as omp_get_initial_device():
// - Doesn't cause the runtime to fail.
// - Offloads code to the host.
// - Doesn't transfer data. In this case, just check that neither host data nor
// default device data are affected by the specified transfers.
// - Works whether it's specified directly or as the default device.
// RUN: %libomptarget-compile-run-and-check-aarch64-unknown-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-powerpc64le-ibm-linux-gnu
// RUN: %libomptarget-compile-run-and-check-x86_64-pc-linux-gnu
// RUN: %libomptarget-compile-run-and-check-nvptx64-nvidia-cuda
#include <stdio.h>
#include <omp.h>
static void check(char *X, int Dev) {
printf(" host X = %c\n", *X);
#pragma omp target device(Dev)
printf("device X = %c\n", *X);
}
#define CHECK_DATA() check(&X, DevDefault)
int main(void) {
int DevDefault = omp_get_default_device();
int DevInit = omp_get_initial_device();
//--------------------------------------------------
// Initialize data on the host and default device.
//--------------------------------------------------
// CHECK: host X = h
// CHECK-NEXT: device X = d
char X = 'd';
#pragma omp target enter data map(to:X)
X = 'h';
CHECK_DATA();
//--------------------------------------------------
// Check behavior when specifying host directly.
//--------------------------------------------------
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target device(DevInit) map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target teams device(DevInit) num_teams(1) map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// Check that __kmpc_push_target_tripcount_mapper doesn't fail. I'm not sure
// how to check that it actually pushes to the initial device.
#pragma omp target teams device(DevInit) num_teams(1)
#pragma omp distribute
for (int i = 0; i < 2; ++i)
;
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target data device(DevInit) map(always,tofrom:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target enter data device(DevInit) map(always,to:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target exit data device(DevInit) map(always,from:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update device(DevInit) to(X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update device(DevInit) from(X)
;
CHECK_DATA();
//--------------------------------------------------
// Check behavior when device defaults to host.
//--------------------------------------------------
omp_set_default_device(DevInit);
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// CHECK-NEXT: omp_is_initial_device() = 1
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target teams num_teams(1) map(always,tofrom:X)
printf("omp_is_initial_device() = %d\n", omp_is_initial_device());
CHECK_DATA();
// Check that __kmpc_push_target_tripcount_mapper doesn't fail. I'm not sure
// how to check that it actually pushes to the initial device.
#pragma omp target teams num_teams(1)
#pragma omp distribute
for (int i = 0; i < 2; ++i)
;
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target data map(always,tofrom:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target enter data map(always,to:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target exit data map(always,from:X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update to(X)
;
CHECK_DATA();
// CHECK-NEXT: host X = h
// CHECK-NEXT: device X = d
#pragma omp target update from(X)
;
CHECK_DATA();
return 0;
}
|
PeptideIndexing.h | // --------------------------------------------------------------------------
// OpenMS -- Open-Source Mass Spectrometry
// --------------------------------------------------------------------------
// Copyright The OpenMS Team -- Eberhard Karls University Tuebingen,
// ETH Zurich, and Freie Universitaet Berlin 2002-2018.
//
// This software is released under a three-clause BSD license:
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
// * Neither the name of any author or any participating institution
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
// For a full list of authors, refer to the file AUTHORS.
// --------------------------------------------------------------------------
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL ANY OF THE AUTHORS OR THE CONTRIBUTING
// INSTITUTIONS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
// OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// --------------------------------------------------------------------------
// $Maintainer: Chris Bielow $
// $Authors: Andreas Bertsch, Chris Bielow $
// --------------------------------------------------------------------------
#pragma once
#include <OpenMS/ANALYSIS/ID/AhoCorasickAmbiguous.h>
#include <OpenMS/CHEMISTRY/ProteaseDigestion.h>
#include <OpenMS/CHEMISTRY/ProteaseDB.h>
#include <OpenMS/CONCEPT/LogStream.h>
#include <OpenMS/CONCEPT/ProgressLogger.h>
#include <OpenMS/DATASTRUCTURES/DefaultParamHandler.h>
#include <OpenMS/DATASTRUCTURES/FASTAContainer.h>
#include <OpenMS/DATASTRUCTURES/ListUtils.h>
#include <OpenMS/DATASTRUCTURES/StringUtils.h>
#include <OpenMS/DATASTRUCTURES/SeqanIncludeWrapper.h>
#include <OpenMS/FORMAT/FASTAFile.h>
#include <OpenMS/KERNEL/StandardTypes.h>
#include <OpenMS/METADATA/PeptideEvidence.h>
#include <OpenMS/METADATA/PeptideIdentification.h>
#include <OpenMS/METADATA/ProteinIdentification.h>
#include <OpenMS/SYSTEM/StopWatch.h>
#include <OpenMS/SYSTEM/SysInfo.h>
#include <atomic>
#include <algorithm>
#include <fstream>
namespace OpenMS
{
/**
@brief Refreshes the protein references for all peptide hits in a vector of PeptideIdentifications and adds target/decoy information.
All peptide and protein hits are annotated with target/decoy information, using the meta value "target_decoy". For proteins the possible values are "target" and "decoy",
depending on whether the protein accession contains the decoy pattern (parameter @p decoy_string) as a suffix or prefix, respectively (see parameter @p prefix).
For peptides, the possible values are "target", "decoy" and "target+decoy", depending on whether the peptide sequence is found only in target proteins,
only in decoy proteins, or in both. The target/decoy information is crucial for the @ref TOPP_FalseDiscoveryRate tool.
(For FDR calculations, "target+decoy" peptide hits count as target hits.)
@note Make sure that your protein names in the database contain a correctly formatted decoy string. This can be ensured by using @ref UTILS_DecoyDatabase.
If the decoy identifier is not recognized successfully all proteins will be assumed to stem from the target-part of the query.<br>
E.g., "sw|P33354_DECOY|YEHR_ECOLI Uncharacterized lipop..." is <b>invalid</b>, since the tool has no knowledge of how SwissProt entries are build up.
A correct identifier could be "DECOY_sw|P33354|YEHR_ECOLI Uncharacterized li ..." or "sw|P33354|YEHR_ECOLI_DECOY Uncharacterized li", depending on whether you are
using prefix or suffix annotation.<br>
Some helpful target/decoy statistics will be reported when done.
By default this tool will fail if an unmatched peptide occurs, i.e. if the database does not contain the corresponding protein.
You can force it to return successfully in this case by using the flag @p allow_unmatched.
Search engines (such as Mascot) will replace ambiguous amino acids ('B', 'J', 'Z' and 'X') in the protein database with unambiguous amino acids in the reported peptides, e.g. exchange 'X' with 'H'.
This will cause such peptides to not be found by exactly matching their sequences to the protein database.
However, we can recover these cases by using tolerant search for ambiguous amino acids in the protein sequence. This is done by default with up to four amino acids
per peptide hit. If you only want exact matches, set @p aaa_max to zero (but expect that unmatched peptides might occur)!
Leucine/Isoleucine:
Further complications can arise due to the presence of the isobaric amino acids isoleucine ('I') and leucine ('L') in protein sequences.
Since the two have the exact same chemical composition and mass, they generally cannot be distinguished by mass spectrometry.
If a peptide containing 'I' was reported as a match for a spectrum, a peptide containing 'L' instead would be an equally good match (and vice versa).
To account for this inherent ambiguity, setting the flag @p IL_equivalent causes 'I' and 'L' to be considered as indistinguishable.@n
For example, if the sequence "PEPTIDE" (matching "Protein1") was identified as a search hit,
but the database additionally contained "PEPTLDE" (matching "Protein2"), running PeptideIndexer with the @p IL_equivalent option would
report both "Protein1" and "Protein2" as accessions for "PEPTIDE".
(This is independent of ambiguous matching via @p aaa_max.)
Additionally, setting this flag will convert all 'J's in any protein sequence to 'I'. This way, no tolerant search is required for 'J' (but is still possible for all
the other ambiguous amino acids).
If @p write_protein_sequences is requested and @p IL_equivalent is set as well, both the I/L-version and unmodified protein sequences need to be stored internally.
This requires some extra memory, roughly equivalent to the size of the FASTA database file itself.
Enzyme specificity:
Once a peptide sequence is found in a protein sequence, this does <b>not</b> imply that the hit is valid! This is where enzyme specificity comes into play.
By default, we demand that the peptide is fully tryptic (i.e. the enzyme parameter is set to "trypsin" and specificity is "full").
So unless the peptide coincides with C- and/or N-terminus of the protein, the peptide's cleavage pattern should fulfill the trypsin cleavage rule [KR][^P].
We make two exceptions to the specificity constraints:
1) peptides starting at the second or third position of a protein are still considered N-terminally specific,
since the residues can be cleaved off in vivo; X!Tandem reports these peptides. For example, the two peptides ABAR and LABAR would both match a protein starting with MLABAR.
2) adventitious cleavage at Asp|Pro (Aspartate/D | Proline/P) is allowed for all enzymes (as supported by X!Tandem), i.e. counts as a proper cleavage site (see http://www.thegpm.org/tandem/release.html).
You can relax the requirements further by choosing <tt>semi-tryptic</tt> (only one of two "internal" termini must match requirements)
or <tt>none</tt> (essentially allowing all hits, no matter their context). These settings should not be used (due to high risk of reporting false positives),
unless the search engine was instructed to search peptides in the same way.
The FASTA file should not contain duplicate protein accessions (since accessions are not validated) if a correct unique-matching annotation is important (target/decoy annotation is still correct).
Threading:
This tool supports multiple threads (@p threads option) to speed up computation, at the cost of a little extra memory.
*/
class OPENMS_DLLAPI PeptideIndexing :
public DefaultParamHandler, public ProgressLogger
{
public:
/// Exit codes
enum ExitCodes
{
EXECUTION_OK,
DATABASE_EMPTY,
PEPTIDE_IDS_EMPTY,
ILLEGAL_PARAMETERS,
UNEXPECTED_RESULT
};
/// Default constructor
PeptideIndexing();
/// Default destructor
~PeptideIndexing() override;
/// forward for old interface and pyOpenMS; use run<T>() for more control
inline ExitCodes run(std::vector<FASTAFile::FASTAEntry>& proteins, std::vector<ProteinIdentification>& prot_ids, std::vector<PeptideIdentification>& pep_ids)
{
FASTAContainer<TFI_Vector> protein_container(proteins);
return run<TFI_Vector>(protein_container, prot_ids, pep_ids);
}
/**
@brief Re-index peptide identifications honoring enzyme cutting rules, ambiguous amino acids and target/decoy hits.
Template parameter 'T' can be either TFI_File or TFI_Vector. If the data is already available, use TFI_Vector and pass the vector.
If the data is still in a FASTA file and it's not needed afterwards for additional processing, use TFI_File and pass the filename.
PeptideIndexer refreshes target/decoy information and mapping of peptides to proteins.
The target/decoy information is crucial for the @ref TOPP_FalseDiscoveryRate tool. (For FDR calculations, "target+decoy" peptide hits count as target hits.)
PeptideIndexer allows for ambiguous amino acids (B|J|Z|X) in the protein database, but not in the peptide sequences.
For the latter only I/L can be treated as equivalent (see 'IL_equivalent' flag), but 'J' is not allowed.
Enzyme cutting rules and partial specificity can be specified.
Resulting protein hits appear in the order of the FASTA file, except for orphaned proteins, which will appear first with an empty target_decoy metavalue.
Duplicate protein accessions & sequences will not raise a warning, but create multiple hits (PeptideIndexer scans over the FASTA file once for efficiency
reasons, and thus might not see all accessions & sequences at once).
All peptide and protein hits are annotated with target/decoy information, using the meta value "target_decoy".
For proteins the possible values are "target" and "decoy", depending on whether the protein accession contains the decoy pattern (parameter @p decoy_string)
as a suffix or prefix, respectively (see parameter @p prefix).
Peptide hits are annotated with metavalue 'protein_references', and if matched to at least one protein also with metavalue 'target_decoy'.
The possible values for 'target_decoy' are "target", "decoy" and "target+decoy",
depending on whether the peptide sequence is found only in target proteins, only in decoy proteins, or in both. The metavalue is not present, if the peptide is unmatched.
Runtime: PeptideIndexer is usually very fast (loading and storing the data takes the most time) and search speed can be further improved (linearly) by using more threads.
Avoid allowing too many (>=4) ambiguous amino acids if your database contains long stretches of 'X' (exponential search space).
@param proteins A list of proteins -- either read piecewise from a FASTA file or as existing vector of FASTAEntries.
@param prot_ids Resulting protein identifications associated to pep_ids (will be re-written completely)
@param pep_ids Peptide identifications which should be search within @p proteins and then linked to @p prot_ids
@return Exit status codes.
*/
template<typename T>
ExitCodes run(FASTAContainer<T>& proteins, std::vector<ProteinIdentification>& prot_ids, std::vector<PeptideIdentification>& pep_ids)
{
// no decoy string provided? try to deduce from data
if (decoy_string_.empty())
{
auto r = DecoyHelper::findDecoyString(proteins);
proteins.reset();
if (!r.success)
{
r.is_prefix = true;
r.name = "DECOY_";
OPENMS_LOG_WARN << "Unable to determine decoy string automatically (not enough decoys were detected)! Using default " << (r.is_prefix ? "prefix" : "suffix") << " decoy string '" << r.name << "'\n"
<< "If you think that this is incorrect, please provide a decoy_string and its position manually!" << std::endl;
}
prefix_ = r.is_prefix;
decoy_string_ = r.name;
// decoy string and position was extracted successfully
OPENMS_LOG_INFO << "Using " << (prefix_ ? "prefix" : "suffix") << " decoy string '" << decoy_string_ << "'" << std::endl;
}
//---------------------------------------------------------------
// parsing parameters, correcting xtandem and MSGFPlus parameters
//---------------------------------------------------------------
ProteaseDigestion enzyme;
enzyme.setEnzyme(enzyme_name_);
enzyme.setSpecificity(enzyme.getSpecificityByName(enzyme_specificity_));
bool xtandem_fix_parameters = true, msgfplus_fix_parameters = true;
// specificity is none or semi? don't automate xtandem
if (enzyme.getSpecificity() == EnzymaticDigestion::SPEC_SEMI ||
enzyme.getSpecificity() == EnzymaticDigestion::SPEC_NONE)
{
xtandem_fix_parameters = false;
}
// determine if search engine is solely xtandem or MSGFPlus
for (const auto& prot_id : prot_ids)
{
String search_engine = prot_id.getSearchEngine();
StringUtils::toUpper(search_engine);
if (search_engine != "XTANDEM") { xtandem_fix_parameters = false; }
if (!(search_engine == "MSGFPLUS" || search_engine == "MS-GF+")) { msgfplus_fix_parameters = false; }
}
// solely MSGFPlus -> Trypsin/P as enzyme
if (msgfplus_fix_parameters && enzyme.getEnzymeName() == "Trypsin")
{
OPENMS_LOG_WARN << "MSGFPlus detected but enzyme cutting rules were set to Trypsin. Correcting to Trypsin/P to copy with special cutting rule in MSGFPlus." << std::endl;
enzyme.setEnzyme("Trypsin/P");
}
//-------------------------------------------------------------
// calculations
//-------------------------------------------------------------
// cache the first proteins
const size_t PROTEIN_CACHE_SIZE = 4e5; // 400k should be enough for most DB's and is not too hard on memory either (~200 MB FASTA)
this->startProgress(0, 1, "Load first DB chunk");
proteins.cacheChunk(PROTEIN_CACHE_SIZE);
this->endProgress();
if (proteins.empty()) // we do not allow an empty database
{
OPENMS_LOG_ERROR << "Error: An empty database was provided. Mapping makes no sense. Aborting..." << std::endl;
return DATABASE_EMPTY;
}
if (pep_ids.empty()) // Aho-Corasick requires non-empty input; but we allow this case, since the TOPP tool should not crash when encountering a bad raw file (with no PSMs)
{
OPENMS_LOG_WARN << "Warning: An empty set of peptide identifications was provided. Output will be empty as well." << std::endl;
if (!keep_unreferenced_proteins_)
{
// delete only protein hits, not whole ID runs incl. meta data:
for (std::vector<ProteinIdentification>::iterator it = prot_ids.begin();
it != prot_ids.end(); ++it)
{
it->getHits().clear();
}
}
return PEPTIDE_IDS_EMPTY;
}
FoundProteinFunctor func(enzyme, xtandem_fix_parameters); // store the matches
Map<String, Size> acc_to_prot; // map: accessions --> FASTA protein index
std::vector<bool> protein_is_decoy; // protein index -> is decoy?
std::vector<std::string> protein_accessions; // protein index -> accession
bool invalid_protein_sequence = false; // check for proteins with modifications, i.e. '[' or '(', and throw an exception
{ // new scope - forget data after search
/*
BUILD Peptide DB
*/
bool has_illegal_AAs(false);
AhoCorasickAmbiguous::PeptideDB pep_DB;
for (std::vector<PeptideIdentification>::const_iterator it1 = pep_ids.begin(); it1 != pep_ids.end(); ++it1)
{
//String run_id = it1->getIdentifier();
const std::vector<PeptideHit>& hits = it1->getHits();
for (std::vector<PeptideHit>::const_iterator it2 = hits.begin(); it2 != hits.end(); ++it2)
{
//
// Warning:
// do not skip over peptides here, since the results are iterated in the same way
//
String seq = it2->getSequence().toUnmodifiedString().remove('*'); // make a copy, i.e. do NOT change the peptide sequence!
if (seqan::isAmbiguous(seqan::AAString(seq.c_str())))
{ // do not quit here, to show the user all sequences .. only quit after loop
OPENMS_LOG_ERROR << "Peptide sequence '" << it2->getSequence() << "' contains one or more ambiguous amino acids (B|J|Z|X).\n";
has_illegal_AAs = true;
}
if (IL_equivalent_) // convert L to I;
{
seq.substitute('L', 'I');
}
appendValue(pep_DB, seq.c_str());
}
}
if (has_illegal_AAs)
{
OPENMS_LOG_ERROR << "One or more peptides contained illegal amino acids. This is not allowed!"
<< "\nPlease either remove the peptide or replace it with one of the unambiguous ones (while allowing for ambiguous AA's to match the protein)." << std::endl;;
}
OPENMS_LOG_INFO << "Mapping " << length(pep_DB) << " peptides to " << (proteins.size() == PROTEIN_CACHE_SIZE ? "? (unknown number of)" : String(proteins.size())) << " proteins." << std::endl;
if (length(pep_DB) == 0)
{ // Aho-Corasick will crash if given empty needles as input
OPENMS_LOG_WARN << "Warning: Peptide identifications have no hits inside! Output will be empty as well." << std::endl;
return PEPTIDE_IDS_EMPTY;
}
/*
Aho Corasick (fast)
*/
OPENMS_LOG_INFO << "Searching with up to " << aaa_max_ << " ambiguous amino acid(s) and " << mm_max_ << " mismatch(es)!" << std::endl;
SysInfo::MemUsage mu;
OPENMS_LOG_INFO << "Building trie ...";
StopWatch s;
s.start();
AhoCorasickAmbiguous::FuzzyACPattern pattern;
AhoCorasickAmbiguous::initPattern(pep_DB, aaa_max_, mm_max_, pattern);
s.stop();
OPENMS_LOG_INFO << " done (" << int(s.getClockTime()) << "s)" << std::endl;
s.reset();
uint16_t count_j_proteins(0);
bool has_active_data = true; // becomes false if end of FASTA file is reached
const std::string jumpX(aaa_max_ + mm_max_ + 1, 'X'); // jump over stretches of 'X' which cost a lot of time; +1 because AXXA is a valid hit for aaa_max == 2 (cannot split it)
// use very large target value for progress if DB size is unknown (did not fit into first chunk)
this->startProgress(0, proteins.size() == PROTEIN_CACHE_SIZE ? std::numeric_limits<SignedSize>::max() : proteins.size(), "Aho-Corasick");
std::atomic<int> progress_prots(0);
#ifdef _OPENMP
#pragma omp parallel
#endif
{
FoundProteinFunctor func_threads(enzyme, xtandem_fix_parameters);
Map<String, Size> acc_to_prot_thread; // map: accessions --> FASTA protein index
AhoCorasickAmbiguous fuzzyAC;
String prot;
while (true)
{
#pragma omp barrier // all threads need to be here, since we are about to swap protein data
#pragma omp single
{
DEBUG_ONLY std::cerr << " activating cache ...\n";
has_active_data = proteins.activateCache(); // swap in last cache
protein_accessions.resize(proteins.getChunkOffset() + proteins.chunkSize());
} // implicit barrier here
if (!has_active_data) break; // leave while-loop
SignedSize prot_count = (SignedSize)proteins.chunkSize();
#pragma omp master
{
DEBUG_ONLY std::cerr << "Filling Protein Cache ...";
proteins.cacheChunk(PROTEIN_CACHE_SIZE);
protein_is_decoy.resize(proteins.getChunkOffset() + prot_count);
for (SignedSize i = 0; i < prot_count; ++i)
{ // do this in master only, to avoid false sharing
const String& seq = proteins.chunkAt(i).identifier;
protein_is_decoy[i + proteins.getChunkOffset()] = (prefix_ ? seq.hasPrefix(decoy_string_) : seq.hasSuffix(decoy_string_));
}
DEBUG_ONLY std::cerr << " done" << std::endl;
}
DEBUG_ONLY std::cerr << " starting for loop \n";
// search all peptides in each protein
#pragma omp for schedule(dynamic, 100) nowait
for (SignedSize i = 0; i < prot_count; ++i)
{
++progress_prots; // atomic
if (omp_get_thread_num() == 0)
{
this->setProgress(progress_prots);
}
prot = proteins.chunkAt(i).sequence;
prot.remove('*');
// check for invalid sequences with modifications
if (prot.has('[') || prot.has('('))
{
invalid_protein_sequence = true; // not omp-critical because it's write-only
// we cannot throw an exception here, since we'd need to catch it within the parallel region
}
// convert L/J to I; also replace 'J' in proteins
if (IL_equivalent_)
{
prot.substitute('L', 'I');
prot.substitute('J', 'I');
}
else
{ // warn if 'J' is found (it eats into aaa_max)
if (prot.has('J'))
{
#pragma omp atomic
++count_j_proteins;
}
}
Size prot_idx = i + proteins.getChunkOffset();
// test if protein was a hit
Size hits_total = func_threads.filter_passed + func_threads.filter_rejected;
// check if there are stretches of 'X'
if (prot.has('X'))
{
// create chunks of the protein (splitting it at stretches of 'X..X') and feed them to AC one by one
size_t offset = -1, start = 0;
while ((offset = prot.find(jumpX, offset + 1)) != std::string::npos)
{
//std::cout << "found X..X at " << offset << " in protein " << proteins[i].identifier << "\n";
addHits_(fuzzyAC, pattern, pep_DB, prot.substr(start, offset + jumpX.size() - start), prot, prot_idx, (int)start, func_threads);
// skip ahead while we encounter more X...
while (offset + jumpX.size() < prot.size() && prot[offset + jumpX.size()] == 'X') ++offset;
start = offset;
//std::cout << " new start: " << start << "\n";
}
// last chunk
if (start < prot.size())
{
addHits_(fuzzyAC, pattern, pep_DB, prot.substr(start), prot, prot_idx, (int)start, func_threads);
}
}
else
{
addHits_(fuzzyAC, pattern, pep_DB, prot, prot, prot_idx, 0, func_threads);
}
// was protein found?
if (hits_total < func_threads.filter_passed + func_threads.filter_rejected)
{
protein_accessions[prot_idx] = proteins.chunkAt(i).identifier;
acc_to_prot_thread[protein_accessions[prot_idx]] = prot_idx;
}
} // end parallel FOR
// join results again
DEBUG_ONLY std::cerr << " critical now \n";
#ifdef _OPENMP
#pragma omp critical(PeptideIndexer_joinAC)
#endif
{
s.start();
// hits
func.merge(func_threads);
// accession -> index
acc_to_prot.insert(acc_to_prot_thread.begin(), acc_to_prot_thread.end());
acc_to_prot_thread.clear();
s.stop();
} // OMP end critical
} // end readChunk
} // OMP end parallel
this->endProgress();
std::cout << "Merge took: " << s.toString() << "\n";
mu.after();
std::cout << mu.delta("Aho-Corasick") << "\n\n";
OPENMS_LOG_INFO << "\nAho-Corasick done:\n found " << func.filter_passed << " hits for " << func.pep_to_prot.size() << " of " << length(pep_DB) << " peptides.\n";
// write some stats
OPENMS_LOG_INFO << "Peptide hits passing enzyme filter: " << func.filter_passed << "\n"
<< " ... rejected by enzyme filter: " << func.filter_rejected << std::endl;
if (count_j_proteins)
{
OPENMS_LOG_WARN << "PeptideIndexer found " << count_j_proteins << " protein sequences in your database containing the amino acid 'J'."
<< "To match 'J' in a protein, an ambiguous amino acid placeholder for I/L will be used.\n"
<< "This costs runtime and eats into the 'aaa_max' limit, leaving less opportunity for B/Z/X matches.\n"
<< "If you want 'J' to be treated as unambiguous, enable '-IL_equivalent'!" << std::endl;
}
} // end local scope
//
// do mapping
//
// index existing proteins
Map<String, Size> runid_to_runidx; // identifier to index
for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
{
runid_to_runidx[prot_ids[run_idx].getIdentifier()] = run_idx;
}
// for peptides --> proteins
Size stats_matched_unique(0);
Size stats_matched_multi(0);
Size stats_unmatched(0); // no match to DB
Size stats_count_m_t(0); // match to Target DB
Size stats_count_m_d(0); // match to Decoy DB
Size stats_count_m_td(0); // match to T+D DB
Map<Size, std::set<Size> > runidx_to_protidx; // in which protID do appear which proteins (according to mapped peptides)
Size pep_idx(0);
for (std::vector<PeptideIdentification>::iterator it1 = pep_ids.begin(); it1 != pep_ids.end(); ++it1)
{
// which ProteinIdentification does the peptide belong to?
Size run_idx = runid_to_runidx[it1->getIdentifier()];
std::vector<PeptideHit>& hits = it1->getHits();
for (std::vector<PeptideHit>::iterator it2 = hits.begin(); it2 != hits.end(); ++it2)
{
// clear protein accessions
it2->setPeptideEvidences(std::vector<PeptideEvidence>());
//
// is this a decoy hit?
//
bool matches_target(false);
bool matches_decoy(false);
std::set<Size> prot_indices; /// protein hits of this peptide
// add new protein references
for (std::set<PeptideProteinMatchInformation>::const_iterator it_i = func.pep_to_prot[pep_idx].begin();
it_i != func.pep_to_prot[pep_idx].end(); ++it_i)
{
prot_indices.insert(it_i->protein_index);
const String& accession = protein_accessions[it_i->protein_index];
PeptideEvidence pe(accession, it_i->position, it_i->position + (int)it2->getSequence().size() - 1, it_i->AABefore, it_i->AAAfter);
it2->addPeptideEvidence(pe);
runidx_to_protidx[run_idx].insert(it_i->protein_index); // fill protein hits
if (protein_is_decoy[it_i->protein_index])
{
matches_decoy = true;
}
else
{
matches_target = true;
}
}
if (matches_decoy && matches_target)
{
it2->setMetaValue("target_decoy", "target+decoy");
++stats_count_m_td;
}
else if (matches_target)
{
it2->setMetaValue("target_decoy", "target");
++stats_count_m_t;
}
else if (matches_decoy)
{
it2->setMetaValue("target_decoy", "decoy");
++stats_count_m_d;
} // else: could match to no protein (i.e. both are false)
//else ... // not required (handled below; see stats_unmatched);
if (prot_indices.size() == 1)
{
it2->setMetaValue("protein_references", "unique");
++stats_matched_unique;
}
else if (prot_indices.size() > 1)
{
it2->setMetaValue("protein_references", "non-unique");
++stats_matched_multi;
}
else
{
it2->setMetaValue("protein_references", "unmatched");
++stats_unmatched;
if (stats_unmatched < 15) OPENMS_LOG_INFO << "Unmatched peptide: " << it2->getSequence() << "\n";
else if (stats_unmatched == 15) OPENMS_LOG_INFO << "Unmatched peptide: ...\n";
}
++pep_idx; // next hit
}
}
Size total_peptides = stats_count_m_t + stats_count_m_d + stats_count_m_td + stats_unmatched;
OPENMS_LOG_INFO << "-----------------------------------\n";
OPENMS_LOG_INFO << "Peptide statistics\n";
OPENMS_LOG_INFO << "\n";
OPENMS_LOG_INFO << " unmatched : " << stats_unmatched << " (" << stats_unmatched * 100 / total_peptides << " %)\n";
OPENMS_LOG_INFO << " target/decoy:\n";
OPENMS_LOG_INFO << " match to target DB only: " << stats_count_m_t << " (" << stats_count_m_t * 100 / total_peptides << " %)\n";
OPENMS_LOG_INFO << " match to decoy DB only : " << stats_count_m_d << " (" << stats_count_m_d * 100 / total_peptides << " %)\n";
OPENMS_LOG_INFO << " match to both : " << stats_count_m_td << " (" << stats_count_m_td * 100 / total_peptides << " %)\n";
OPENMS_LOG_INFO << "\n";
OPENMS_LOG_INFO << " mapping to proteins:\n";
OPENMS_LOG_INFO << " no match (to 0 protein) : " << stats_unmatched << "\n";
OPENMS_LOG_INFO << " unique match (to 1 protein) : " << stats_matched_unique << "\n";
OPENMS_LOG_INFO << " non-unique match (to >1 protein): " << stats_matched_multi << std::endl;
/// for proteins --> peptides
Size stats_matched_proteins(0), stats_matched_new_proteins(0), stats_orphaned_proteins(0), stats_proteins_target(0), stats_proteins_decoy(0);
// all peptides contain the correct protein hit references, now update the protein hits
for (Size run_idx = 0; run_idx < prot_ids.size(); ++run_idx)
{
std::set<Size> masterset = runidx_to_protidx[run_idx]; // all protein matches from above
std::vector<ProteinHit>& phits = prot_ids[run_idx].getHits();
{
// go through existing protein hits and count orphaned proteins (with no peptide hits)
std::vector<ProteinHit> orphaned_hits;
for (std::vector<ProteinHit>::iterator p_hit = phits.begin(); p_hit != phits.end(); ++p_hit)
{
const String& acc = p_hit->getAccession();
if (!acc_to_prot.has(acc)) // acc_to_prot only contains found proteins from current run
{ // old hit is orphaned
++stats_orphaned_proteins;
if (keep_unreferenced_proteins_)
{
p_hit->setMetaValue("target_decoy", "");
orphaned_hits.push_back(*p_hit);
}
}
}
// only keep orphaned hits (if any)
phits = orphaned_hits;
}
// add new protein hits
FASTAFile::FASTAEntry fe;
phits.reserve(phits.size() + masterset.size());
for (std::set<Size>::const_iterator it = masterset.begin(); it != masterset.end(); ++it)
{
ProteinHit hit;
hit.setAccession(protein_accessions[*it]);
if (write_protein_sequence_ || write_protein_description_)
{
proteins.readAt(fe, *it);
if (write_protein_sequence_)
{
hit.setSequence(fe.sequence);
} // no else, since sequence is empty by default
if (write_protein_description_)
{
hit.setDescription(fe.description);
} // no else, since description is empty by default
}
if (protein_is_decoy[*it])
{
hit.setMetaValue("target_decoy", "decoy");
++stats_proteins_decoy;
}
else
{
hit.setMetaValue("target_decoy", "target");
++stats_proteins_target;
}
phits.push_back(hit);
++stats_matched_new_proteins;
}
stats_matched_proteins += phits.size();
}
OPENMS_LOG_INFO << "-----------------------------------\n";
OPENMS_LOG_INFO << "Protein statistics\n";
OPENMS_LOG_INFO << "\n";
OPENMS_LOG_INFO << " total proteins searched: " << proteins.size() << "\n";
OPENMS_LOG_INFO << " matched proteins : " << stats_matched_proteins << " (" << stats_matched_new_proteins << " new)\n";
if (stats_matched_proteins)
{ // prevent Division-by-0 Exception
OPENMS_LOG_INFO << " matched target proteins: " << stats_proteins_target << " (" << stats_proteins_target * 100 / stats_matched_proteins << " %)\n";
OPENMS_LOG_INFO << " matched decoy proteins : " << stats_proteins_decoy << " (" << stats_proteins_decoy * 100 / stats_matched_proteins << " %)\n";
}
OPENMS_LOG_INFO << " orphaned proteins : " << stats_orphaned_proteins << (keep_unreferenced_proteins_ ? " (all kept)" : " (all removed)\n");
OPENMS_LOG_INFO << "-----------------------------------" << std::endl;
/// error handling: invalid protein sequences, missing decoy matches, unmatched peptides
bool has_error = false;
if (invalid_protein_sequence)
{
OPENMS_LOG_ERROR << "Error: One or more protein sequences contained the characters '[' or '(', which are illegal in protein sequences."
<< "\nPeptide hits might be masked by these characters (which usually indicate presence of modifications).\n";
has_error = true;
}
if ((stats_count_m_d + stats_count_m_td) == 0)
{
String msg("No peptides were matched to the decoy portion of the database! Did you provide the correct concatenated database? Are your 'decoy_string' (=" + String(decoy_string_) + ") and 'decoy_string_position' (=" + String(param_.getValue("decoy_string_position")) + ") settings correct?");
if (missing_decoy_action_ == "error")
{
OPENMS_LOG_ERROR << "Error: " << msg << "\nSet 'missing_decoy_action' to 'warn' if you are sure this is ok!\nAborting ..." << std::endl;
has_error = true;
}
else if (missing_decoy_action_ == "warn")
{
OPENMS_LOG_WARN << "Warn: " << msg << "\nSet 'missing_decoy_action' to 'error' if you want to elevate this to an error!" << std::endl;
}
else // silent
{
}
}
if ((!allow_unmatched_) && (stats_unmatched > 0))
{
OPENMS_LOG_ERROR << "PeptideIndexer found unmatched peptides, which could not be associated to a protein.\n"
<< "Potential solutions:\n"
<< " - check your FASTA database for completeness\n"
<< " - set 'enzyme:specificity' to match the identification parameters of the search engine\n"
<< " - some engines (e.g. X! Tandem) employ loose cutting rules generating non-tryptic peptides;\n"
<< " if you trust them, disable enzyme specificity\n"
<< " - increase 'aaa_max' to allow more ambiguous amino acids\n"
<< " - as a last resort: use the 'allow_unmatched' option to accept unmatched peptides\n"
<< " (note that unmatched peptides cannot be used for FDR calculation or quantification)\n";
has_error = true;
}
if (has_error)
{
OPENMS_LOG_ERROR << "Result files will be written, but PeptideIndexer will exit with an error code." << std::endl;
return UNEXPECTED_RESULT;
}
return EXECUTION_OK;
}
const String& getDecoyString() const;
bool isPrefix() const;
protected:
struct PeptideProteinMatchInformation
{
OpenMS::Size protein_index; //< index of the protein the peptide is contained in
OpenMS::Int position; //< the position of the peptide in the protein
char AABefore; //< the amino acid before the peptide in the protein
char AAAfter; //< the amino acid after the peptide in the protein
const std::tuple<const Size&, const Int&, const char&, const char&> tie() const
{
return std::tie(protein_index, position, AABefore, AAAfter);
}
bool operator<(const PeptideProteinMatchInformation& other) const
{
return tie() < other.tie();
}
bool operator==(const PeptideProteinMatchInformation& other) const
{
return tie() == other.tie();
}
};
struct FoundProteinFunctor
{
public:
typedef std::map<OpenMS::Size, std::set<PeptideProteinMatchInformation> > MapType;
MapType pep_to_prot; //< peptide index --> protein indices
OpenMS::Size filter_passed; //< number of accepted hits (passing addHit() constraints)
OpenMS::Size filter_rejected; //< number of rejected hits (not passing addHit())
private:
ProteaseDigestion enzyme_;
bool xtandem_; //< are we checking xtandem cleavage rules?
public:
explicit FoundProteinFunctor(const ProteaseDigestion& enzyme, bool xtandem) :
pep_to_prot(), filter_passed(0), filter_rejected(0), enzyme_(enzyme), xtandem_(xtandem)
{
}
void merge(FoundProteinFunctor& other)
{
if (pep_to_prot.empty())
{ // first merge is easy
pep_to_prot.swap(other.pep_to_prot);
}
else
{
for (FoundProteinFunctor::MapType::const_iterator it = other.pep_to_prot.begin(); it != other.pep_to_prot.end(); ++it)
{ // augment set
this->pep_to_prot[it->first].insert(other.pep_to_prot[it->first].begin(), other.pep_to_prot[it->first].end());
}
other.pep_to_prot.clear();
}
// cheap members
this->filter_passed += other.filter_passed;
other.filter_passed = 0;
this->filter_rejected += other.filter_rejected;
other.filter_rejected = 0;
}
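/*
* Usage sketch (illustrative, not from the original source): merge() supports
* a fill-then-reduce pattern where each OpenMP thread collects hits privately:
*
* FoundProteinFunctor total(enzyme, xtandem);
* #pragma omp parallel
* {
* FoundProteinFunctor local(enzyme, xtandem);
* // ... call local.addHit(...) for this thread's share of proteins ...
* #pragma omp critical (merge_functors)
* total.merge(local);
* }
*/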
void addHit(const OpenMS::Size idx_pep,
const OpenMS::Size idx_prot,
const OpenMS::Size len_pep,
const OpenMS::String& seq_prot,
OpenMS::Int position)
{
if (enzyme_.isValidProduct(seq_prot, position, len_pep, true, true, xtandem_))
{
PeptideProteinMatchInformation match;
match.protein_index = idx_prot;
match.position = position;
match.AABefore = (position == 0) ? PeptideEvidence::N_TERMINAL_AA : seq_prot[position - 1];
match.AAAfter = (position + len_pep >= seq_prot.size()) ? PeptideEvidence::C_TERMINAL_AA : seq_prot[position + len_pep];
pep_to_prot[idx_pep].insert(match);
++filter_passed;
}
else
{
//std::cerr << "REJECTED Peptide " << seq_pep << " with hit to protein "
// << seq_prot << " at position " << position << std::endl;
++filter_rejected;
}
}
};
inline void addHits_(AhoCorasickAmbiguous& fuzzyAC, const AhoCorasickAmbiguous::FuzzyACPattern& pattern, const AhoCorasickAmbiguous::PeptideDB& pep_DB, const String& prot, const String& full_prot, SignedSize idx_prot, Int offset, FoundProteinFunctor& func_threads) const
{
fuzzyAC.setProtein(prot);
while (fuzzyAC.findNext(pattern))
{
const seqan::Peptide& tmp_pep = pep_DB[fuzzyAC.getHitDBIndex()];
func_threads.addHit(fuzzyAC.getHitDBIndex(), idx_prot, length(tmp_pep), full_prot, fuzzyAC.getHitProteinPosition() + offset);
}
}
void updateMembers_() override;
String decoy_string_;
bool prefix_;
String missing_decoy_action_;
String enzyme_name_;
String enzyme_specificity_;
bool write_protein_sequence_;
bool write_protein_description_;
bool keep_unreferenced_proteins_;
bool allow_unmatched_;
bool IL_equivalent_;
Int aaa_max_;
Int mm_max_;
};
}
|
GB_binop__gt_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__gt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_01__gt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__gt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_03__gt_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint16)
// A*D function (colscale): GB (_AxD__gt_uint16)
// D*A function (rowscale): GB (_DxB__gt_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__gt_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__gt_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint16)
// C=scalar+B GB (_bind1st__gt_uint16)
// C=scalar+B' GB (_bind1st_tran__gt_uint16)
// C=A+scalar GB (_bind2nd__gt_uint16)
// C=A'+scalar GB (_bind2nd_tran__gt_uint16)
// C type: bool
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij > bij)
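// For example, the bind1st kernel below computes Cx [p] = (x > Bx [p]) for a
// bound uint16 scalar x, and the bind2nd kernel computes Cx [p] = (Ax [p] > y),
// both writing a bool result.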
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x > y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GT || GxB_NO_UINT16 || GxB_NO_GT_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__gt_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *restrict Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__gt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__gt_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__gt_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__gt_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x > bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__gt_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij > y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x > aij) ; \
}
GrB_Info GB (_bind1st_tran__gt_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij > y) ; \
}
GrB_Info GB (_bind2nd_tran__gt_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
QuadNode.h | /*
* QuadNode.h
*
* Created on: 21.05.2014
* Author: Moritz v. Looz (moritz.looz-corswarem@kit.edu)
*/
#ifndef QUADNODE_H_
#define QUADNODE_H_
#include <vector>
#include <algorithm>
#include <functional>
#include <assert.h>
#include "../../auxiliary/Log.h"
#include "../../auxiliary/Parallel.h"
#include "../../geometric/HyperbolicSpace.h"
using std::vector;
using std::min;
using std::max;
using std::cos;
namespace NetworKit {
template <class T, bool poincare = true>
class QuadNode {
friend class QuadTreeGTest;
private:
double leftAngle;
double minR;
double rightAngle;
double maxR;
Point2D<double> a,b,c,d;
unsigned capacity;
static const unsigned coarsenLimit = 4;
count subTreeSize;
std::vector<T> content;
std::vector<Point2D<double> > positions;
std::vector<double> angles;
std::vector<double> radii;
bool isLeaf;
bool splitTheoretical;
double alpha;
double balance;
index ID;
double lowerBoundR;
public:
std::vector<QuadNode> children;
QuadNode() {
//This should never be called.
leftAngle = 0;
rightAngle = 0;
minR = 0;
maxR = 0;
capacity = 20;
isLeaf = true;
subTreeSize = 0;
balance = 0.5;
splitTheoretical = false;
alpha = 1;
lowerBoundR = maxR;
ID = 0;
}
/**
* Construct a QuadNode for polar coordinates.
*
*
* @param leftAngle Minimal angular coordinate of region, in radians from 0 to 2\pi
* @param minR Minimal radial coordinate of region, between 0 and 1
* @param rightAngle Maximal angular coordinate of region, in radians from 0 to 2\pi
* @param maxR Maximal radial coordinate of region, between 0 and 1
* @param capacity Number of points a leaf cell can store before splitting
* @param splitTheoretical Whether to split in a theoretically optimal way or in a way to decrease measured running times
* @param alpha Dispersion parameter of the point distribution. Only has an effect if the theoretical split is used
* @param balance Balance factor of the split; must lie strictly between 0 and 1
*
*/
QuadNode(double leftAngle, double minR, double rightAngle, double maxR, unsigned capacity = 1000, bool splitTheoretical = false, double alpha = 1, double balance = 0.5) {
if (balance <= 0 || balance >= 1) throw std::runtime_error("Quadtree balance parameter must be between 0 and 1.");
if (poincare && maxR > 1) throw std::runtime_error("The Poincare disk has a radius of 1, cannot create quadtree larger than that!");
this->leftAngle = leftAngle;
this->minR = minR;
this->maxR = maxR;
this->rightAngle = rightAngle;
this->a = HyperbolicSpace::polarToCartesian(leftAngle, minR);
this->b = HyperbolicSpace::polarToCartesian(rightAngle, minR);
this->c = HyperbolicSpace::polarToCartesian(rightAngle, maxR);
this->d = HyperbolicSpace::polarToCartesian(leftAngle, maxR);
this->capacity = capacity;
this->alpha = alpha;
this->splitTheoretical = splitTheoretical;
this->balance = balance;
this->lowerBoundR = maxR;
this->ID = 0;
isLeaf = true;
subTreeSize = 0;
}
void split() {
assert(isLeaf);
//heavy lifting: split up!
double middleAngle = (rightAngle - leftAngle) / 2 + leftAngle;
/**
* we want to make sure the space is evenly divided to obtain a balanced tree
* Simply halving the radius will cause a larger space for the outer Quadnode, resulting in an unbalanced tree
*/
double middleR;
if (poincare) {
if (splitTheoretical) {
double hyperbolicOuter = HyperbolicSpace::EuclideanRadiusToHyperbolic(maxR);
double hyperbolicInner = HyperbolicSpace::EuclideanRadiusToHyperbolic(minR);
double hyperbolicMiddle = acosh((1-balance)*cosh(alpha*hyperbolicOuter) + balance*cosh(alpha*hyperbolicInner))/alpha;
middleR = HyperbolicSpace::hyperbolicRadiusToEuclidean(hyperbolicMiddle);
} else {
double nom = maxR - minR;
double denom = pow((1-maxR*maxR)/(1-minR*minR), 0.5)+1;
middleR = nom/denom + minR;
}
} else {
middleR = acosh((1-balance)*cosh(alpha*maxR) + balance*cosh(alpha*minR))/alpha;
}
//one could also use the median here. Results in worse asymptotic complexity, but maybe better runtime?
assert(middleR < maxR);
assert(middleR > minR);
QuadNode<index,poincare> southwest(leftAngle, minR, middleAngle, middleR, capacity, splitTheoretical, alpha, balance);
QuadNode<index,poincare> southeast(middleAngle, minR, rightAngle, middleR, capacity, splitTheoretical, alpha, balance);
QuadNode<index,poincare> northwest(leftAngle, middleR, middleAngle, maxR, capacity, splitTheoretical, alpha, balance);
QuadNode<index,poincare> northeast(middleAngle, middleR, rightAngle, maxR, capacity, splitTheoretical, alpha, balance);
children = {southwest, southeast, northwest, northeast};
isLeaf = false;
}
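/*
* Worked example for the non-theoretical split (editorial note): with minR = 0
* and maxR = 0.8, nom = 0.8 and denom = sqrt((1-0.64)/(1-0)) + 1 = 1.6, so
* middleR = 0.5. Plain halving of the radius would give 0.4; moving the cut
* outwards keeps the outer ring thinner to compensate for its larger area.
*/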
/**
* Add a point at polar coordinates (angle, R) with content input. May split node if capacity is full
*
* @param input arbitrary content, in our case an index
* @param angle angular coordinate of point, between 0 and 2 pi.
* @param R radial coordinate of point, between 0 and 1.
*/
void addContent(T input, double angle, double R) {
assert(this->responsible(angle, R));
if (lowerBoundR > R) lowerBoundR = R;
if (isLeaf) {
if (content.size() + 1 < capacity) {
content.push_back(input);
angles.push_back(angle);
radii.push_back(R);
Point2D<double> pos = HyperbolicSpace::polarToCartesian(angle, R);
positions.push_back(pos);
} else {
split();
for (index i = 0; i < content.size(); i++) {
this->addContent(content[i], angles[i], radii[i]);
}
assert(subTreeSize == content.size());//we have added everything twice
subTreeSize = content.size();
content.clear();
angles.clear();
radii.clear();
positions.clear();
this->addContent(input, angle, R);
}
}
else {
assert(children.size() > 0);
for (index i = 0; i < children.size(); i++) {
if (children[i].responsible(angle, R)) {
children[i].addContent(input, angle, R);
break;
}
}
subTreeSize++;
}
}
/**
* Remove content at polar coordinates (angle, R). May cause coarsening of the quadtree
*
* @param input Content to be removed
* @param angle Angular coordinate
* @param R Radial coordinate
*
* @return True if content was found and removed, false otherwise
*/
bool removeContent(T input, double angle, double R) {
if (!responsible(angle, R)) return false;
if (isLeaf) {
index i = 0;
for (; i < content.size(); i++) {
if (content[i] == input) break;
}
if (i < content.size()) {
assert(angles[i] == angle);
assert(radii[i] == R);
//remove element
content.erase(content.begin()+i);
positions.erase(positions.begin()+i);
angles.erase(angles.begin()+i);
radii.erase(radii.begin()+i);
return true;
} else {
return false;
}
}
else {
bool removed = false;
bool allLeaves = true;
assert(children.size() > 0);
for (index i = 0; i < children.size(); i++) {
if (!children[i].isLeaf) allLeaves = false;
if (children[i].removeContent(input, angle, R)) {
assert(!removed);
removed = true;
}
}
if (removed) subTreeSize--;
//coarsen?
if (removed && allLeaves && size() < coarsenLimit) {
//coarsen!!
//why not assert empty containers and then insert directly?
vector<T> allContent;
vector<Point2D<double> > allPositions;
vector<double> allAngles;
vector<double> allRadii;
for (index i = 0; i < children.size(); i++) {
allContent.insert(allContent.end(), children[i].content.begin(), children[i].content.end());
allPositions.insert(allPositions.end(), children[i].positions.begin(), children[i].positions.end());
allAngles.insert(allAngles.end(), children[i].angles.begin(), children[i].angles.end());
allRadii.insert(allRadii.end(), children[i].radii.begin(), children[i].radii.end());
}
assert(subTreeSize == allContent.size());
assert(subTreeSize == allPositions.size());
assert(subTreeSize == allAngles.size());
assert(subTreeSize == allRadii.size());
children.clear();
content.swap(allContent);
positions.swap(allPositions);
angles.swap(allAngles);
radii.swap(allRadii);
isLeaf = true;
}
return removed;
}
}
/**
* Check whether the region managed by this node lies outside of an Euclidean circle.
*
* @param query Center of the Euclidean query circle, given in Cartesian coordinates
* @param radius Radius of the Euclidean query circle
*
* @return True if the region managed by this node lies completely outside of the circle
*/
bool outOfReach(Point2D<double> query, double radius) const {
double phi, r;
HyperbolicSpace::cartesianToPolar(query, phi, r);
if (responsible(phi, r)) return false;
//if using native coordinates, call distance calculation
if (!poincare) return hyperbolicDistances(phi, r).first > radius;
//get four edge points
double topDistance, bottomDistance, leftDistance, rightDistance;
if (phi < leftAngle || phi > rightAngle) {
topDistance = min(c.distance(query), d.distance(query));
} else {
topDistance = abs(r - maxR);
}
if (topDistance <= radius) return false;
if (phi < leftAngle || phi > rightAngle) {
bottomDistance = min(a.distance(query), b.distance(query));
} else {
bottomDistance = abs(r - minR);
}
if (bottomDistance <= radius) return false;
double minDistanceR = r*cos(abs(phi-leftAngle));
if (minDistanceR > minR && minDistanceR < maxR) {
leftDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR));
} else {
leftDistance = min(a.distance(query), d.distance(query));
}
if (leftDistance <= radius) return false;
minDistanceR = r*cos(abs(phi-rightAngle));
if (minDistanceR > minR && minDistanceR < maxR) {
rightDistance = query.distance(HyperbolicSpace::polarToCartesian(phi, minDistanceR));
} else {
rightDistance = min(b.distance(query), c.distance(query));
}
if (rightDistance <= radius) return false;
return true;
}
/**
* Check whether the region managed by this node lies outside of an Euclidean circle.
* Functionality is the same as in the method above, but it takes polar coordinates instead of Cartesian ones
*
* @param angle_c Angular coordinate of the Euclidean query circle's center
* @param r_c Radial coordinate of the Euclidean query circle's center
* @param radius Radius of the Euclidean query circle
*
* @return True if the region managed by this node lies completely outside of the circle
*/
bool outOfReach(double angle_c, double r_c, double radius) const {
if (responsible(angle_c, r_c)) return false;
Point2D<double> query = HyperbolicSpace::polarToCartesian(angle_c, r_c);
return outOfReach(query, radius);
}
/**
* @param phi Angular coordinate of query point
* @param r_h radial coordinate of query point in poincare disk
*/
std::pair<double, double> hyperbolicDistances(double phi, double r) const {
double minRHyper, maxRHyper, r_h;
if (poincare) {
minRHyper=HyperbolicSpace::EuclideanRadiusToHyperbolic(this->minR);
maxRHyper=HyperbolicSpace::EuclideanRadiusToHyperbolic(this->maxR);
r_h = HyperbolicSpace::EuclideanRadiusToHyperbolic(r);
} else {
minRHyper=this->minR;
maxRHyper=this->maxR;
r_h = r;
}
double coshr = cosh(r_h);
double sinhr = sinh(r_h);
double coshMinR = cosh(minRHyper);
double coshMaxR = cosh(maxRHyper);
double sinhMinR = sinh(minRHyper);
double sinhMaxR = sinh(maxRHyper);
double cosDiffLeft = cos(phi - leftAngle);
double cosDiffRight = cos(phi - rightAngle);
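/*
* Editorial note: the corner distances below instantiate the hyperbolic law of
* cosines, cosh d = cosh r_1 cosh r_2 - sinh r_1 sinh r_2 cos(phi_1 - phi_2).
* The 'extremum' computed further down is the radius s at which this expression
* is stationary along a radial border: s = artanh(tanh(r) cosDiff)
* = log((cosh r + sinh r cosDiff) / (cosh r - sinh r cosDiff)) / 2.
*/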
/**
* If the query point is not within the quadnode, the distance minimum is on the border.
* Need to check whether extremum is between corners:
*/
double coshMinDistance, coshMaxDistance;
//Left border
double lowerLeftDistance = coshMinR*coshr-sinhMinR*sinhr*cosDiffLeft;
double upperLeftDistance = coshMaxR*coshr-sinhMaxR*sinhr*cosDiffLeft;
if (responsible(phi, r)) coshMinDistance = 1; //strictly speaking, this is wrong
else coshMinDistance = min(lowerLeftDistance, upperLeftDistance);
coshMaxDistance = max(lowerLeftDistance, upperLeftDistance);
//double a = cosh(r_h);
double b = sinhr*cosDiffLeft;
double extremum = log((coshr+b)/(coshr-b))/2;
if (extremum < maxRHyper && extremum >= minRHyper) {
double extremeDistance = cosh(extremum)*coshr-sinh(extremum)*sinhr*cosDiffLeft;
coshMinDistance = min(coshMinDistance, extremeDistance);
coshMaxDistance = max(coshMaxDistance, extremeDistance);
}
/**
* cosh is a function from [0,\infty) to [1, \infty);
* variables holding cosh values thus need to be at least 1
*/
assert(coshMaxDistance >= 1);
assert(coshMinDistance >= 1);
//Right border
double lowerRightDistance = coshMinR*coshr-sinhMinR*sinhr*cosDiffRight;
double upperRightDistance = coshMaxR*coshr-sinhMaxR*sinhr*cosDiffRight;
coshMinDistance = min(coshMinDistance, lowerRightDistance);
coshMinDistance = min(coshMinDistance, upperRightDistance);
coshMaxDistance = max(coshMaxDistance, lowerRightDistance);
coshMaxDistance = max(coshMaxDistance, upperRightDistance);
b = sinhr*cosDiffRight;
extremum = log((coshr+b)/(coshr-b))/2;
if (extremum < maxRHyper && extremum >= minRHyper) {
double extremeDistance = cosh(extremum)*coshr-sinh(extremum)*sinhr*cosDiffRight;
coshMinDistance = min(coshMinDistance, extremeDistance);
coshMaxDistance = max(coshMaxDistance, extremeDistance);
}
assert(coshMaxDistance >= 1);
assert(coshMinDistance >= 1);
//upper and lower borders
if (phi >= leftAngle && phi < rightAngle) {
double lower = cosh(abs(r_h-minRHyper));
double upper = cosh(abs(r_h-maxRHyper));
coshMinDistance = min(coshMinDistance, lower);
coshMinDistance = min(coshMinDistance, upper);
coshMaxDistance = max(coshMaxDistance, upper);
coshMaxDistance = max(coshMaxDistance, lower);
}
assert(coshMaxDistance >= 1);
assert(coshMinDistance >= 1);
//again with mirrored phi
double mirrorphi;
if (phi >= PI) mirrorphi = phi - PI;
else mirrorphi = phi + PI;
if (mirrorphi >= leftAngle && mirrorphi < rightAngle) {
double lower = coshMinR*coshr+sinhMinR*sinhr;
double upper = coshMaxR*coshr+sinhMaxR*sinhr;
coshMinDistance = min(coshMinDistance, lower);
coshMinDistance = min(coshMinDistance, upper);
coshMaxDistance = max(coshMaxDistance, upper);
coshMaxDistance = max(coshMaxDistance, lower);
}
assert(coshMaxDistance >= 1);
assert(coshMinDistance >= 1);
double minDistance, maxDistance;
minDistance = acosh(coshMinDistance);
maxDistance = acosh(coshMaxDistance);
assert(maxDistance >= 0);
assert(minDistance >= 0);
return std::pair<double, double>(minDistance, maxDistance);
}
/**
* Does the point at (angle, r) fall inside the region managed by this QuadNode?
*
* @param angle Angular coordinate of input point
* @param r Radial coordinate of input points
*
* @return True if input point lies within the region of this QuadNode
*/
bool responsible(double angle, double r) const {
return (angle >= leftAngle && angle < rightAngle && r >= minR && r < maxR);
}
/**
* Get all Elements in this QuadNode or a descendant of it
*
* @return vector of content type T
*/
std::vector<T> getElements() const {
if (isLeaf) {
return content;
} else {
assert(content.size() == 0);
assert(angles.size() == 0);
assert(radii.size() == 0);
vector<T> result;
for (index i = 0; i < children.size(); i++) {
std::vector<T> subresult = children[i].getElements();
result.insert(result.end(), subresult.begin(), subresult.end());
}
return result;
}
}
void getCoordinates(vector<double> &anglesContainer, vector<double> &radiiContainer) const {
assert(angles.size() == radii.size());
if (isLeaf) {
anglesContainer.insert(anglesContainer.end(), angles.begin(), angles.end());
radiiContainer.insert(radiiContainer.end(), radii.begin(), radii.end());
}
else {
assert(content.size() == 0);
assert(angles.size() == 0);
assert(radii.size() == 0);
for (index i = 0; i < children.size(); i++) {
children[i].getCoordinates(anglesContainer, radiiContainer);
}
}
}
/**
* Don't use this!
* Code is still in here for a unit test.
*
* Get a reference to the leaf cell responsible for a point at (angle, r).
* Causes an assertion failure if called with a point outside this node's region
*
* @param angle Angular coordinate of point
* @param r Radial coordinate of point
*
* @return Reference to the leaf cell responsible for the point
*
*/
QuadNode<T>& getAppropriateLeaf(double angle, double r) {
assert(this->responsible(angle, r));
if (isLeaf) return *this; //the return type is a reference, so this yields the leaf itself, not a copy
else {
for (index i = 0; i < children.size(); i++) {
//the children partition the region, so at most one child can be responsible; return on the first hit
if (children[i].responsible(angle, r)) {
return children[i].getAppropriateLeaf(angle, r);
}
}
throw std::runtime_error("No responsible child found.");
}
}
/**
* Main query method, get points lying in a Euclidean circle around the center point.
* Optional limits can be given to get a different result or to reduce unnecessary comparisons
*
* Elements are pushed onto a vector which is a required argument. This is done to reduce copying
*
* Safe to call in parallel if diagnostics are disabled
*
* @param center Center of the query circle
* @param radius Radius of the query circle
* @param result Reference to the vector where the results will be stored
* @param minAngle Optional value for the minimum angular coordinate of the query region
* @param maxAngle Optional value for the maximum angular coordinate of the query region
* @param lowR Optional value for the minimum radial coordinate of the query region
* @param highR Optional value for the maximum radial coordinate of the query region
*/
void getElementsInEuclideanCircle(Point2D<double> center, double radius, vector<T> &result, double minAngle=0, double maxAngle=2*PI, double lowR=0, double highR = 1) const {
if (!poincare) throw std::runtime_error("Euclidean query circles not yet implemented for native hyperbolic coordinates.");
if (minAngle >= rightAngle || maxAngle <= leftAngle || lowR >= maxR || highR < lowerBoundR) return;
if (outOfReach(center, radius)) {
return;
}
if (isLeaf) {
const double rsq = radius*radius;
const double queryX = center[0];
const double queryY = center[1];
const count cSize = content.size();
for (index i = 0; i < cSize; i++) {
const double deltaX = positions[i].getX() - queryX;
const double deltaY = positions[i].getY() - queryY;
if (deltaX*deltaX + deltaY*deltaY < rsq) {
result.push_back(content[i]);
}
}
} else {
for (index i = 0; i < children.size(); i++) {
children[i].getElementsInEuclideanCircle(center, radius, result, minAngle, maxAngle, lowR, highR);
}
}
}
count getElementsProbabilistically(Point2D<double> euQuery, std::function<double(double)> prob, bool suppressLeft, vector<T> &result) const {
double phi_q, r_q;
HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q);
if (suppressLeft && phi_q > rightAngle) return 0;
TRACE("Getting hyperbolic distances");
auto distancePair = hyperbolicDistances(phi_q, r_q);
double probUB = prob(distancePair.first);
double probLB = prob(distancePair.second);
#ifndef NDEBUG
assert(probLB <= probUB);
#else
((void)(probLB));
#endif
if (probUB > 0.5) probUB = 1;//if we are going to take every second element anyway, no use in calculating expensive jumps
if (probUB == 0) return 0;
//TODO: return whole if probLB == 1
double probdenom = std::log(1-probUB);
if (probdenom == 0) {
DEBUG(probUB, " not zero, but too small too process. Ignoring.");
return 0;
}
TRACE("probUB: ", probUB, ", probdenom: ", probdenom);
count expectedNeighbours = probUB*size();
count candidatesTested = 0;
if (isLeaf) {
const count lsize = content.size();
TRACE("Leaf of size ", lsize);
for (index i = 0; i < lsize; i++) {
//jump!
if (probUB < 1) {
double random = Aux::Random::real();
double delta = std::log(random) / probdenom;
assert(delta == delta);
assert(delta >= 0);
i += delta;
if (i >= lsize) break;
TRACE("Jumped with delta ", delta, " arrived at ", i);
}
//see where we've arrived
candidatesTested++;
double distance;
if (poincare) {
distance = HyperbolicSpace::poincareMetric(positions[i], euQuery);
} else {
distance = HyperbolicSpace::nativeDistance(angles[i], radii[i], phi_q, r_q);
}
assert(distance >= distancePair.first);
double q = prob(distance);
q = q / probUB; //since the candidate was selected by the jumping process, we have to adjust the probabilities
assert(q <= 1);
assert(q >= 0);
//accept?
double acc = Aux::Random::real();
if (acc < q) {
TRACE("Accepted node ", i, " with probability ", q, ".");
result.push_back(content[i]);
}
}
} else {
if (expectedNeighbours < 1) {//select candidates directly instead of calling recursively
TRACE("probUB = ", probUB, ", switching to direct candidate selection.");
assert(probUB < 1);
const count stsize = size();
for (index i = 0; i < stsize; i++) {
double delta = std::log(Aux::Random::real()) / probdenom;
assert(delta >= 0);
i += delta;
TRACE("Jumped with delta ", delta, " arrived at ", i, ". Calling maybeGetKthElement.");
if (i < size()) maybeGetKthElement(probUB, euQuery, prob, i, result);//this could be optimized. As of now, the offset is subtracted separately for each point
else break;
candidatesTested++;
}
} else {//carry on as normal
for (index i = 0; i < children.size(); i++) {
TRACE("Recursively calling child ", i);
candidatesTested += children[i].getElementsProbabilistically(euQuery, prob, suppressLeft, result);
}
}
}
//DEBUG("Expected at most ", expectedNeighbours, " neighbours, got ", result.size() - offset);
return candidatesTested;
}
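/*
* Editorial note on the jump technique above: if each element is accepted with
* probability at most probUB, the gap until the next candidate under i.i.d.
* Bernoulli(probUB) trials is geometrically distributed and can be sampled in
* O(1) as delta = floor(log(U) / log(1 - probUB)) with U ~ Uniform(0,1); this
* is the std::log(random) / probdenom expression used in both branches. Each
* visited candidate is then accepted with prob(distance) / probUB to correct
* for the proposal distribution.
*/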
void maybeGetKthElement(double upperBound, Point2D<double> euQuery, std::function<double(double)> prob, index k, vector<T> &circleDenizens) const {
TRACE("Maybe get element ", k, " with upper Bound ", upperBound);
assert(k < size());
if (isLeaf) {
double distance;
if (poincare) {
distance = HyperbolicSpace::poincareMetric(positions[k], euQuery);
} else {
double phi_q, r_q;
HyperbolicSpace::cartesianToPolar(euQuery, phi_q, r_q);
distance = HyperbolicSpace::nativeDistance(angles[k], radii[k], phi_q, r_q);
}
double acceptance = prob(distance)/upperBound;
TRACE("Is leaf, accept with ", acceptance);
if (Aux::Random::real() < acceptance) circleDenizens.push_back(content[k]);
} else {
TRACE("Call recursively.");
index offset = 0;
for (index i = 0; i < children.size(); i++) {
count childsize = children[i].size();
if (k - offset < childsize) {
children[i].maybeGetKthElement(upperBound, euQuery, prob, k - offset, circleDenizens);
break;
}
offset += childsize;
}
}
}
/**
* Shrink all vectors in this subtree to fit the content.
* Call after quadtree construction is complete, causes better memory usage and cache efficiency
*/
void trim() {
content.shrink_to_fit();
positions.shrink_to_fit();
angles.shrink_to_fit();
radii.shrink_to_fit();
if (!isLeaf) {
for (index i = 0; i < children.size(); i++) {
children[i].trim();
}
}
}
/**
* Number of points lying in the region managed by this QuadNode
*/
count size() const {
return isLeaf ? content.size() : subTreeSize;
}
void recount() {
subTreeSize = 0;
for (index i = 0; i < children.size(); i++) {
children[i].recount();
subTreeSize += children[i].size();
}
}
/**
* Height of subtree hanging from this QuadNode
*/
count height() const {
count result = 1;//if leaf node, the children loop will not execute
for (auto child : children) result = std::max(result, child.height()+1);
return result;
}
/**
* Leaf cells in the subtree hanging from this QuadNode
*/
count countLeaves() const {
if (isLeaf) return 1;
count result = 0;
for (index i = 0; i < children.size(); i++) {
result += children[i].countLeaves();
}
return result;
}
double getLeftAngle() const {
return leftAngle;
}
double getRightAngle() const {
return rightAngle;
}
double getMinR() const {
return minR;
}
double getMaxR() const {
return maxR;
}
index getID() const {
return ID;
}
index indexSubtree(index nextID) {
index result = nextID;
assert(children.size() == 4 || children.size() == 0);
for (index i = 0; i < children.size(); i++) {
result = children[i].indexSubtree(result);
}
this->ID = result;
return result+1;
}
index getCellID(double phi, double r) const {
if (!responsible(phi, r)) return none;
if (isLeaf) return getID();
else {
for (index i = 0; i < children.size(); i++) {
index childresult = children[i].getCellID(phi, r);
if (childresult != none) return childresult;
}
throw std::runtime_error("No responsible child node found even though this node is responsible.");
}
}
index getMaxIDInSubtree() const {
if (isLeaf) return getID();
else {
index result = -1;
for (int i = 0; i < 4; i++) {
result = std::max(children[i].getMaxIDInSubtree(), result);
}
return std::max(result, getID());
}
}
count reindex(count offset) {
if (isLeaf)
{
#ifndef NETWORKIT_OMP2
#pragma omp task
#endif
{
index p = offset;
std::generate(content.begin(), content.end(), [&p](){return p++;});
}
offset += size();
} else {
for (int i = 0; i < 4; i++) {
offset = children[i].reindex(offset);
}
}
return offset;
}
};
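/*
* Usage sketch (hedged; uses only the API defined above):
*
* QuadNode<index> root(0, 0, 2*PI, 0.99, 1000); // polar region, capacity 1000
* root.addContent(42, 1.5, 0.3); // content 42 at (angle 1.5, r 0.3)
* std::vector<index> hits;
* Point2D<double> q = HyperbolicSpace::polarToCartesian(1.5, 0.3);
* root.getElementsInEuclideanCircle(q, 0.1, hits); // hits now contains 42
*/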
}
#endif /* QUADNODE_H_ */
|
GB_unop__identity_int16_int8.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int16_int8
// op(A') function: GB_unop_tran__identity_int16_int8
// C type: int16_t
// A type: int8_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int8_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int8_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT8)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_int16_int8
(
int16_t *Cx, // Cx and Ax may be aliased
const int8_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (int8_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int8_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
int8_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_int16_int8
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
coverage.h | #ifndef COVERAGE_H
#define COVERAGE_H
#include <boost/container/flat_set.hpp>
#include <boost/dynamic_bitset.hpp>
#include <boost/iostreams/stream.hpp>
#include <boost/iostreams/stream_buffer.hpp>
#include <boost/iostreams/device/file.hpp>
#include <boost/iostreams/filtering_stream.hpp>
#include <boost/iostreams/filter/zlib.hpp>
#include <boost/iostreams/filter/gzip.hpp>
#include <boost/progress.hpp>
#include <htslib/sam.h>
#include "tags.h"
#include "util.h"
#include "msa.h"
#include "split.h"
namespace torali {
struct SpanPoint {
int32_t bppos;
int32_t svt;
uint32_t id;
SpanPoint() : bppos(0), svt(0), id(0) {}
explicit SpanPoint(int32_t bp) : bppos(bp), svt(0), id(0) {}
SpanPoint(int32_t bp, int32_t s, uint32_t identifier) : bppos(bp), svt(s), id(identifier) {}
};
struct BpRegion {
int32_t regionStart;
int32_t regionEnd;
int32_t bppos;
int32_t homLeft;
int32_t homRight;
int32_t svt;
uint32_t id;
uint8_t bpPoint;
BpRegion() : regionStart(0), regionEnd(0), bppos(0), homLeft(0), homRight(0), svt(0), id(0), bpPoint(0) {}
explicit BpRegion(int32_t bp) : regionStart(0), regionEnd(0), bppos(bp), homLeft(0), homRight(0), svt(0), id(0), bpPoint(0) {}
BpRegion(int32_t rs, int32_t re, int32_t bpos, int32_t hl, int32_t hr, int32_t s, uint32_t identifier, uint8_t bpp) : regionStart(rs), regionEnd(re), bppos(bpos), homLeft(hl), homRight(hr), svt(s), id(identifier), bpPoint(bpp) {}
};
template<typename TRecord>
struct SortBp {
inline bool operator()(TRecord const& s1, TRecord const& s2) const {
return (s1.bppos < s2.bppos);
}
};
struct SpanningCount {
int32_t refh1;
int32_t refh2;
int32_t alth1;
int32_t alth2;
std::vector<uint8_t> ref;
std::vector<uint8_t> alt;
SpanningCount() : refh1(0), refh2(0), alth1(0), alth2(0) {}
};
struct JunctionCount {
int32_t refh1;
int32_t refh2;
int32_t alth1;
int32_t alth2;
std::vector<uint8_t> ref;
std::vector<uint8_t> alt;
JunctionCount() : refh1(0), refh2(0), alth1(0), alth2(0) {}
};
template<typename TAlign, typename TQualities>
inline uint32_t
_getAlignmentQual(TAlign const& align, TQualities const& qual) {
typedef typename TAlign::index TAIndex;
uint32_t baseQualSum = 0;
uint32_t seqPtr = 0;
uint32_t alignedBases = 0;
for(TAIndex j = 0; j < (TAIndex) align.shape()[1]; ++j) {
if (align[1][j] != '-') {
if (align[0][j] != '-') {
++alignedBases;
baseQualSum += qual[seqPtr];
}
++seqPtr;
}
}
return (baseQualSum / alignedBases);
}
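/*
* Editorial note: _getAlignmentQual returns the mean base quality over columns
* where both alignment rows are non-gap; callers are expected to pass
* alignments with at least one aligned base, otherwise alignedBases is zero
* and the division is undefined.
*/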
template<typename TPos>
inline int32_t
_cutRefStart(TPos const rStart, TPos const rEnd, TPos const offset, unsigned int bpPoint, int32_t const svt) {
if (_translocation(svt)) {
uint8_t ct = _getSpanOrientation(svt);
if (ct == 3) {
if (!bpPoint) return rEnd - offset;
else return rStart - offset;
} else {
if (bpPoint) return rEnd - offset;
else return rStart - offset;
}
} else {
if (svt == 3) {
if (!bpPoint) return rEnd - offset;
else return rStart - offset;
} else {
if (bpPoint) return rEnd - offset;
else return rStart - offset;
}
}
}
template<typename TPos>
inline int32_t
_cutRefEnd(TPos const rStart, TPos const rEnd, TPos const offset, unsigned int bpPoint, int32_t const svt) {
if (_translocation(svt)) {
uint8_t ct = _getSpanOrientation(svt);
if (ct == 3) {
if (!bpPoint) return rEnd + offset;
else return rStart + offset;
} else {
if (bpPoint) return rEnd + offset;
else return rStart + offset;
}
} else {
if (svt == 3) {
if (!bpPoint) return rEnd + offset;
else return rStart + offset;
} else {
if (bpPoint) return rEnd + offset;
else return rStart + offset;
}
}
}
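/*
* Editorial note: in both helpers above, bpPoint selects the breakpoint side
* being probed (0 = svStart side, 1 = svEnd side, as set up in _generateProbes
* below), and the ct == 3 / svt == 3 case flips whether rStart or rEnd anchors
* the cut.
*/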
template<typename TConfig, typename TSVs, typename TBreakProbes, typename TGenomicBpRegion>
inline void
_generateProbes(TConfig const& c, bam_hdr_t* hdr, TSVs& svs, TBreakProbes& refProbeArr, TBreakProbes& consProbeArr, TGenomicBpRegion& bpRegion, std::vector<bool>& svOnChr) {
typedef typename TBreakProbes::value_type TProbes;
// Preprocess REF and ALT
boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();
std::cout << '[' << boost::posix_time::to_simple_string(now) << "] " << "Generate REF and ALT probes" << std::endl;
boost::progress_display show_progress( hdr->n_targets );
TProbes refProbes(svs.size());
faidx_t* fai = fai_load(c.genome.string().c_str());
for(int32_t refIndex=0; refIndex < (int32_t) hdr->n_targets; ++refIndex) {
++show_progress;
char* seq = NULL;
// Iterate all structural variants
for(typename TSVs::iterator itSV = svs.begin(); itSV != svs.end(); ++itSV) {
if ((itSV->chr != refIndex) && (itSV->chr2 != refIndex)) continue;
svOnChr[refIndex] = true;
// Lazy loading of reference sequence
if (seq == NULL) {
int32_t seqlen = -1;
std::string tname(hdr->target_name[refIndex]);
seq = faidx_fetch_seq(fai, tname.c_str(), 0, hdr->target_len[refIndex], &seqlen);
}
// Set tag alleles
if (itSV->chr == refIndex) {
itSV->alleles = _addAlleles(boost::to_upper_copy(std::string(seq + itSV->svStart - 1, seq + itSV->svStart)), std::string(hdr->target_name[itSV->chr2]), *itSV, itSV->svt);
}
if (!itSV->precise) continue;
// Get the reference sequence
if ((itSV->chr != itSV->chr2) && (itSV->chr2 == refIndex)) {
Breakpoint bp(*itSV);
_initBreakpoint(hdr, bp, (int32_t) itSV->consensus.size(), itSV->svt);
refProbes[itSV->id] = _getSVRef(seq, bp, refIndex, itSV->svt);
}
if (itSV->chr == refIndex) {
Breakpoint bp(*itSV);
if (_translocation(itSV->svt)) bp.part1 = refProbes[itSV->id];
if (itSV->svt == 4) {
int32_t bufferSpace = std::max((int32_t) ((itSV->consensus.size() - itSV->insLen) / 3), c.minimumFlankSize);
_initBreakpoint(hdr, bp, bufferSpace, itSV->svt);
} else _initBreakpoint(hdr, bp, (int32_t) itSV->consensus.size(), itSV->svt);
std::string svRefStr = _getSVRef(seq, bp, refIndex, itSV->svt);
// Find breakpoint to reference
typedef boost::multi_array<char, 2> TAlign;
TAlign align;
if (!_consRefAlignment(itSV->consensus, svRefStr, align, itSV->svt)) continue;
AlignDescriptor ad;
if (!_findSplit(c, itSV->consensus, svRefStr, align, ad, itSV->svt)) continue;
// Debug consensus to reference alignment
//std::cerr << itSV->id << std::endl;
//for(uint32_t i = 0; i<align.shape()[0]; ++i) {
//for(uint32_t j = 0; j<align.shape()[1]; ++j) std::cerr << align[i][j];
//std::cerr << std::endl;
//}
//std::cerr << std::endl;
// Iterate all samples
for (unsigned int bpPoint = 0; bpPoint<2; ++bpPoint) {
int32_t regionChr, regionStart, regionEnd, cutConsStart, cutConsEnd, cutRefStart, cutRefEnd, bppos;
if (bpPoint) {
regionChr = itSV->chr2;
regionStart = std::max(0, itSV->svEnd - c.minimumFlankSize);
regionEnd = std::min((uint32_t) (itSV->svEnd + c.minimumFlankSize), hdr->target_len[itSV->chr2]);
cutConsStart = ad.cEnd - ad.homLeft - c.minimumFlankSize;
cutConsEnd = ad.cEnd + ad.homRight + c.minimumFlankSize;
cutRefStart = _cutRefStart(ad.rStart, ad.rEnd, ad.homLeft + c.minimumFlankSize, bpPoint, itSV->svt);
cutRefEnd = _cutRefEnd(ad.rStart, ad.rEnd, ad.homRight + c.minimumFlankSize, bpPoint, itSV->svt);
bppos = itSV->svEnd;
} else {
regionChr = itSV->chr;
regionStart = std::max(0, itSV->svStart - c.minimumFlankSize);
regionEnd = std::min((uint32_t) (itSV->svStart + c.minimumFlankSize), hdr->target_len[itSV->chr]);
cutConsStart = ad.cStart - ad.homLeft - c.minimumFlankSize;
cutConsEnd = ad.cStart + ad.homRight + c.minimumFlankSize;
cutRefStart = _cutRefStart(ad.rStart, ad.rEnd, ad.homLeft + c.minimumFlankSize, bpPoint, itSV->svt);
cutRefEnd = _cutRefEnd(ad.rStart, ad.rEnd, ad.homRight + c.minimumFlankSize, bpPoint, itSV->svt);
bppos = itSV->svStart;
}
consProbeArr[bpPoint][itSV->id] = itSV->consensus.substr(cutConsStart, (cutConsEnd - cutConsStart));
refProbeArr[bpPoint][itSV->id] = svRefStr.substr(cutRefStart, (cutRefEnd - cutRefStart));
bpRegion[regionChr].push_back(BpRegion(regionStart, regionEnd, bppos, ad.homLeft, ad.homRight, itSV->svt, itSV->id, bpPoint));
}
}
}
if (seq != NULL) free(seq);
}
// Clean-up
fai_destroy(fai);
for(int32_t refIndex=0; refIndex < (int32_t) hdr->n_targets; ++refIndex) {
// Sort breakpoint regions
std::sort(bpRegion[refIndex].begin(), bpRegion[refIndex].end(), SortBp<BpRegion>());
}
}
template<typename TConfig, typename TSampleLibrary, typename TSVs, typename TCoverageCount, typename TCountMap, typename TSpanMap>
inline void
annotateCoverage(TConfig& c, TSampleLibrary& sampleLib, TSVs& svs, TCoverageCount& covCount, TCountMap& countMap, TSpanMap& spanMap)
{
typedef typename TCoverageCount::value_type::value_type TCovPair;
typedef typename TSpanMap::value_type::value_type TSpanPair;
typedef typename TCountMap::value_type::value_type TCountPair;
typedef std::vector<uint8_t> TQuality;
// Open file handles
typedef std::vector<samFile*> TSamFile;
typedef std::vector<hts_idx_t*> TIndex;
typedef std::vector<bam_hdr_t*> THeader;
TSamFile samfile(c.files.size());
TIndex idx(c.files.size());
THeader hdr(c.files.size());
int32_t totalTarget = 0;
for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {
samfile[file_c] = sam_open(c.files[file_c].string().c_str(), "r");
hts_set_fai_filename(samfile[file_c], c.genome.string().c_str());
idx[file_c] = sam_index_load(samfile[file_c], c.files[file_c].string().c_str());
hdr[file_c] = sam_hdr_read(samfile[file_c]);
totalTarget += hdr[file_c]->n_targets;
}
// Initialize coverage count maps
covCount.resize(c.files.size());
countMap.resize(c.files.size());
spanMap.resize(c.files.size());
for(uint32_t file_c = 0; file_c < c.files.size(); ++file_c) {
covCount[file_c].resize(svs.size(), TCovPair());
countMap[file_c].resize(svs.size(), TCountPair());
spanMap[file_c].resize(svs.size(), TSpanPair());
}
// Reference and consensus probes
typedef std::vector<std::string> TProbes;
typedef std::vector<TProbes> TBreakProbes;
TBreakProbes refProbeArr(2, TProbes()); // Left and right breakpoint
TBreakProbes consProbeArr(2, TProbes()); // Left and right breakpoint
for(uint32_t k = 0; k < 2; ++k) {
refProbeArr[k].resize(svs.size());
consProbeArr[k].resize(svs.size());
}
typedef std::vector<BpRegion> TBpRegion;
typedef std::vector<TBpRegion> TGenomicBpRegion;
TGenomicBpRegion bpRegion(hdr[0]->n_targets, TBpRegion());
std::vector<bool> svOnChr(hdr[0]->n_targets, false);
// Generate probes
_generateProbes(c, hdr[0], svs, refProbeArr, consProbeArr, bpRegion, svOnChr);
// Debug
//for(uint32_t k = 0; k < 2; ++k) {
//for(uint32_t i = 0; i < svs.size(); ++i) {
//std::cerr << k << ',' << i << ',' << refProbeArr[k][i] << ',' << consProbeArr[k][i] << std::endl;
//}
//}
// Iterate all samples
boost::posix_time::ptime now = boost::posix_time::second_clock::local_time();
std::cout << '[' << boost::posix_time::to_simple_string(now) << "] " << "SV annotation" << std::endl;
boost::progress_display show_progress( totalTarget );
typedef std::vector<uint32_t> TRefAlignCount;
typedef std::vector<TRefAlignCount> TFileRefAlignCount;
TFileRefAlignCount refAlignedReadCount(c.files.size(), TRefAlignCount());
TFileRefAlignCount refAlignedSpanCount(c.files.size(), TRefAlignCount());
for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {
refAlignedReadCount[file_c].resize(svs.size(), 0);
refAlignedSpanCount[file_c].resize(svs.size(), 0);
}
// Dump file
boost::iostreams::filtering_ostream dumpOut;
if (c.hasDumpFile) {
dumpOut.push(boost::iostreams::gzip_compressor());
dumpOut.push(boost::iostreams::file_sink(c.dumpfile.string().c_str(), std::ios_base::out | std::ios_base::binary));
dumpOut << "#svid\tbam\tqname\tchr\tpos\tmatechr\tmatepos\tmapq\ttype" << std::endl;
}
#pragma omp parallel for default(shared)
for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {
// Pair qualities and features
typedef boost::unordered_map<std::size_t, uint8_t> TQualities;
TQualities qualities;
TQualities qualitiestra;
typedef boost::unordered_map<std::size_t, bool> TClip;
TClip clip;
TClip cliptra;
// Iterate chromosomes
for(int32_t refIndex=0; refIndex < (int32_t) hdr[file_c]->n_targets; ++refIndex) {
++show_progress;
// Any SV breakpoints on this chromosome?
if (!svOnChr[refIndex]) continue;
// Check we have mapped reads on this chromosome
bool nodata = true;
std::string suffix("cram");
std::string str(c.files[file_c].string());
if ((str.size() >= suffix.size()) && (str.compare(str.size() - suffix.size(), suffix.size(), suffix) == 0)) nodata = false;
uint64_t mapped = 0;
uint64_t unmapped = 0;
hts_idx_get_stat(idx[file_c], refIndex, &mapped, &unmapped);
if (mapped) nodata = false;
if (nodata) continue;
// Coverage track
typedef uint16_t TCount;
uint32_t maxCoverage = std::numeric_limits<TCount>::max();
typedef std::vector<TCount> TCoverage;
TCoverage covFragment(hdr[file_c]->target_len[refIndex], 0);
TCoverage covBases(hdr[file_c]->target_len[refIndex], 0);
// Flag breakpoint regions
typedef boost::dynamic_bitset<> TBitSet;
TBitSet bpOccupied(hdr[file_c]->target_len[refIndex]);
for(uint32_t i = 0; i < bpRegion[refIndex].size(); ++i) {
for(int32_t k = bpRegion[refIndex][i].regionStart; k < bpRegion[refIndex][i].regionEnd; ++k) {
bpOccupied[k] = 1;
}
}
// Flag spanning breakpoints
typedef std::vector<SpanPoint> TSpanPoint;
TSpanPoint spanPoint;
typedef boost::dynamic_bitset<> TBitSet;
TBitSet spanBp(hdr[file_c]->target_len[refIndex]);
for(typename TSVs::iterator itSV = svs.begin(); itSV != svs.end(); ++itSV) {
if (itSV->peSupport == 0) continue;
if ((itSV->chr == refIndex) && (itSV->svStart < (int32_t) hdr[file_c]->target_len[refIndex])) {
spanBp[itSV->svStart] = 1;
spanPoint.push_back(SpanPoint(itSV->svStart, itSV->svt, itSV->id));
}
if ((itSV->chr2 == refIndex) && (itSV->svEnd < (int32_t) hdr[file_c]->target_len[refIndex])) {
spanBp[itSV->svEnd] = 1;
spanPoint.push_back(SpanPoint(itSV->svEnd, itSV->svt, itSV->id));
}
}
std::sort(spanPoint.begin(), spanPoint.end(), SortBp<SpanPoint>());
// Count reads
hts_itr_t* iter = sam_itr_queryi(idx[file_c], refIndex, 0, hdr[file_c]->target_len[refIndex]);
bam1_t* rec = bam_init1();
int32_t lastAlignedPos = 0;
std::set<std::size_t> lastAlignedPosReads;
while (sam_itr_next(samfile[file_c], iter, rec) >= 0) {
if (rec->core.flag & (BAM_FSECONDARY | BAM_FQCFAIL | BAM_FDUP | BAM_FSUPPLEMENTARY | BAM_FUNMAP | BAM_FMUNMAP)) continue;
if (rec->core.qual < c.minGenoQual) continue;
// Count aligned basepair (small InDels)
{
uint32_t rp = 0; // reference pointer
uint32_t* cigar = bam_get_cigar(rec);
for (std::size_t i = 0; i < rec->core.n_cigar; ++i) {
if (bam_cigar_op(cigar[i]) == BAM_CMATCH) {
for(std::size_t k = 0; k<bam_cigar_oplen(cigar[i]);++k) {
if ((rec->core.pos + rp < hdr[file_c]->target_len[refIndex]) && (covBases[rec->core.pos + rp] < maxCoverage - 1)) ++covBases[rec->core.pos + rp];
++rp;
}
} else if (bam_cigar_op(cigar[i]) == BAM_CDEL) {
rp += bam_cigar_oplen(cigar[i]);
} else if (bam_cigar_op(cigar[i]) == BAM_CREF_SKIP) {
rp += bam_cigar_oplen(cigar[i]);
}
}
}
// Any (leading) soft clip
bool hasSoftClip = false;
bool hasClip = false;
int32_t leadingSC = 0;
uint32_t* cigar = bam_get_cigar(rec);
for (std::size_t i = 0; i < rec->core.n_cigar; ++i) {
if (bam_cigar_op(cigar[i]) == BAM_CSOFT_CLIP) {
hasClip = true;
hasSoftClip = true;
if (i == 0) leadingSC = bam_cigar_oplen(cigar[i]);
} else if (bam_cigar_op(cigar[i]) == BAM_CHARD_CLIP) hasClip = true;
}
// Check read length for junction annotation
if (rec->core.l_qseq >= (2 * c.minimumFlankSize)) {
bool bpvalid = false;
int32_t rbegin = std::max(0, (int32_t) rec->core.pos - leadingSC);
for(int32_t k = rbegin; ((k < (rec->core.pos + rec->core.l_qseq)) && (k < (int32_t) hdr[file_c]->target_len[refIndex])); ++k) {
if (bpOccupied[k]) {
bpvalid = true;
break;
}
}
if (bpvalid) {
// Fetch all relevant SVs
typename TBpRegion::iterator itBp = std::lower_bound(bpRegion[refIndex].begin(), bpRegion[refIndex].end(), BpRegion(rbegin), SortBp<BpRegion>());
for(; ((itBp != bpRegion[refIndex].end()) && (rec->core.pos + rec->core.l_qseq >= itBp->bppos)); ++itBp) {
if ((countMap[file_c][itBp->id].ref.size() + countMap[file_c][itBp->id].alt.size()) >= c.maxGenoReadCount) continue;
// Read spans breakpoint?
if ((hasSoftClip) || ((!hasClip) && (rec->core.pos + c.minimumFlankSize + itBp->homLeft <= itBp->bppos) && (rec->core.pos + rec->core.l_qseq >= itBp->bppos + c.minimumFlankSize + itBp->homRight))) {
std::string consProbe = consProbeArr[itBp->bpPoint][itBp->id];
std::string refProbe = refProbeArr[itBp->bpPoint][itBp->id];
// Get sequence
std::string sequence;
sequence.resize(rec->core.l_qseq);
uint8_t* seqptr = bam_get_seq(rec);
for (int i = 0; i < rec->core.l_qseq; ++i) sequence[i] = "=ACMGRSVTWYHKDBN"[bam_seqi(seqptr, i)];
_adjustOrientation(sequence, itBp->bpPoint, itBp->svt);
// Compute alignment to alternative haplotype
typedef boost::multi_array<char, 2> TAlign;
TAlign alignAlt;
DnaScore<int> simple(5, -4, -4, -4);
AlignConfig<true, false> semiglobal;
int32_t scoreA = needle(consProbe, sequence, alignAlt, semiglobal, simple);
int32_t scoreAltThreshold = (int32_t) (c.flankQuality * consProbe.size() * simple.match + (1.0 - c.flankQuality) * consProbe.size() * simple.mismatch);
double scoreAlt = (double) scoreA / (double) scoreAltThreshold;
// Compute alignment to reference haplotype
TAlign alignRef;
int32_t scoreR = needle(refProbe, sequence, alignRef, semiglobal, simple);
int32_t scoreRefThreshold = (int32_t) (c.flankQuality * refProbe.size() * simple.match + (1.0 - c.flankQuality) * refProbe.size() * simple.mismatch);
double scoreRef = (double) scoreR / (double) scoreRefThreshold;
// Any confident alignment?
if ((scoreRef > 1) || (scoreAlt > 1)) {
// Debug alignment to REF and ALT
//std::cerr << "Alt:\t" << scoreAlt << "\tRef:\t" << scoreRef << std::endl;
//for(TAIndex i = 0; i< (TAIndex) alignAlt.shape()[0]; ++i) {
//for(TAIndex j = 0; j< (TAIndex) alignAlt.shape()[1]; ++j) std::cerr << alignAlt[i][j];
//std::cerr << std::endl;
//}
//for(TAIndex i = 0; i< (TAIndex) alignRef.shape()[0]; ++i) {
//for(TAIndex j = 0; j< (TAIndex) alignRef.shape()[1]; ++j) std::cerr << alignRef[i][j];
//std::cerr << std::endl;
//}
if (scoreRef > scoreAlt) {
// Account for reference bias
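// Only every second REF-supporting read is kept (the modulo-2 test below), halving REF evidence.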
if (++refAlignedReadCount[file_c][itBp->id] % 2) {
TQuality quality;
quality.resize(rec->core.l_qseq);
uint8_t* qualptr = bam_get_qual(rec);
for (int i = 0; i < rec->core.l_qseq; ++i) quality[i] = qualptr[i];
uint32_t rq = _getAlignmentQual(alignRef, quality);
if (rq >= c.minGenoQual) {
uint8_t* hpptr = bam_aux_get(rec, "HP");
#pragma omp critical
{
countMap[file_c][itBp->id].ref.push_back((uint8_t) std::min(rq, (uint32_t) rec->core.qual));
if (hpptr) {
c.isHaplotagged = true;
int hap = bam_aux2i(hpptr);
if (hap == 1) ++countMap[file_c][itBp->id].refh1;
else ++countMap[file_c][itBp->id].refh2;
}
}
}
}
} else {
TQuality quality;
quality.resize(rec->core.l_qseq);
uint8_t* qualptr = bam_get_qual(rec);
for (int i = 0; i < rec->core.l_qseq; ++i) quality[i] = qualptr[i];
uint32_t aq = _getAlignmentQual(alignAlt, quality);
if (aq >= c.minGenoQual) {
uint8_t* hpptr = bam_aux_get(rec, "HP");
#pragma omp critical
{
if (c.hasDumpFile) {
std::string svid(_addID(itBp->svt));
std::string padNumber = boost::lexical_cast<std::string>(itBp->id);
padNumber.insert(padNumber.begin(), 8 - padNumber.length(), '0');
svid += padNumber;
dumpOut << svid << "\t" << c.files[file_c].string() << "\t" << bam_get_qname(rec) << "\t" << hdr[file_c]->target_name[rec->core.tid] << "\t" << rec->core.pos << "\t" << hdr[file_c]->target_name[rec->core.mtid] << "\t" << rec->core.mpos << "\t" << (int32_t) rec->core.qual << "\tSR" << std::endl;
}
countMap[file_c][itBp->id].alt.push_back((uint8_t) std::min(aq, (uint32_t) rec->core.qual));
if (hpptr) {
c.isHaplotagged = true;
int hap = bam_aux2i(hpptr);
if (hap == 1) ++countMap[file_c][itBp->id].alth1;
else ++countMap[file_c][itBp->id].alth2;
}
}
}
}
}
}
}
}
}
// Read-count and spanning annotation
if ((!(rec->core.flag & BAM_FPAIRED)) || (!svOnChr[rec->core.mtid])) continue;
// Clean-up the read store for identical alignment positions
if (rec->core.pos > lastAlignedPos) {
lastAlignedPosReads.clear();
lastAlignedPos = rec->core.pos;
}
if (_firstPairObs(rec, lastAlignedPosReads)) {
// First read
lastAlignedPosReads.insert(hash_string(bam_get_qname(rec)));
std::size_t hv = hash_pair(rec);
if (rec->core.tid == rec->core.mtid) {
qualities[hv] = rec->core.qual;
clip[hv] = hasSoftClip;
} else {
qualitiestra[hv] = rec->core.qual;
cliptra[hv] = hasSoftClip;
}
} else {
// Second read
std::size_t hv = hash_pair_mate(rec);
uint8_t pairQuality = 0;
bool pairClip = false;
if (rec->core.tid == rec->core.mtid) {
if (qualities.find(hv) == qualities.end()) continue; // Mate discarded
pairQuality = std::min((uint8_t) qualities[hv], (uint8_t) rec->core.qual);
if ((clip[hv]) || (hasSoftClip)) pairClip = true;
qualities[hv] = 0;
clip[hv] = false;
} else {
if (qualitiestra.find(hv) == qualitiestra.end()) continue; // Mate discarded
pairQuality = std::min((uint8_t) qualitiestra[hv], (uint8_t) rec->core.qual);
if ((cliptra[hv]) || (hasSoftClip)) pairClip = true;
qualitiestra[hv] = 0;
cliptra[hv] = false;
}
// Pair quality
if (pairQuality < c.minGenoQual) continue; // Low quality pair
// Read-depth fragment counting
if (rec->core.tid == rec->core.mtid) {
// Count mid point (fragment counting)
int32_t midPoint = rec->core.pos + halfAlignmentLength(rec);
if ((midPoint < (int32_t) hdr[file_c]->target_len[refIndex]) && (covFragment[midPoint] < maxCoverage - 1)) ++covFragment[midPoint];
}
// Spanning counting
int32_t outerISize = 0;
if (rec->core.pos < rec->core.mpos) outerISize = rec->core.mpos + rec->core.l_qseq - rec->core.pos;
else outerISize = rec->core.pos + rec->core.l_qseq - rec->core.mpos;
// Get the library information
if (sampleLib[file_c].median == 0) continue; // Single-end or invalid library
// Normal spanning pair
if ((!pairClip) && (getSVType(rec->core) == 2) && (outerISize >= sampleLib[file_c].minNormalISize) && (outerISize <= sampleLib[file_c].maxNormalISize) && (rec->core.tid==rec->core.mtid)) {
// Take the central 80% of the outer insert size as the spanned interval
int32_t spanlen = 0.8 * outerISize;
int32_t pbegin = std::min((int32_t) rec->core.pos, (int32_t) rec->core.mpos);
int32_t st = pbegin + (outerISize - spanlen) / 2;
bool spanvalid = false;
for(int32_t i = st; ((i < (st + spanlen)) && (i < (int32_t) hdr[file_c]->target_len[refIndex])); ++i) {
if (spanBp[i]) {
spanvalid = true;
break;
}
}
if (spanvalid) {
// Fetch all relevant SVs
typename TSpanPoint::iterator itSpan = std::lower_bound(spanPoint.begin(), spanPoint.end(), SpanPoint(st), SortBp<SpanPoint>());
for(; ((itSpan != spanPoint.end()) && (st + spanlen >= itSpan->bppos)); ++itSpan) {
// Account for reference bias
if (++refAlignedSpanCount[file_c][itSpan->id] % 2) {
uint8_t* hpptr = bam_aux_get(rec, "HP");
#pragma omp critical
{
spanMap[file_c][itSpan->id].ref.push_back(pairQuality);
if (hpptr) {
c.isHaplotagged = true;
int hap = bam_aux2i(hpptr);
if (hap == 1) ++spanMap[file_c][itSpan->id].refh1;
else ++spanMap[file_c][itSpan->id].refh2;
}
}
}
}
}
}
// Abnormal spanning coverage
if ((getSVType(rec->core) != 2) || (outerISize < sampleLib[file_c].minNormalISize) || (outerISize > sampleLib[file_c].maxNormalISize) || (rec->core.tid!=rec->core.mtid)) {
// SV type
int32_t svt = _isizeMappingPos(rec, sampleLib[file_c].maxISizeCutoff);
if (svt == -1) continue;
// Spanning a breakpoint?
bool spanvalid = false;
int32_t pbegin = rec->core.pos;
int32_t pend = std::min((int32_t) rec->core.pos + sampleLib[file_c].maxNormalISize, (int32_t) hdr[file_c]->target_len[refIndex]);
if (rec->core.flag & BAM_FREVERSE) {
pbegin = std::max(0, (int32_t) rec->core.pos + rec->core.l_qseq - sampleLib[file_c].maxNormalISize);
pend = std::min((int32_t) rec->core.pos + rec->core.l_qseq, (int32_t) hdr[file_c]->target_len[refIndex]);
}
for(int32_t i = pbegin; i < pend; ++i) {
if (spanBp[i]) {
spanvalid = true;
break;
}
}
if (spanvalid) {
// Fetch all relevant SVs
typename TSpanPoint::iterator itSpan = std::lower_bound(spanPoint.begin(), spanPoint.end(), SpanPoint(pbegin), SortBp<SpanPoint>());
for(; ((itSpan != spanPoint.end()) && (pend >= itSpan->bppos)); ++itSpan) {
if (svt == itSpan->svt) {
uint8_t* hpptr = bam_aux_get(rec, "HP");
#pragma omp critical
{
if (c.hasDumpFile) {
std::string svid(_addID(itSpan->svt));
std::string padNumber = boost::lexical_cast<std::string>(itSpan->id);
padNumber.insert(padNumber.begin(), 8 - padNumber.length(), '0');
svid += padNumber;
dumpOut << svid << "\t" << c.files[file_c].string() << "\t" << bam_get_qname(rec) << "\t" << hdr[file_c]->target_name[rec->core.tid] << "\t" << rec->core.pos << "\t" << hdr[file_c]->target_name[rec->core.mtid] << "\t" << rec->core.mpos << "\t" << (int32_t) rec->core.qual << "\tPE" << std::endl;
}
spanMap[file_c][itSpan->id].alt.push_back(pairQuality);
if (hpptr) {
c.isHaplotagged = true;
int hap = bam_aux2i(hpptr);
if (hap == 1) ++spanMap[file_c][itSpan->id].alth1;
else ++spanMap[file_c][itSpan->id].alth2;
}
}
}
}
}
}
}
}
// Clean-up
bam_destroy1(rec);
hts_itr_destroy(iter);
qualities.clear();
clip.clear();
// Assign fragment and base counts to SVs
for(uint32_t i = 0; i < svs.size(); ++i) {
if (svs[i].chr == refIndex) {
// Small or large SV
bool smallSV = false;
int32_t halfSize = (svs[i].svEnd - svs[i].svStart)/2;
if ((_translocation(svs[i].svt)) || (svs[i].svt == 4)) {
halfSize = 500;
smallSV = true;
} else {
if ((svs[i].svEnd - svs[i].svStart) <= c.indelsize) smallSV = true;
}
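// Coverage is summed over three windows: the left flank, the SV span itself,
// and the right flank. Small SVs use per-base counts, large SVs use
// fragment midpoint counts.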
// Left region
int32_t lstart = std::max(svs[i].svStart - halfSize, 0);
int32_t lend = svs[i].svStart;
int32_t covbase = 0;
for(uint32_t k = lstart; ((k < (uint32_t) lend) && (k < hdr[0]->target_len[refIndex])); ++k) {
if (smallSV) covbase += covBases[k];
else covbase += covFragment[k];
}
covCount[file_c][svs[i].id].leftRC = covbase;
// Actual SV
covbase = 0;
int32_t mstart = svs[i].svStart;
int32_t mend = svs[i].svEnd;
if ((_translocation(svs[i].svt)) || (svs[i].svt == 4)) {
mstart = std::max(svs[i].svStart - halfSize, 0);
mend = std::min(svs[i].svStart + halfSize, (int32_t) hdr[0]->target_len[refIndex]);
}
for(uint32_t k = mstart; ((k < (uint32_t) mend) && (k < hdr[0]->target_len[refIndex])); ++k) {
if (smallSV) covbase += covBases[k];
else covbase += covFragment[k];
}
covCount[file_c][svs[i].id].rc = covbase;
// Right region
covbase = 0;
int32_t rstart = svs[i].svEnd;
int32_t rend = std::min(svs[i].svEnd + halfSize, (int32_t) hdr[0]->target_len[refIndex]);
if ((_translocation(svs[i].svt)) || (svs[i].svt == 4)) {
rstart = svs[i].svStart;
rend = std::min(svs[i].svStart + halfSize, (int32_t) hdr[0]->target_len[refIndex]);
}
for(uint32_t k = rstart; ((k < (uint32_t) rend) && (k < hdr[0]->target_len[refIndex])); ++k) {
if (smallSV) covbase += covBases[k];
else covbase += covFragment[k];
}
covCount[file_c][svs[i].id].rightRC = covbase;
}
}
}
}
// Clean-up
for(unsigned int file_c = 0; file_c < c.files.size(); ++file_c) {
bam_hdr_destroy(hdr[file_c]);
hts_idx_destroy(idx[file_c]);
sam_close(samfile[file_c]);
}
}
}
#endif
|
optimizer.c | /* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*
* Author: Qiming Sun <osirpt.sun@gmail.com>
*/
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <assert.h>
#include "cint.h"
#include "cvhf.h"
#include "optimizer.h"
#define MAX(I,J) ((I) > (J) ? (I) : (J))
int int2e_sph();
int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter,
int *atm, int natm, int *bas, int nbas, double *env);
void CVHFinit_optimizer(CVHFOpt **opt, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFOpt *opt0 = (CVHFOpt *)malloc(sizeof(CVHFOpt));
opt0->nbas = nbas;
opt0->direct_scf_cutoff = 1e-14;
opt0->q_cond = NULL;
opt0->dm_cond = NULL;
opt0->fprescreen = &CVHFnoscreen;
opt0->r_vkscreen = &CVHFr_vknoscreen;
*opt = opt0;
}
void CVHFdel_optimizer(CVHFOpt **opt)
{
CVHFOpt *opt0 = *opt;
if (!opt0) {
return;
}
if (opt0->q_cond) {
free(opt0->q_cond);
}
if (opt0->dm_cond) {
free(opt0->dm_cond);
}
free(opt0);
*opt = NULL;
}
int CVHFnoscreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
return 1;
}
int CVHFnr_schwarz_cond(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1;
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
return qijkl > opt->direct_scf_cutoff;
}
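/*
 * Illustrative note: the prescreen functions below rely on the
 * Cauchy-Schwarz bound |(ij|kl)| <= q_ij * q_kl with
 * q_ij = sqrt((ij|ij)) stored in q_cond, while dm_cond holds
 * shell-block maxima of the density matrix. Contributions whose bound
 * falls below direct_scf_cutoff are skipped; the factors of 4 reflect
 * the permutational weight of the Coulomb terms.
 */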
int CVHFnrs8_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
double *q_cond = opt->q_cond;
double *dm_cond = opt->dm_cond;
assert(q_cond);
assert(dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = q_cond[i*n+j] * q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&((4*dm_cond[j*n+i] > dmin)
|| (4*dm_cond[l*n+k] > dmin)
|| ( dm_cond[j*n+k] > dmin)
|| ( dm_cond[j*n+l] > dmin)
|| ( dm_cond[i*n+k] > dmin)
|| ( dm_cond[i*n+l] > dmin));
}
int CVHFnrs8_vj_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double direct_scf_cutoff = opt->direct_scf_cutoff;
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[k*n+l];
return qijkl > direct_scf_cutoff
&&((4*qijkl*opt->dm_cond[j*n+i] > direct_scf_cutoff)
|| (4*qijkl*opt->dm_cond[l*n+k] > direct_scf_cutoff));
}
int CVHFnrs8_vk_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int i = shls[0];
int j = shls[1];
int k = shls[2];
int l = shls[3];
int n = opt->nbas;
double *q_cond = opt->q_cond;
double *dm_cond = opt->dm_cond;
assert(q_cond);
assert(dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
assert(l < n);
double qijkl = q_cond[i*n+j] * q_cond[k*n+l];
double dmin = opt->direct_scf_cutoff / qijkl;
return qijkl > opt->direct_scf_cutoff
&&(( dm_cond[j*n+k] > dmin)
|| ( dm_cond[j*n+l] > dmin)
|| ( dm_cond[i*n+k] > dmin)
|| ( dm_cond[i*n+l] > dmin));
}
// Return a flag that decides whether to apply transpose01324.
int CVHFr_vknoscreen(int *shls, CVHFOpt *opt,
double **dms_cond, int n_dm, double *dm_atleast,
int *atm, int *bas, double *env)
{
int idm;
for (idm = 0; idm < n_dm; idm++) {
dms_cond[idm] = NULL;
}
*dm_atleast = 0;
return 1;
}
int CVHFnr3c2e_vj_pass1_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int n = opt->nbas;
int i = shls[0];
int j = shls[1];
// Be careful with the range of basis k, which is between nbas and
// nbas+nauxbas. See shls_slice in df_jk.get_j function.
int k = shls[2] - n;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
double direct_scf_cutoff = opt->direct_scf_cutoff;
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[n*n+k];
return qijkl > direct_scf_cutoff
&& (4*qijkl*opt->dm_cond[j*n+i] > direct_scf_cutoff);
}
int CVHFnr3c2e_vj_pass2_prescreen(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int n = opt->nbas;
int i = shls[0];
int j = shls[1];
// Be careful with the range of basis k, which is between nbas and
// nbas+nauxbas. See shls_slice in df_jk.get_j function.
int k = shls[2] - n;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
double direct_scf_cutoff = opt->direct_scf_cutoff;
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[n*n+k];
return qijkl > direct_scf_cutoff
&& (4*qijkl*opt->dm_cond[k] > direct_scf_cutoff);
}
int CVHFnr3c2e_schwarz_cond(int *shls, CVHFOpt *opt,
int *atm, int *bas, double *env)
{
if (!opt) {
return 1; // no screen
}
int n = opt->nbas;
int i = shls[0];
int j = shls[1];
// Be careful with the range of basis k, which is between nbas and
// nbas+nauxbas. See shls_slice in df_jk.get_j function.
int k = shls[2] - n;
assert(opt->q_cond);
assert(opt->dm_cond);
assert(i < n);
assert(j < n);
assert(k < n);
double qijkl = opt->q_cond[i*n+j] * opt->q_cond[n*n+k];
return qijkl > opt->direct_scf_cutoff;
}
void CVHFset_direct_scf_cutoff(CVHFOpt *opt, double cutoff)
{
opt->direct_scf_cutoff = cutoff;
}
double CVHFget_direct_scf_cutoff(CVHFOpt *opt)
{
return opt->direct_scf_cutoff;
}
void CVHFsetnr_direct_scf(CVHFOpt *opt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
/* This memory is released in CVHFdel_optimizer. It is unclear why
 * valgrind reports a memory leak here. */
if (opt->q_cond) {
free(opt->q_cond);
}
// nbas in the input arguments may differ from opt->nbas.
// Use opt->nbas because it is the value used by the prescreen functions.
nbas = opt->nbas;
opt->q_cond = (double *)malloc(sizeof(double) * nbas*nbas);
CVHFset_int2e_q_cond(intor, cintopt, opt->q_cond, ao_loc,
atm, natm, bas, nbas, env);
}
/*
* Non-relativistic 2-electron integrals
*/
void CVHFset_int2e_q_cond(int (*intor)(), CINTOpt *cintopt, double *q_cond,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
int shls_slice[] = {0, nbas};
const int cache_size = GTOmax_cache_size(intor, shls_slice, 1,
atm, natm, bas, nbas, env);
#pragma omp parallel
{
double qtmp, tmp;
int ij, i, j, di, dj, ish, jsh;
int shls[4];
double *cache = malloc(sizeof(double) * cache_size);
di = 0;
for (ish = 0; ish < nbas; ish++) {
dj = ao_loc[ish+1] - ao_loc[ish];
di = MAX(di, dj);
}
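// buf must hold the largest (ij|ij) shell quartet, hence di^4 doubles,
// where di is the maximum shell dimension found above.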
double *buf = malloc(sizeof(double) * di*di*di*di);
#pragma omp for schedule(dynamic, 4)
for (ij = 0; ij < nbas*(nbas+1)/2; ij++) {
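// Decode the flattened lower-triangular index ij into the shell pair
// (ish, jsh) with ish >= jsh; the 1e-7 guards against round-off in sqrt.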
ish = (int)(sqrt(2*ij+.25) - .5 + 1e-7);
jsh = ij - ish*(ish+1)/2;
di = ao_loc[ish+1] - ao_loc[ish];
dj = ao_loc[jsh+1] - ao_loc[jsh];
shls[0] = ish;
shls[1] = jsh;
shls[2] = ish;
shls[3] = jsh;
qtmp = 1e-100;
if (0 != (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env,
cintopt, cache)) {
for (i = 0; i < di; i++) {
for (j = 0; j < dj; j++) {
tmp = fabs(buf[i+di*j+di*dj*i+di*dj*di*j]);
qtmp = MAX(qtmp, tmp);
} }
qtmp = sqrt(qtmp);
}
q_cond[ish*nbas+jsh] = qtmp;
q_cond[jsh*nbas+ish] = qtmp;
}
free(buf);
free(cache);
}
}
void CVHFset_q_cond(CVHFOpt *opt, double *q_cond, int len)
{
if (opt->q_cond) {
free(opt->q_cond);
}
opt->q_cond = (double *)malloc(sizeof(double) * len);
memcpy(opt->q_cond, q_cond, sizeof(double) * len);
}
void CVHFsetnr_direct_scf_dm(CVHFOpt *opt, double *dm, int nset, int *ao_loc,
int *atm, int natm, int *bas, int nbas, double *env)
{
if (opt->dm_cond) { // do not reuse opt->dm_cond because nset may differ between calls
free(opt->dm_cond);
}
// nbas in the input arguments may differ from opt->nbas.
// Use opt->nbas because it is the value used by the prescreen functions.
nbas = opt->nbas;
opt->dm_cond = (double *)malloc(sizeof(double) * nbas*nbas);
memset(opt->dm_cond, 0, sizeof(double)*nbas*nbas);
const size_t nao = ao_loc[nbas];
double dmax, tmp;
int i, j, ish, jsh;
int iset;
double *pdm;
for (ish = 0; ish < nbas; ish++) {
for (jsh = 0; jsh <= ish; jsh++) {
dmax = 0;
for (iset = 0; iset < nset; iset++) {
pdm = dm + nao*nao*iset;
for (i = ao_loc[ish]; i < ao_loc[ish+1]; i++) {
for (j = ao_loc[jsh]; j < ao_loc[jsh+1]; j++) {
// symmetrize dm_cond because nrs8_prescreen only tests the lower (or upper)
// triangular part of dm_cond. Without the symmetrization, some integrals may be
// incorrectly skipped.
tmp = .5 * (fabs(pdm[i*nao+j]) + fabs(pdm[j*nao+i]));
dmax = MAX(dmax, tmp);
} }
}
opt->dm_cond[ish*nbas+jsh] = dmax;
opt->dm_cond[jsh*nbas+ish] = dmax;
} }
}
void CVHFset_dm_cond(CVHFOpt *opt, double *dm_cond, int len)
{
if (opt->dm_cond) {
free(opt->dm_cond);
}
opt->dm_cond = (double *)malloc(sizeof(double) * len);
memcpy(opt->dm_cond, dm_cond, sizeof(double) * len);
}
/*
*************************************************
*/
void CVHFnr_optimizer(CVHFOpt **vhfopt, int (*intor)(), CINTOpt *cintopt,
int *ao_loc, int *atm, int natm,
int *bas, int nbas, double *env)
{
CVHFinit_optimizer(vhfopt, atm, natm, bas, nbas, env);
(*vhfopt)->fprescreen = &CVHFnrs8_prescreen;
CVHFsetnr_direct_scf(*vhfopt, intor, cintopt, ao_loc,
atm, natm, bas, nbas, env);
}
|
hermv_c_dia_u_lo.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef _OPENMP
#include <omp.h>
#endif
#include <memory.h>
#include <stdlib.h>
alphasparse_status_t
ONAME(const ALPHA_Complex alpha,
const ALPHA_SPMAT_DIA *A,
const ALPHA_Complex *x,
const ALPHA_Complex beta,
ALPHA_Complex *y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
if(m != n) return ALPHA_SPARSE_STATUS_INVALID_VALUE;
const ALPHA_INT thread_num = alpha_get_thread_num();
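/* Thread-private accumulators: each thread writes into its own length-m
 * buffer so diagonals processed in parallel never race on the same y
 * entries; the buffers are reduced into y after the diagonal loop. */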
ALPHA_Complex** tmp = (ALPHA_Complex**)malloc(sizeof(ALPHA_Complex*) * thread_num);
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(int i = 0; i < thread_num; ++i)
{
tmp[i] = malloc(sizeof(ALPHA_Complex) * m);
memset(tmp[i], 0, sizeof(ALPHA_Complex) * m);
}
const ALPHA_INT diags = A->ndiag;
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < diags; ++i)
{
const ALPHA_INT threadId = alpha_get_thread_id();
const ALPHA_INT dis = A->distance[i];
if(dis < 0)
{
const ALPHA_INT row_start = -dis;
const ALPHA_INT col_start = 0;
const ALPHA_INT nnz = m + dis;
const ALPHA_INT start = i * A->lval;
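/* Each stored element A(r,c) of the strict lower triangle contributes
 * alpha*A(r,c)*x[c] to y[r] and, by Hermitian symmetry,
 * alpha*conj(A(r,c))*x[r] to y[c]. */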
for(ALPHA_INT j = 0; j < nnz; ++j)
{
ALPHA_Complex v,v_c;
ALPHA_Complex val_orig = A->values[start + row_start + j];
ALPHA_Complex val_conj = {val_orig.real,-val_orig.imag};
alpha_mul(v, alpha, val_orig);
alpha_mul(v_c, alpha, val_conj);
alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
alpha_madde(tmp[threadId][col_start + j], v_c, x[row_start + j]);
}
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], beta, y[i]);
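/* Unit-diagonal variant: the implicit diagonal contributes alpha * x[i]. */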
alpha_madde(y[i], alpha, x[i]);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(y[i], y[i], tmp[j][i]);
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
array_section_implicit_capture.c | // RUN: %libomptarget-compile-generic -fopenmp-version=51
// RUN: %libomptarget-run-generic 2>&1 \
// RUN: | %fcheck-generic
#include <stdio.h>
#include <stdlib.h>
#define N 1024
#define FROM 64
#define LENGTH 128
int main() {
float *A = (float *)malloc(N * sizeof(float));
float *B = (float *)malloc(N * sizeof(float));
float *C = (float *)malloc(N * sizeof(float));
for (int i = 0; i < N; i++) {
C[i] = 0.0;
}
for (int i = 0; i < N; i++) {
A[i] = i;
B[i] = 2 * i;
}
#pragma omp target enter data map(to : A [FROM:LENGTH], B [FROM:LENGTH])
#pragma omp target enter data map(alloc : C [FROM:LENGTH])
// A, B and C have been mapped starting at index FROM, but inside the kernel
// they are captured implicitly so the library must look them up using their
// base address.
#pragma omp target
{
for (int i = FROM; i < FROM + LENGTH; i++) {
C[i] = A[i] + B[i];
}
}
#pragma omp target exit data map(from : C [FROM:LENGTH])
#pragma omp target exit data map(delete : A [FROM:LENGTH], B [FROM:LENGTH])
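// After the "delete" exit clause the device copies of A and B are
// released; the host allocations remain valid for the check below.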
int errors = 0;
for (int i = FROM; i < FROM + LENGTH; i++)
if (C[i] != A[i] + B[i])
++errors;
// CHECK: Success
if (errors)
fprintf(stderr, "Failure\n");
else
fprintf(stderr, "Success\n");
free(A);
free(B);
free(C);
return 0;
}
|
image.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% %
% MagickCore Image Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/attribute.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/channel.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colormap.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/delegate.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/geometry.h"
#include "MagickCore/histogram.h"
#include "MagickCore/image-private.h"
#include "MagickCore/list.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/magick-private.h"
#include "MagickCore/memory_.h"
#include "MagickCore/memory-private.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/random_.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/string-private.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/threshold.h"
#include "MagickCore/timer.h"
#include "MagickCore/timer-private.h"
#include "MagickCore/token.h"
#include "MagickCore/token-private.h"
#include "MagickCore/utility.h"
#include "MagickCore/utility-private.h"
#include "MagickCore/version.h"
#include "MagickCore/xwindow-private.h"
/*
Constant declaration.
*/
const char
BackgroundColor[] = "#ffffff", /* white */
BorderColor[] = "#dfdfdf", /* gray */
DefaultTileFrame[] = "15x15+3+3",
DefaultTileGeometry[] = "120x120+4+3>",
DefaultTileLabel[] = "%f\n%G\n%b",
ForegroundColor[] = "#000", /* black */
LoadImageTag[] = "Load/Image",
LoadImagesTag[] = "Load/Images",
MatteColor[] = "#bdbdbd", /* gray */
PSDensityGeometry[] = "72.0x72.0",
PSPageGeometry[] = "612x792",
SaveImageTag[] = "Save/Image",
SaveImagesTag[] = "Save/Images",
TransparentColor[] = "#00000000"; /* transparent black */
const double
DefaultResolution = 72.0;
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImage() returns a pointer to an image structure initialized to
% default values.
%
% The format of the AcquireImage method is:
%
% Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AcquireImage(const ImageInfo *image_info,
ExceptionInfo *exception)
{
const char
*option;
Image
*image;
MagickStatusType
flags;
/*
Allocate image structure.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
image=(Image *) AcquireCriticalMemory(sizeof(*image));
(void) memset(image,0,sizeof(*image));
/*
Initialize Image structure.
*/
(void) CopyMagickString(image->magick,"MIFF",MagickPathExtent);
image->storage_class=DirectClass;
image->depth=MAGICKCORE_QUANTUM_DEPTH;
image->colorspace=sRGBColorspace;
image->rendering_intent=PerceptualIntent;
image->gamma=1.000f/2.200f;
image->chromaticity.red_primary.x=0.6400f;
image->chromaticity.red_primary.y=0.3300f;
image->chromaticity.red_primary.z=0.0300f;
image->chromaticity.green_primary.x=0.3000f;
image->chromaticity.green_primary.y=0.6000f;
image->chromaticity.green_primary.z=0.1000f;
image->chromaticity.blue_primary.x=0.1500f;
image->chromaticity.blue_primary.y=0.0600f;
image->chromaticity.blue_primary.z=0.7900f;
image->chromaticity.white_point.x=0.3127f;
image->chromaticity.white_point.y=0.3290f;
image->chromaticity.white_point.z=0.3583f;
image->interlace=NoInterlace;
image->ticks_per_second=UndefinedTicksPerSecond;
image->compose=OverCompositeOp;
(void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color,
exception);
(void) QueryColorCompliance(BackgroundColor,AllCompliance,
&image->background_color,exception);
(void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color,
exception);
(void) QueryColorCompliance(TransparentColor,AllCompliance,
&image->transparent_color,exception);
GetTimerInfo(&image->timer);
image->cache=AcquirePixelCache(0);
image->channel_mask=DefaultChannels;
image->channel_map=AcquirePixelChannelMap();
image->blob=CloneBlobInfo((BlobInfo *) NULL);
image->timestamp=GetMagickTime();
image->debug=IsEventLogging();
image->reference_count=1;
image->semaphore=AcquireSemaphoreInfo();
image->signature=MagickCoreSignature;
if (image_info == (ImageInfo *) NULL)
return(image);
/*
Transfer image info.
*/
SetBlobExempt(image,image_info->file != (FILE *) NULL ? MagickTrue :
MagickFalse);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick_filename,image_info->filename,
MagickPathExtent);
(void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent);
if (image_info->size != (char *) NULL)
{
(void) ParseAbsoluteGeometry(image_info->size,&image->extract_info);
image->columns=image->extract_info.width;
image->rows=image->extract_info.height;
image->offset=image->extract_info.x;
image->extract_info.x=0;
image->extract_info.y=0;
}
if (image_info->extract != (char *) NULL)
{
RectangleInfo
geometry;
(void) memset(&geometry,0,sizeof(geometry));
flags=ParseAbsoluteGeometry(image_info->extract,&geometry);
if (((flags & XValue) != 0) || ((flags & YValue) != 0))
{
image->extract_info=geometry;
Swap(image->columns,image->extract_info.width);
Swap(image->rows,image->extract_info.height);
}
}
image->compression=image_info->compression;
image->quality=image_info->quality;
image->endian=image_info->endian;
image->interlace=image_info->interlace;
image->units=image_info->units;
if (image_info->density != (char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(image_info->density,&geometry_info);
if ((flags & RhoValue) != 0)
image->resolution.x=geometry_info.rho;
image->resolution.y=image->resolution.x;
if ((flags & SigmaValue) != 0)
image->resolution.y=geometry_info.sigma;
}
if (image_info->page != (char *) NULL)
{
char
*geometry;
image->page=image->extract_info;
geometry=GetPageGeometry(image_info->page);
(void) ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
if (image_info->depth != 0)
image->depth=image_info->depth;
image->dither=image_info->dither;
image->matte_color=image_info->matte_color;
image->background_color=image_info->background_color;
image->border_color=image_info->border_color;
image->transparent_color=image_info->transparent_color;
image->ping=image_info->ping;
image->progress_monitor=image_info->progress_monitor;
image->client_data=image_info->client_data;
if (image_info->cache != (void *) NULL)
ClonePixelCacheMethods(image->cache,image_info->cache);
/*
Set all global options that map to per-image settings.
*/
(void) SyncImageSettings(image_info,image,exception);
/*
Global options that are only set for new images.
*/
option=GetImageOption(image_info,"delay");
if (option != (const char *) NULL)
{
GeometryInfo
geometry_info;
flags=ParseGeometry(option,&geometry_info);
if ((flags & GreaterValue) != 0)
{
if (image->delay > (size_t) floor(geometry_info.rho+0.5))
image->delay=(size_t) floor(geometry_info.rho+0.5);
}
else
if ((flags & LessValue) != 0)
{
if (image->delay < (size_t) floor(geometry_info.rho+0.5))
image->delay=(size_t) floor(geometry_info.rho+0.5);
}
else
image->delay=(size_t) floor(geometry_info.rho+0.5);
if ((flags & SigmaValue) != 0)
image->ticks_per_second=(ssize_t) floor(geometry_info.sigma+0.5);
}
option=GetImageOption(image_info,"dispose");
if (option != (const char *) NULL)
image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions,
MagickFalse,option);
return(image);
}
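/*
  Usage sketch (illustrative; every function shown is defined in
  MagickCore):

    ExceptionInfo *exception = AcquireExceptionInfo();
    ImageInfo *image_info = AcquireImageInfo();
    Image *image = AcquireImage(image_info, exception);
    ...
    image = DestroyImage(image);
    image_info = DestroyImageInfo(image_info);
    exception = DestroyExceptionInfo(exception);
*/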
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireImageInfo() allocates the ImageInfo structure.
%
% The format of the AcquireImageInfo method is:
%
% ImageInfo *AcquireImageInfo(void)
%
*/
MagickExport ImageInfo *AcquireImageInfo(void)
{
ImageInfo
*image_info;
image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info));
GetImageInfo(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e N e x t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireNextImage() initializes the next image in a sequence to
% default values. The next member of image points to the newly allocated
% image. If there is a memory shortage, next is assigned NULL.
%
% The format of the AcquireNextImage method is:
%
% void AcquireNextImage(const ImageInfo *image_info,Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: Many of the image default values are set from this
% structure. For example, filename, compression, depth, background color,
% and others.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image,
ExceptionInfo *exception)
{
/*
Allocate image structure.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
image->next=AcquireImage(image_info,exception);
if (GetNextImageInList(image) == (Image *) NULL)
return;
(void) CopyMagickString(GetNextImageInList(image)->filename,image->filename,
MagickPathExtent);
if (image_info != (ImageInfo *) NULL)
(void) CopyMagickString(GetNextImageInList(image)->filename,
image_info->filename,MagickPathExtent);
DestroyBlob(GetNextImageInList(image));
image->next->blob=ReferenceBlob(image->blob);
image->next->endian=image->endian;
image->next->scene=image->scene+1;
image->next->previous=image;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A p p e n d I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AppendImages() takes all images from the current image pointer to the end
% of the image list and appends them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
% The current gravity setting affects how the image is justified in the
% final image.
%
% The format of the AppendImages method is:
%
% Image *AppendImages(const Image *images,const MagickBooleanType stack,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AppendImages(const Image *images,
const MagickBooleanType stack,ExceptionInfo *exception)
{
#define AppendImageTag "Append/Image"
CacheView
*append_view;
Image
*append_image;
MagickBooleanType
homogeneous_colorspace,
status;
MagickOffsetType
n;
PixelTrait
alpha_trait;
RectangleInfo
geometry;
register const Image
*next;
size_t
depth,
height,
number_images,
width;
ssize_t
x_offset,
y,
y_offset;
/*
Compute maximum area of appended area.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
alpha_trait=images->alpha_trait;
number_images=1;
width=images->columns;
height=images->rows;
depth=images->depth;
homogeneous_colorspace=MagickTrue;
next=GetNextImageInList(images);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->depth > depth)
depth=next->depth;
if (next->colorspace != images->colorspace)
homogeneous_colorspace=MagickFalse;
if (next->alpha_trait != UndefinedPixelTrait)
alpha_trait=BlendPixelTrait;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
continue;
}
width+=next->columns;
if (next->rows > height)
height=next->rows;
}
/*
Append images.
*/
append_image=CloneImage(images,width,height,MagickTrue,exception);
if (append_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse)
{
append_image=DestroyImage(append_image);
return((Image *) NULL);
}
if (homogeneous_colorspace == MagickFalse)
(void) SetImageColorspace(append_image,sRGBColorspace,exception);
append_image->depth=depth;
append_image->alpha_trait=alpha_trait;
append_image->page=images->page;
(void) SetImageBackgroundColor(append_image,exception);
status=MagickTrue;
x_offset=0;
y_offset=0;
next=images;
append_view=AcquireAuthenticCacheView(append_image,exception);
for (n=0; n < (MagickOffsetType) number_images; n++)
{
CacheView
*image_view;
MagickBooleanType
proceed;
SetGeometry(append_image,&geometry);
GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry);
if (stack != MagickFalse)
x_offset-=geometry.x;
else
y_offset-=geometry.y;
image_view=AcquireVirtualCacheView(next,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(next,next,next->rows,1)
#endif
for (y=0; y < (ssize_t) next->rows; y++)
{
MagickBooleanType
sync;
PixelInfo
pixel;
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception);
q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset,
next->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
GetPixelInfo(next,&pixel);
for (x=0; x < (ssize_t) next->columns; x++)
{
GetPixelInfoPixel(next,p,&pixel);
SetPixelViaPixelInfo(append_image,&pixel,q);
p+=GetPixelChannels(next);
q+=GetPixelChannels(append_image);
}
sync=SyncCacheViewAuthenticPixels(append_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (stack == MagickFalse)
{
x_offset+=(ssize_t) next->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) next->rows;
}
proceed=SetImageProgress(append_image,AppendImageTag,n,number_images);
if (proceed == MagickFalse)
break;
next=GetNextImageInList(next);
}
append_view=DestroyCacheView(append_view);
if (status == MagickFalse)
append_image=DestroyImage(append_image);
return(append_image);
}
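/*
  Usage sketch (illustrative): append an image list left-to-right; pass
  MagickTrue as the stack argument for a top-to-bottom append instead.

    Image *appended = AppendImages(images, MagickFalse, exception);
    if (appended != (Image *) NULL)
      appended = DestroyImage(appended);
*/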
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C a t c h I m a g e E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CatchImageException() returns if no exceptions are found in the image
% sequence, otherwise it determines the most severe exception and reports
% it as a warning or error depending on the severity.
%
% The format of the CatchImageException method is:
%
% ExceptionType CatchImageException(Image *image)
%
% A description of each parameter follows:
%
% o image: An image sequence.
%
*/
MagickExport ExceptionType CatchImageException(Image *image)
{
ExceptionInfo
*exception;
ExceptionType
severity;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
exception=AcquireExceptionInfo();
CatchException(exception);
severity=exception->severity;
exception=DestroyExceptionInfo(exception);
return(severity);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l i p I m a g e P a t h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ClipImagePath() sets the image clip mask based on any clipping path
% information, if it exists.
%
% The format of the ClipImagePath method is:
%
% MagickBooleanType ClipImagePath(Image *image,const char *pathname,
% const MagickBooleanType inside,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o pathname: name of clipping path resource. If name is preceded by #, use
% clipping path numbered by name.
%
% o inside: if non-zero, later operations take effect inside clipping path.
% Otherwise later operations take effect outside clipping path.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception)
{
return(ClipImagePath(image,"#1",MagickTrue,exception));
}
MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname,
const MagickBooleanType inside,ExceptionInfo *exception)
{
#define ClipImagePathTag "ClipPath/Image"
char
*property;
const char
*value;
Image
*clip_mask;
ImageInfo
*image_info;
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(pathname != NULL);
property=AcquireString(pathname);
(void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s",
pathname);
value=GetImageProperty(image,property,exception);
property=DestroyString(property);
if (value == (const char *) NULL)
{
ThrowFileException(exception,OptionError,"NoClipPathDefined",
image->filename);
return(MagickFalse);
}
image_info=AcquireImageInfo();
(void) CopyMagickString(image_info->filename,image->filename,
MagickPathExtent);
(void) ConcatenateMagickString(image_info->filename,pathname,
MagickPathExtent);
clip_mask=BlobToImage(image_info,value,strlen(value),exception);
image_info=DestroyImageInfo(image_info);
if (clip_mask == (Image *) NULL)
return(MagickFalse);
if (clip_mask->storage_class == PseudoClass)
{
(void) SyncImage(clip_mask,exception);
if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse)
return(MagickFalse);
}
if (inside == MagickFalse)
(void) NegateImage(clip_mask,MagickFalse,exception);
(void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent,
"8BIM:1999,2998:%s\nPS",pathname);
(void) SetImageMask(image,WritePixelMask,clip_mask,exception);
clip_mask=DestroyImage(clip_mask);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImage() copies an image and returns the copy as a new image object.
%
% If the specified columns and rows are 0, an exact copy of the image is
% returned, otherwise the pixel data is undefined and must be initialized
% with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On
% failure, a NULL image is returned and exception describes the reason for the
% failure.
%
% The format of the CloneImage method is:
%
% Image *CloneImage(const Image *image,const size_t columns,
%        const size_t rows,const MagickBooleanType detach,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: the number of columns in the cloned image.
%
% o rows: the number of rows in the cloned image.
%
% o detach: With a value other than 0, the cloned image is detached from
% its parent I/O stream.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CloneImage(const Image *image,const size_t columns,
const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception)
{
Image
*clone_image;
double
scale;
size_t
length;
/*
Clone the image.
*/
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if ((image->columns == 0) || (image->rows == 0))
{
(void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError,
"NegativeOrZeroImageSize","`%s'",image->filename);
return((Image *) NULL);
}
clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image));
(void) memset(clone_image,0,sizeof(*clone_image));
clone_image->signature=MagickCoreSignature;
clone_image->storage_class=image->storage_class;
clone_image->number_channels=image->number_channels;
clone_image->number_meta_channels=image->number_meta_channels;
clone_image->metacontent_extent=image->metacontent_extent;
clone_image->colorspace=image->colorspace;
clone_image->alpha_trait=image->alpha_trait;
clone_image->channels=image->channels;
clone_image->mask_trait=image->mask_trait;
clone_image->columns=image->columns;
clone_image->rows=image->rows;
clone_image->dither=image->dither;
clone_image->image_info=CloneImageInfo(image->image_info);
(void) CloneImageProfiles(clone_image,image);
(void) CloneImageProperties(clone_image,image);
(void) CloneImageArtifacts(clone_image,image);
GetTimerInfo(&clone_image->timer);
if (image->ascii85 != (void *) NULL)
Ascii85Initialize(clone_image);
clone_image->extent=image->extent;
clone_image->magick_columns=image->magick_columns;
clone_image->magick_rows=image->magick_rows;
clone_image->type=image->type;
clone_image->channel_mask=image->channel_mask;
clone_image->channel_map=ClonePixelChannelMap(image->channel_map);
(void) CopyMagickString(clone_image->magick_filename,image->magick_filename,
MagickPathExtent);
(void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent);
(void) CopyMagickString(clone_image->filename,image->filename,
MagickPathExtent);
clone_image->progress_monitor=image->progress_monitor;
clone_image->client_data=image->client_data;
clone_image->reference_count=1;
clone_image->next=image->next;
clone_image->previous=image->previous;
clone_image->list=NewImageList();
if (detach == MagickFalse)
clone_image->blob=ReferenceBlob(image->blob);
else
{
clone_image->next=NewImageList();
clone_image->previous=NewImageList();
clone_image->blob=CloneBlobInfo((BlobInfo *) NULL);
}
clone_image->ping=image->ping;
clone_image->debug=IsEventLogging();
clone_image->semaphore=AcquireSemaphoreInfo();
if (image->colormap != (PixelInfo *) NULL)
{
/*
Allocate and copy the image colormap.
*/
clone_image->colors=image->colors;
length=(size_t) image->colors;
clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1,
sizeof(*clone_image->colormap));
if (clone_image->colormap == (PixelInfo *) NULL)
{
clone_image=DestroyImage(clone_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
(void) memcpy(clone_image->colormap,image->colormap,length*
sizeof(*clone_image->colormap));
}
if ((columns == 0) || (rows == 0))
{
if (image->montage != (char *) NULL)
(void) CloneString(&clone_image->montage,image->montage);
if (image->directory != (char *) NULL)
(void) CloneString(&clone_image->directory,image->directory);
clone_image->cache=ReferencePixelCache(image->cache);
return(clone_image);
}
scale=1.0;
if (image->columns != 0)
scale=(double) columns/(double) image->columns;
clone_image->page.width=(size_t) floor(scale*image->page.width+0.5);
clone_image->page.x=(ssize_t) ceil(scale*image->page.x-0.5);
clone_image->tile_offset.x=(ssize_t) ceil(scale*image->tile_offset.x-0.5);
scale=1.0;
if (image->rows != 0)
scale=(double) rows/(double) image->rows;
clone_image->page.height=(size_t) floor(scale*image->page.height+0.5);
clone_image->page.y=(ssize_t) ceil(scale*image->page.y-0.5);
clone_image->tile_offset.y=(ssize_t) ceil(scale*image->tile_offset.y-0.5);
clone_image->cache=ClonePixelCache(image->cache);
if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse)
clone_image=DestroyImage(clone_image);
return(clone_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageInfo() makes a copy of the given image info structure. If
% NULL is specified, a new image info structure is created initialized to
% default values.
%
% The format of the CloneImageInfo method is:
%
% ImageInfo *CloneImageInfo(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info)
{
ImageInfo
*clone_info;
clone_info=AcquireImageInfo();
if (image_info == (ImageInfo *) NULL)
return(clone_info);
clone_info->compression=image_info->compression;
clone_info->temporary=image_info->temporary;
clone_info->adjoin=image_info->adjoin;
clone_info->antialias=image_info->antialias;
clone_info->scene=image_info->scene;
clone_info->number_scenes=image_info->number_scenes;
clone_info->depth=image_info->depth;
if (image_info->size != (char *) NULL)
(void) CloneString(&clone_info->size,image_info->size);
if (image_info->extract != (char *) NULL)
(void) CloneString(&clone_info->extract,image_info->extract);
if (image_info->scenes != (char *) NULL)
(void) CloneString(&clone_info->scenes,image_info->scenes);
if (image_info->page != (char *) NULL)
(void) CloneString(&clone_info->page,image_info->page);
clone_info->interlace=image_info->interlace;
clone_info->endian=image_info->endian;
clone_info->units=image_info->units;
clone_info->quality=image_info->quality;
if (image_info->sampling_factor != (char *) NULL)
(void) CloneString(&clone_info->sampling_factor,
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
(void) CloneString(&clone_info->server_name,image_info->server_name);
if (image_info->font != (char *) NULL)
(void) CloneString(&clone_info->font,image_info->font);
if (image_info->texture != (char *) NULL)
(void) CloneString(&clone_info->texture,image_info->texture);
if (image_info->density != (char *) NULL)
(void) CloneString(&clone_info->density,image_info->density);
clone_info->pointsize=image_info->pointsize;
clone_info->fuzz=image_info->fuzz;
clone_info->matte_color=image_info->matte_color;
clone_info->background_color=image_info->background_color;
clone_info->border_color=image_info->border_color;
clone_info->transparent_color=image_info->transparent_color;
clone_info->dither=image_info->dither;
clone_info->monochrome=image_info->monochrome;
clone_info->colorspace=image_info->colorspace;
clone_info->type=image_info->type;
clone_info->orientation=image_info->orientation;
clone_info->ping=image_info->ping;
clone_info->verbose=image_info->verbose;
clone_info->progress_monitor=image_info->progress_monitor;
clone_info->client_data=image_info->client_data;
clone_info->cache=image_info->cache;
if (image_info->cache != (void *) NULL)
clone_info->cache=ReferencePixelCache(image_info->cache);
if (image_info->profile != (void *) NULL)
clone_info->profile=(void *) CloneStringInfo((StringInfo *)
image_info->profile);
SetImageInfoFile(clone_info,image_info->file);
SetImageInfoBlob(clone_info,image_info->blob,image_info->length);
clone_info->stream=image_info->stream;
clone_info->custom_stream=image_info->custom_stream;
(void) CopyMagickString(clone_info->magick,image_info->magick,
MagickPathExtent);
(void) CopyMagickString(clone_info->unique,image_info->unique,
MagickPathExtent);
(void) CopyMagickString(clone_info->filename,image_info->filename,
MagickPathExtent);
clone_info->channel=image_info->channel;
(void) CloneImageOptions(clone_info,image_info);
clone_info->debug=IsEventLogging();
clone_info->signature=image_info->signature;
return(clone_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C o p y I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CopyImagePixels() copies pixels from the source image, as defined by the
% geometry, to the destination image at the specified offset.
%
% The format of the CopyImagePixels method is:
%
% MagickBooleanType CopyImagePixels(Image *image,const Image *source_image,
% const RectangleInfo *geometry,const OffsetInfo *offset,
% ExceptionInfo *exception);
%
% A description of each parameter follows:
%
% o image: the destination image.
%
% o source_image: the source image.
%
% o geometry: define the dimensions of the source pixel rectangle.
%
% o offset: define the offset in the destination image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType CopyImagePixels(Image *image,
const Image *source_image,const RectangleInfo *geometry,
const OffsetInfo *offset,ExceptionInfo *exception)
{
#define CopyImageTag "Copy/Image"
CacheView
*image_view,
*source_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(source_image != (Image *) NULL);
assert(geometry != (RectangleInfo *) NULL);
assert(offset != (OffsetInfo *) NULL);
if ((offset->x < 0) || (offset->y < 0) ||
((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) ||
((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows))
ThrowBinaryException(OptionError,"GeometryDoesNotContainImage",
image->filename);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
/*
Copy image pixels.
*/
status=MagickTrue;
progress=0;
source_view=AcquireVirtualCacheView(source_image,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,source_image,geometry->height,1)
#endif
for (y=0; y < (ssize_t) geometry->height; y++)
{
MagickBooleanType
sync;
register const Quantum
*magick_restrict p;
register ssize_t
x;
register Quantum
*magick_restrict q;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y,
geometry->width,1,exception);
q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y,
geometry->width,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) geometry->width; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait source_traits=GetPixelChannelTraits(source_image,channel);
if ((traits == UndefinedPixelTrait) ||
((traits & UpdatePixelTrait) == 0) ||
(source_traits == UndefinedPixelTrait))
continue;
SetPixelChannel(image,channel,p[i],q);
}
p+=GetPixelChannels(source_image);
q+=GetPixelChannels(image);
}
sync=SyncCacheViewAuthenticPixels(image_view,exception);
if (sync == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,CopyImageTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
source_view=DestroyCacheView(source_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImage() dereferences an image, deallocating memory associated with
% the image if the reference count becomes zero.
%
% The format of the DestroyImage method is:
%
% Image *DestroyImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
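/*
  DestroyImage() honors reference counting: it always returns NULL, but the
  image memory is only released once the last reference is gone.  A sketch
  (illustrative; `image` assumed valid):

    Image *shared=ReferenceImage(image);  // reference count is now 2
    shared=DestroyImage(shared);          // count drops to 1; nothing freed
    image=DestroyImage(image);            // count hits 0; memory released
*/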
MagickExport Image *DestroyImage(Image *image)
{
MagickBooleanType
destroy;
/*
Dereference image.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
destroy=MagickFalse;
LockSemaphoreInfo(image->semaphore);
image->reference_count--;
if (image->reference_count == 0)
destroy=MagickTrue;
UnlockSemaphoreInfo(image->semaphore);
if (destroy == MagickFalse)
return((Image *) NULL);
/*
Destroy image.
*/
DestroyImagePixels(image);
image->channel_map=DestroyPixelChannelMap(image->channel_map);
if (image->montage != (char *) NULL)
image->montage=DestroyString(image->montage);
if (image->directory != (char *) NULL)
image->directory=DestroyString(image->directory);
if (image->colormap != (PixelInfo *) NULL)
image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap);
if (image->geometry != (char *) NULL)
image->geometry=DestroyString(image->geometry);
DestroyImageProfiles(image);
DestroyImageProperties(image);
DestroyImageArtifacts(image);
if (image->ascii85 != (Ascii85Info *) NULL)
image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85);
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
DestroyBlob(image);
if (image->semaphore != (SemaphoreInfo *) NULL)
RelinquishSemaphoreInfo(&image->semaphore);
image->signature=(~MagickCoreSignature);
image=(Image *) RelinquishMagickMemory(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageInfo() deallocates memory associated with an ImageInfo
% structure.
%
% The format of the DestroyImageInfo method is:
%
% ImageInfo *DestroyImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
if (image_info->size != (char *) NULL)
image_info->size=DestroyString(image_info->size);
if (image_info->extract != (char *) NULL)
image_info->extract=DestroyString(image_info->extract);
if (image_info->scenes != (char *) NULL)
image_info->scenes=DestroyString(image_info->scenes);
if (image_info->page != (char *) NULL)
image_info->page=DestroyString(image_info->page);
if (image_info->sampling_factor != (char *) NULL)
image_info->sampling_factor=DestroyString(
image_info->sampling_factor);
if (image_info->server_name != (char *) NULL)
image_info->server_name=DestroyString(
image_info->server_name);
if (image_info->font != (char *) NULL)
image_info->font=DestroyString(image_info->font);
if (image_info->texture != (char *) NULL)
image_info->texture=DestroyString(image_info->texture);
if (image_info->density != (char *) NULL)
image_info->density=DestroyString(image_info->density);
if (image_info->cache != (void *) NULL)
image_info->cache=DestroyPixelCache(image_info->cache);
if (image_info->profile != (StringInfo *) NULL)
image_info->profile=(void *) DestroyStringInfo((StringInfo *)
image_info->profile);
DestroyImageOptions(image_info);
image_info->signature=(~MagickCoreSignature);
image_info=(ImageInfo *) RelinquishMagickMemory(image_info);
return(image_info);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ D i s a s s o c i a t e I m a g e S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DisassociateImageStream() disassociates the image stream. It checks if the
% blob of the specified image is referenced by other images. If the reference
% count is higher than 1, a new blob is assigned to the specified image.
%
% The format of the DisassociateImageStream method is:
%
% void DisassociateImageStream(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport void DisassociateImageStream(Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
DisassociateBlob(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfo() initializes image_info to default values.
%
% The format of the GetImageInfo method is:
%
% void GetImageInfo(ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
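/*
  Callers normally reach this through AcquireImageInfo(), which allocates
  the structure and then calls GetImageInfo() to apply the defaults.  A
  minimal sketch (illustrative):

    ImageInfo *image_info=AcquireImageInfo();
    // image_info->adjoin, dither, and antialias now default to MagickTrue
    image_info=DestroyImageInfo(image_info);
*/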
MagickExport void GetImageInfo(ImageInfo *image_info)
{
char
*synchronize;
ExceptionInfo
*exception;
/*
File and image dimension members.
*/
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info != (ImageInfo *) NULL);
(void) memset(image_info,0,sizeof(*image_info));
image_info->adjoin=MagickTrue;
image_info->interlace=NoInterlace;
image_info->channel=DefaultChannels;
image_info->quality=UndefinedCompressionQuality;
image_info->antialias=MagickTrue;
image_info->dither=MagickTrue;
synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE");
if (synchronize != (const char *) NULL)
{
image_info->synchronize=IsStringTrue(synchronize);
synchronize=DestroyString(synchronize);
}
exception=AcquireExceptionInfo();
(void) QueryColorCompliance(BackgroundColor,AllCompliance,
&image_info->background_color,exception);
(void) QueryColorCompliance(BorderColor,AllCompliance,
&image_info->border_color,exception);
(void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color,
exception);
(void) QueryColorCompliance(TransparentColor,AllCompliance,
&image_info->transparent_color,exception);
exception=DestroyExceptionInfo(exception);
image_info->debug=IsEventLogging();
image_info->signature=MagickCoreSignature;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageInfoFile() returns the image info file member.
%
% The format of the GetImageInfoFile method is:
%
% FILE *GetImageInfoFile(const ImageInfo *image_info)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
*/
MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info)
{
return(image_info->file);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMask() returns the mask associated with the image.
%
% The format of the GetImageMask method is:
%
% Image *GetImageMask(const Image *image,const PixelMask type,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o exception: return any errors or warnings in this structure.
%
*/
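/*
  A usage sketch (illustrative; `image` and `exception` assumed valid).  The
  result is NULL when no mask of the requested type is associated:

    Image *mask=GetImageMask(image,ReadPixelMask,exception);
    if (mask != (Image *) NULL)
      {
        // inspect the grayscale mask image here
        mask=DestroyImage(mask);
      }
*/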
MagickExport Image *GetImageMask(const Image *image,const PixelMask type,
ExceptionInfo *exception)
{
CacheView
*mask_view,
*image_view;
Image
*mask_image;
MagickBooleanType
status;
ssize_t
y;
/*
Get image mask.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
switch (type)
{
case ReadPixelMask:
{
if ((image->channels & ReadMaskChannel) == 0)
return((Image *) NULL);
break;
}
case WritePixelMask:
{
if ((image->channels & WriteMaskChannel) == 0)
return((Image *) NULL);
break;
}
default:
{
if ((image->channels & CompositeMaskChannel) == 0)
return((Image *) NULL);
break;
}
}
mask_image=AcquireImage((ImageInfo *) NULL,exception);
status=SetImageExtent(mask_image,image->columns,image->rows,exception);
if (status == MagickFalse)
return(DestroyImage(mask_image));
status=MagickTrue;
mask_image->alpha_trait=UndefinedPixelTrait;
(void) SetImageColorspace(mask_image,GRAYColorspace,exception);
image_view=AcquireVirtualCacheView(image,exception);
mask_view=AcquireAuthenticCacheView(mask_image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1,
exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
switch (type)
{
case ReadPixelMask:
{
SetPixelGray(mask_image,GetPixelReadMask(image,p),q);
break;
}
case WritePixelMask:
{
SetPixelGray(mask_image,GetPixelWriteMask(image,p),q);
break;
}
default:
{
SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q);
break;
}
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(mask_image);
}
if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse)
status=MagickFalse;
}
mask_view=DestroyCacheView(mask_view);
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
mask_image=DestroyImage(mask_image);
return(mask_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ G e t I m a g e R e f e r e n c e C o u n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageReferenceCount() returns the image reference count.
%
% The format of the GetImageReferenceCount method is:
%
% ssize_t GetImageReferenceCount(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport ssize_t GetImageReferenceCount(Image *image)
{
ssize_t
reference_count;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
LockSemaphoreInfo(image->semaphore);
reference_count=image->reference_count;
UnlockSemaphoreInfo(image->semaphore);
return(reference_count);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageVirtualPixelMethod() gets the "virtual pixels" method for the
% image. A virtual pixel is any pixel access that is outside the boundaries
% of the image cache.
%
% The format of the GetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(GetPixelCacheVirtualMethod(image));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I n t e r p r e t I m a g e F i l e n a m e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% InterpretImageFilename() interprets embedded characters in an image filename.
% The filename length is returned.
%
% The format of the InterpretImageFilename method is:
%
% size_t InterpretImageFilename(const ImageInfo *image_info,Image *image,
% const char *format,int value,char *filename,ExceptionInfo *exception)
%
% A description of each parameter follows.
%
% o image_info: the image info.
%
% o image: the image.
%
% o format: A filename describing the format to use to write the numeric
% argument. Only the first numeric format identifier is replaced.
%
% o value: Numeric value to substitute into format filename.
%
% o filename: return the formatted filename in this character buffer.
%
% o exception: return any errors or warnings in this structure.
%
*/
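/*
  A worked sketch (illustrative; `image_info` and `exception` assumed
  valid).  Given the format "frame%03d.png" and the value 7, the buffer
  receives "frame007.png":

    char filename[MagickPathExtent];
    size_t length=InterpretImageFilename(image_info,(Image *) NULL,
      "frame%03d.png",7,filename,exception);
*/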
MagickExport size_t InterpretImageFilename(const ImageInfo *image_info,
Image *image,const char *format,int value,char *filename,
ExceptionInfo *exception)
{
char
*q;
int
c;
MagickBooleanType
canonical;
register const char
*p;
ssize_t
field_width,
offset;
canonical=MagickFalse;
offset=0;
(void) CopyMagickString(filename,format,MagickPathExtent);
for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%'))
{
q=(char *) p+1;
if (*q == '%')
{
p=q+1;
continue;
}
field_width=0;
if (*q == '0')
field_width=(ssize_t) strtol(q,&q,10);
switch (*q)
{
case 'd':
case 'o':
case 'x':
{
q++;
c=(*q);
*q='\0';
(void) FormatLocaleString(filename+(p-format-offset),(size_t)
(MagickPathExtent-(p-format-offset)),p,value);
offset+=(4-field_width);
*q=c;
(void) ConcatenateMagickString(filename,q,MagickPathExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
case '[':
{
char
pattern[MagickPathExtent];
const char
*option;
register char
*r;
register ssize_t
i;
ssize_t
depth;
/*
Image option.
*/
if (strchr(p,']') == (char *) NULL)
break;
depth=1;
r=q+1;
for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++)
{
if (*r == '[')
depth++;
if (*r == ']')
depth--;
if (depth <= 0)
break;
pattern[i]=(*r++);
}
pattern[i]='\0';
if (LocaleNCompare(pattern,"filename:",9) != 0)
break;
option=(const char *) NULL;
if (image != (Image *) NULL)
option=GetImageProperty(image,pattern,exception);
if ((option == (const char *) NULL) && (image != (Image *) NULL))
option=GetImageArtifact(image,pattern);
if ((option == (const char *) NULL) &&
(image_info != (ImageInfo *) NULL))
option=GetImageOption(image_info,pattern);
if (option == (const char *) NULL)
break;
q--;
c=(*q);
*q='\0';
(void) CopyMagickString(filename+(p-format-offset),option,(size_t)
(MagickPathExtent-(p-format-offset)));
offset+=strlen(pattern)-strlen(option)+3;
*q=c;
(void) ConcatenateMagickString(filename,r+1,MagickPathExtent);
canonical=MagickTrue;
if (*(q-1) != '%')
break;
p++;
break;
}
default:
break;
}
}
if (canonical == MagickFalse)
(void) CopyMagickString(filename,format,MagickPathExtent);
else
for (q=filename; *q != '\0'; q++)
if ((*q == '%') && (*(q+1) == '%'))
(void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename)));
return(strlen(filename));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s H i g h D y n a m i c R a n g e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsHighDynamicRangeImage() returns MagickTrue if any pixel component is
% non-integer or exceeds the bounds of the quantum depth (e.g. for Q16,
% 0..65535).
%
% The format of the IsHighDynamicRangeImage method is:
%
% MagickBooleanType IsHighDynamicRangeImage(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image,
ExceptionInfo *exception)
{
#if !defined(MAGICKCORE_HDRI_SUPPORT)
(void) image;
(void) exception;
return(MagickFalse);
#else
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
pixel;
PixelTrait
traits;
traits=GetPixelChannelTraits(image,(PixelChannel) i);
if (traits == UndefinedPixelTrait)
continue;
pixel=(double) p[i];
if ((pixel < 0.0) || (pixel > QuantumRange) ||
(pixel != (double) ((QuantumAny) pixel)))
break;
}
p+=GetPixelChannels(image);
if (i < (ssize_t) GetPixelChannels(image))
status=MagickFalse;
}
if (x < (ssize_t) image->columns)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status != MagickFalse ? MagickFalse : MagickTrue);
#endif
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e O b j e c t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsImageObject() returns MagickTrue if the image sequence contains a valid
% set of image objects.
%
% The format of the IsImageObject method is:
%
% MagickBooleanType IsImageObject(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsImageObject(const Image *image)
{
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
if (p->signature != MagickCoreSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s T a i n t I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% IsTaintImage() returns MagickTrue if any pixel in the image has been altered
% since it was first constituted.
%
% The format of the IsTaintImage method is:
%
% MagickBooleanType IsTaintImage(const Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport MagickBooleanType IsTaintImage(const Image *image)
{
char
magick[MagickPathExtent],
filename[MagickPathExtent];
register const Image
*p;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
(void) CopyMagickString(magick,image->magick,MagickPathExtent);
(void) CopyMagickString(filename,image->filename,MagickPathExtent);
for (p=image; p != (Image *) NULL; p=GetNextImageInList(p))
{
if (p->taint != MagickFalse)
return(MagickTrue);
if (LocaleCompare(p->magick,magick) != 0)
return(MagickTrue);
if (LocaleCompare(p->filename,filename) != 0)
return(MagickTrue);
}
return(MagickFalse);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o d i f y I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ModifyImage() ensures that there is only a single reference to the image
% to be modified, updating the provided image pointer to point to a clone of
% the original image if necessary.
%
% The format of the ModifyImage method is:
%
% MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
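/*
  A copy-on-write sketch (illustrative; `image` and `exception` assumed
  valid).  After the call *image is singly referenced, so its pixels may be
  mutated without affecting other holders of the original:

    if (ModifyImage(&image,exception) == MagickFalse)
      CatchException(exception);
    // safe to modify image's pixels here
*/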
MagickExport MagickBooleanType ModifyImage(Image **image,
ExceptionInfo *exception)
{
Image
*clone_image;
assert(image != (Image **) NULL);
assert(*image != (Image *) NULL);
assert((*image)->signature == MagickCoreSignature);
if ((*image)->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename);
if (GetImageReferenceCount(*image) <= 1)
return(MagickTrue);
clone_image=CloneImage(*image,0,0,MagickTrue,exception);
LockSemaphoreInfo((*image)->semaphore);
(*image)->reference_count--;
UnlockSemaphoreInfo((*image)->semaphore);
*image=clone_image;
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w M a g i c k I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% NewMagickImage() creates a blank image canvas of the specified size and
% background color.
%
% The format of the NewMagickImage method is:
%
% Image *NewMagickImage(const ImageInfo *image_info,const size_t width,
% const size_t height,const PixelInfo *background,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o width: the image width.
%
% o height: the image height.
%
% o background: the image color.
%
% o exception: return any errors or warnings in this structure.
%
*/
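/*
  A sketch that creates a 640x480 white canvas (illustrative; `image_info`
  and `exception` assumed valid):

    Image *canvas;
    PixelInfo background;

    (void) QueryColorCompliance("white",AllCompliance,&background,exception);
    canvas=NewMagickImage(image_info,640,480,&background,exception);
    if (canvas == (Image *) NULL)
      CatchException(exception);
*/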
MagickExport Image *NewMagickImage(const ImageInfo *image_info,
const size_t width,const size_t height,const PixelInfo *background,
ExceptionInfo *exception)
{
CacheView
*image_view;
Image
*image;
MagickBooleanType
status;
ssize_t
y;
assert(image_info != (const ImageInfo *) NULL);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image_info->signature == MagickCoreSignature);
assert(background != (const PixelInfo *) NULL);
image=AcquireImage(image_info,exception);
image->columns=width;
image->rows=height;
image->colorspace=background->colorspace;
image->alpha_trait=background->alpha_trait;
image->fuzz=background->fuzz;
image->depth=background->depth;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,background,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e f e r e n c e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ReferenceImage() increments the reference count associated with an image
% returning a pointer to the image.
%
% The format of the ReferenceImage method is:
%
% Image *ReferenceImage(Image *image)
%
% A description of each parameter follows:
%
% o image: the image.
%
*/
MagickExport Image *ReferenceImage(Image *image)
{
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
LockSemaphoreInfo(image->semaphore);
image->reference_count++;
UnlockSemaphoreInfo(image->semaphore);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePage() resets the image page canvas and position.
%
% The format of the ResetImagePage method is:
%
% MagickBooleanType ResetImagePage(Image *image,const char *page)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o page: the relative page specification.
%
*/
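/*
  The page argument uses the usual geometry syntax.  A sketch (illustrative;
  `image` assumed valid) that clears the canvas size and position, the
  equivalent of the command-line "+repage":

    (void) ResetImagePage(image,"0x0+0+0");
*/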
MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page)
{
MagickStatusType
flags;
RectangleInfo
geometry;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
flags=ParseAbsoluteGeometry(page,&geometry);
if ((flags & WidthValue) != 0)
{
if ((flags & HeightValue) == 0)
geometry.height=geometry.width;
image->page.width=geometry.width;
image->page.height=geometry.height;
}
if ((flags & AspectValue) != 0)
{
if ((flags & XValue) != 0)
image->page.x+=geometry.x;
if ((flags & YValue) != 0)
image->page.y+=geometry.y;
}
else
{
if ((flags & XValue) != 0)
{
image->page.x=geometry.x;
if ((image->page.width == 0) && (geometry.x > 0))
image->page.width=image->columns+geometry.x;
}
if ((flags & YValue) != 0)
{
image->page.y=geometry.y;
if ((image->page.height == 0) && (geometry.y > 0))
image->page.height=image->rows+geometry.y;
}
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% R e s e t I m a g e P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ResetImagePixels() resets the image pixels, that is, all the pixel components
% are zeroed.
%
% The format of the ResetImagePixels method is:
%
% MagickBooleanType ResetImagePixels(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ResetImagePixels(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
size_t
length;
ssize_t
y;
void
*pixels;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
pixels=AcquirePixelCachePixels(image,&length,exception);
if (pixels != (void *) NULL)
{
/*
Reset in-core image pixels.
*/
(void) memset(pixels,0,length);
return(MagickTrue);
}
/*
Reset image pixels.
*/
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
(void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum));
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e A l p h a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageAlpha() sets the alpha levels of the image.
%
% The format of the SetImageAlpha method is:
%
% MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o alpha: the level of transparency: 0 is fully transparent and QuantumRange
% is fully opaque.
%
% o exception: return any errors or warnings in this structure.
%
*/
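/*
  A sketch that makes the image uniformly half transparent (illustrative;
  `image` and `exception` assumed valid):

    if (SetImageAlpha(image,(Quantum) (QuantumRange/2),exception) == MagickFalse)
      CatchException(exception);
*/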
MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
image->alpha_trait=BlendPixelTrait;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelAlpha(image,alpha,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e B a c k g r o u n d C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageBackgroundColor() initializes the image pixels to the image
% background color. The background color is defined by the background_color
% member of the image structure.
%
% The format of the SetImageBackgroundColor method is:
%
% MagickBooleanType SetImageBackgroundColor(Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageBackgroundColor(Image *image,
ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
PixelInfo
background;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
if ((image->background_color.alpha != OpaqueAlpha) &&
(image->alpha_trait == UndefinedPixelTrait))
(void) SetImageAlphaChannel(image,OnAlphaChannel,exception);
ConformPixelInfo(image,&image->background_color,&background,exception);
/*
Set image background color.
*/
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,&background,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C h a n n e l M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageChannelMask() sets the image channel mask from the specified channel
% mask.
%
% The format of the SetImageChannelMask method is:
%
% ChannelType SetImageChannelMask(Image *image,
% const ChannelType channel_mask)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o channel_mask: the channel mask.
%
*/
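/*
  The usual pattern is save/restore around an operation that should touch
  only selected channels (illustrative sketch; `image` assumed valid):

    ChannelType channel_mask=SetImageChannelMask(image,AlphaChannel);
    // operate on the alpha channel only
    (void) SetImageChannelMask(image,channel_mask);
*/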
MagickExport ChannelType SetImageChannelMask(Image *image,
const ChannelType channel_mask)
{
return(SetPixelChannelMask(image,channel_mask));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e C o l o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageColor() sets the entire image canvas to the specified color.
%
% The format of the SetImageColor method is:
%
% MagickBooleanType SetImageColor(Image *image,const PixelInfo *color,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o color: the color to assign to every pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageColor(Image *image,
const PixelInfo *color,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
assert(color != (const PixelInfo *) NULL);
image->colorspace=color->colorspace;
image->alpha_trait=color->alpha_trait;
image->fuzz=color->fuzz;
image->depth=color->depth;
status=MagickTrue;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
SetPixelViaPixelInfo(image,color,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e S t o r a g e C l a s s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageStorageClass() sets the image class: DirectClass for true color
% images or PseudoClass for colormapped images.
%
% The format of the SetImageStorageClass method is:
%
% MagickBooleanType SetImageStorageClass(Image *image,
% const ClassType storage_class,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o storage_class: The image class.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageStorageClass(Image *image,
const ClassType storage_class,ExceptionInfo *exception)
{
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image->storage_class=storage_class;
return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageExtent() sets the image size (i.e. columns & rows).
%
% The format of the SetImageExtent method is:
%
% MagickBooleanType SetImageExtent(Image *image,const size_t columns,
% const size_t rows,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o columns: The image width in pixels.
%
% o rows: The image height in pixels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns,
const size_t rows,ExceptionInfo *exception)
{
if ((columns == 0) || (rows == 0))
ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename);
image->columns=columns;
image->rows=rows;
if ((image->depth == 0) || (image->depth > (8*sizeof(MagickSizeType))))
ThrowBinaryException(ImageError,"ImageDepthNotSupported",image->filename);
return(SyncImagePixelCache(image,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S e t I m a g e I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfo() initializes the 'magick' field of the ImageInfo structure.
% It is set to a type of image format based on the prefix or suffix of the
% filename. For example, 'ps:image' returns PS indicating a Postscript image.
% JPEG is returned for this filename: 'image.jpg'. The filename prefix has
% precedence over the suffix. Use an optional index enclosed in brackets
% after a file name to specify a desired scene of a multi-resolution image
% format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value
% indicates success.
%
% The format of the SetImageInfo method is:
%
% MagickBooleanType SetImageInfo(ImageInfo *image_info,
% const unsigned int frames,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o frames: the number of images you intend to write.
%
% o exception: return any errors or warnings in this structure.
%
*/
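/*
  A sketch that resolves the format implied by a filename suffix
  (illustrative; `exception` assumed valid; a frames count of 1 avoids
  probing the file's leading bytes):

    ImageInfo *image_info=AcquireImageInfo();

    (void) CopyMagickString(image_info->filename,"image.jpg",MagickPathExtent);
    if (SetImageInfo(image_info,1,exception) != MagickFalse)
      ;  // image_info->magick now identifies the JPEG coder
    image_info=DestroyImageInfo(image_info);
*/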
MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info,
const unsigned int frames,ExceptionInfo *exception)
{
char
component[MagickPathExtent],
magic[MagickPathExtent],
*q;
const MagicInfo
*magic_info;
const MagickInfo
*magick_info;
ExceptionInfo
*sans_exception;
Image
*image;
MagickBooleanType
status;
register const char
*p;
ssize_t
count;
/*
Look for 'image.format' in filename.
*/
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
*component='\0';
GetPathComponent(image_info->filename,SubimagePath,component);
if (*component != '\0')
{
/*
Look for scene specification (e.g. img0001.pcd[4]).
*/
if (IsSceneGeometry(component,MagickFalse) == MagickFalse)
{
if (IsGeometry(component) != MagickFalse)
(void) CloneString(&image_info->extract,component);
}
else
{
size_t
first,
last;
(void) CloneString(&image_info->scenes,component);
image_info->scene=StringToUnsignedLong(image_info->scenes);
image_info->number_scenes=image_info->scene;
p=image_info->scenes;
for (q=(char *) image_info->scenes; *q != '\0'; p++)
{
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ','))
p++;
first=(size_t) strtol(p,&q,10);
last=first;
while (isspace((int) ((unsigned char) *q)) != 0)
q++;
if (*q == '-')
last=(size_t) strtol(q+1,&q,10);
if (first > last)
Swap(first,last);
if (first < image_info->scene)
image_info->scene=first;
if (last > image_info->number_scenes)
image_info->number_scenes=last;
p=q;
}
image_info->number_scenes-=image_info->scene-1;
}
}
*component='\0';
if (*image_info->magick == '\0')
GetPathComponent(image_info->filename,ExtensionPath,component);
#if defined(MAGICKCORE_ZLIB_DELEGATE)
if (*component != '\0')
if ((LocaleCompare(component,"gz") == 0) ||
(LocaleCompare(component,"Z") == 0) ||
(LocaleCompare(component,"svgz") == 0) ||
(LocaleCompare(component,"wmz") == 0))
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
#if defined(MAGICKCORE_BZLIB_DELEGATE)
if (*component != '\0')
if (LocaleCompare(component,"bz2") == 0)
{
char
path[MagickPathExtent];
(void) CopyMagickString(path,image_info->filename,MagickPathExtent);
path[strlen(path)-strlen(component)-1]='\0';
GetPathComponent(path,ExtensionPath,component);
}
#endif
image_info->affirm=MagickFalse;
sans_exception=AcquireExceptionInfo();
if ((*component != '\0') && (IsGlob(component) == MagickFalse))
{
MagickFormatType
format_type;
register ssize_t
i;
static const char
*format_type_formats[] =
{
"AUTOTRACE",
"BROWSE",
"DCRAW",
"EDIT",
"LAUNCH",
"MPEG:DECODE",
"MPEG:ENCODE",
"PRINT",
"PS:ALPHA",
"PS:CMYK",
"PS:COLOR",
"PS:GRAY",
"PS:MONO",
"SCAN",
"SHOW",
"WIN",
(char *) NULL
};
/*
User specified image format.
*/
(void) CopyMagickString(magic,component,MagickPathExtent);
LocaleUpper(magic);
/*
Look for explicit image formats.
*/
format_type=UndefinedFormatType;
magick_info=GetMagickInfo(magic,sans_exception);
if ((magick_info != (const MagickInfo *) NULL) &&
(magick_info->format_type != UndefinedFormatType))
format_type=magick_info->format_type;
i=0;
while ((format_type == UndefinedFormatType) &&
(format_type_formats[i] != (char *) NULL))
{
if ((*magic == *format_type_formats[i]) &&
(LocaleCompare(magic,format_type_formats[i]) == 0))
format_type=ExplicitFormatType;
i++;
}
if (format_type == UndefinedFormatType)
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
else
if (format_type == ExplicitFormatType)
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
}
if (LocaleCompare(magic,"RGB") == 0)
image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */
}
/*
Look for explicit 'format:image' in filename.
*/
*magic='\0';
GetPathComponent(image_info->filename,MagickPath,magic);
if (*magic == '\0')
{
(void) CopyMagickString(magic,image_info->magick,MagickPathExtent);
magick_info=GetMagickInfo(magic,sans_exception);
if (frames == 0)
GetPathComponent(image_info->filename,CanonicalPath,component);
else
GetPathComponent(image_info->filename,SubcanonicalPath,component);
(void) CopyMagickString(image_info->filename,component,MagickPathExtent);
}
else
{
const DelegateInfo
*delegate_info;
/*
User specified image format.
*/
LocaleUpper(magic);
magick_info=GetMagickInfo(magic,sans_exception);
delegate_info=GetDelegateInfo(magic,"*",sans_exception);
if (delegate_info == (const DelegateInfo *) NULL)
delegate_info=GetDelegateInfo("*",magic,sans_exception);
if (((magick_info != (const MagickInfo *) NULL) ||
(delegate_info != (const DelegateInfo *) NULL)) &&
(IsMagickConflict(magic) == MagickFalse))
{
image_info->affirm=MagickTrue;
(void) CopyMagickString(image_info->magick,magic,MagickPathExtent);
GetPathComponent(image_info->filename,CanonicalPath,component);
(void) CopyMagickString(image_info->filename,component,
MagickPathExtent);
}
}
sans_exception=DestroyExceptionInfo(sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
if ((image_info->adjoin != MagickFalse) && (frames > 1))
{
/*
Test for multiple image support (e.g. image%02d.png).
*/
(void) InterpretImageFilename(image_info,(Image *) NULL,
image_info->filename,(int) image_info->scene,component,exception);
if ((LocaleCompare(component,image_info->filename) != 0) &&
(strchr(component,'%') == (char *) NULL))
image_info->adjoin=MagickFalse;
}
if ((image_info->adjoin != MagickFalse) && (frames > 0))
{
/*
Some image formats do not support multiple frames per file.
*/
magick_info=GetMagickInfo(magic,exception);
if (magick_info != (const MagickInfo *) NULL)
if (GetMagickAdjoin(magick_info) == MagickFalse)
image_info->adjoin=MagickFalse;
}
if (image_info->affirm != MagickFalse)
return(MagickTrue);
if (frames == 0)
{
unsigned char
*magick;
size_t
magick_size;
/*
Determine the image format from the first few bytes of the file.
*/
magick_size=GetMagicPatternExtent(exception);
if (magick_size == 0)
return(MagickFalse);
image=AcquireImage(image_info,exception);
(void) CopyMagickString(image->filename,image_info->filename,
MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
if ((IsBlobSeekable(image) == MagickFalse) ||
(IsBlobExempt(image) != MagickFalse))
{
/*
Copy image to seekable temporary file.
*/
*component='\0';
status=ImageToFile(image,component,exception);
(void) CloseBlob(image);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
SetImageInfoFile(image_info,(FILE *) NULL);
(void) CopyMagickString(image->filename,component,MagickPathExtent);
status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception);
if (status == MagickFalse)
{
image=DestroyImage(image);
return(MagickFalse);
}
(void) CopyMagickString(image_info->filename,component,
MagickPathExtent);
image_info->temporary=MagickTrue;
}
magick=(unsigned char *) AcquireMagickMemory(magick_size);
if (magick == (unsigned char *) NULL)
{
(void) CloseBlob(image);
image=DestroyImage(image);
return(MagickFalse);
}
(void) memset(magick,0,magick_size);
count=ReadBlob(image,magick_size,magick);
(void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR);
(void) CloseBlob(image);
image=DestroyImage(image);
/*
Check magic cache.
*/
sans_exception=AcquireExceptionInfo();
magic_info=GetMagicInfo(magick,(size_t) count,sans_exception);
magick=(unsigned char *) RelinquishMagickMemory(magick);
if ((magic_info != (const MagicInfo *) NULL) &&
(GetMagicName(magic_info) != (char *) NULL))
{
/*
Try to use magick_info that was determined earlier by the extension
*/
if ((magick_info != (const MagickInfo *) NULL) &&
(GetMagickUseExtension(magick_info) != MagickFalse) &&
(LocaleCompare(magick_info->magick_module,GetMagicName(
magic_info)) == 0))
(void) CopyMagickString(image_info->magick,magick_info->name,
MagickPathExtent);
else
{
(void) CopyMagickString(image_info->magick,GetMagicName(
magic_info),MagickPathExtent);
magick_info=GetMagickInfo(image_info->magick,sans_exception);
}
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
return(MagickTrue);
}
magick_info=GetMagickInfo(image_info->magick,sans_exception);
if ((magick_info == (const MagickInfo *) NULL) ||
(GetMagickEndianSupport(magick_info) == MagickFalse))
image_info->endian=UndefinedEndian;
sans_exception=DestroyExceptionInfo(sans_exception);
}
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o B l o b %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoBlob() sets the image info blob member.
%
% The format of the SetImageInfoBlob method is:
%
% void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
% const size_t length)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o blob: the blob.
%
% o length: the blob length.
%
*/
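/*
  A sketch that reads an image from an in-memory buffer, which mirrors what
  BlobToImage() does internally (illustrative; `blob` and `length` are
  assumed to hold an encoded image and `exception` to be valid):

    ImageInfo *image_info=AcquireImageInfo();
    Image *image;

    SetImageInfoBlob(image_info,blob,length);
    image=ReadImage(image_info,exception);
    image_info=DestroyImageInfo(image_info);
*/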
MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob,
const size_t length)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->blob=(void *) blob;
image_info->length=length;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o C u s t o m S t r e a m %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoCustomStream() sets the image info custom stream handlers.
%
% The format of the SetImageInfoCustomStream method is:
%
% void SetImageInfoCustomStream(ImageInfo *image_info,
% CustomStreamInfo *custom_stream)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o custom_stream: your custom stream methods.
%
*/
MagickExport void SetImageInfoCustomStream(ImageInfo *image_info,
CustomStreamInfo *custom_stream)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->custom_stream=(CustomStreamInfo *) custom_stream;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e I n f o F i l e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageInfoFile() sets the image info file member.
%
% The format of the SetImageInfoFile method is:
%
% void SetImageInfoFile(ImageInfo *image_info,FILE *file)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o file: the file.
%
*/
MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file)
{
assert(image_info != (ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
if (image_info->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
image_info->filename);
image_info->file=file;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageMask() associates a mask with the image. The mask must be the same
% dimensions as the image.
%
% The format of the SetImageMask method is:
%
% MagickBooleanType SetImageMask(Image *image,const PixelMask type,
% const Image *mask,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o mask: the image mask.
%
% o exception: return any errors or warnings in this structure.
%
*/
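/*
  A sketch that attaches a grayscale mask and later removes it by passing a
  NULL mask (illustrative; `image`, `mask`, and `exception` assumed valid):

    if (SetImageMask(image,WritePixelMask,mask,exception) == MagickFalse)
      CatchException(exception);
    // ...masked operations here...
    (void) SetImageMask(image,WritePixelMask,(Image *) NULL,exception);
*/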
MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type,
const Image *mask,ExceptionInfo *exception)
{
CacheView
*mask_view,
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Set image mask.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (mask == (const Image *) NULL)
{
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
break;
}
}
return(SyncImagePixelCache(image,exception));
}
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels | ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels | WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
break;
}
}
if (SyncImagePixelCache(image,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
image->mask_trait=UpdatePixelTrait;
mask_view=AcquireVirtualCacheView(mask,exception);
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(mask,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception);
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
MagickRealType
intensity;
intensity=0.0;
if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows))
intensity=GetPixelIntensity(mask,p);
switch (type)
{
case ReadPixelMask:
{
SetPixelReadMask(image,ClampToQuantum(intensity),q);
break;
}
case WritePixelMask:
{
SetPixelWriteMask(image,ClampToQuantum(intensity),q);
break;
}
default:
{
SetPixelCompositeMask(image,ClampToQuantum(intensity),q);
break;
}
}
p+=GetPixelChannels(mask);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image->mask_trait=UndefinedPixelTrait;
mask_view=DestroyCacheView(mask_view);
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e R e g i o n M a s k %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageRegionMask() associates a mask with the image as defined by the
% specified region.
%
% The format of the SetImageRegionMask method is:
%
% MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type,
% const RectangleInfo *region,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the mask type, ReadPixelMask or WritePixelMask.
%
% o region: the mask region.
%
% o exception: return any errors or warnings in this structure.
%
*/
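/*
  A sketch that masks a 100x100 region anchored at (10,10); as the loop
  below shows, pixels inside the region receive a mask value of 0 and
  pixels outside QuantumRange (illustrative; `image` and `exception`
  assumed valid):

    RectangleInfo region;

    region.width=100;
    region.height=100;
    region.x=10;
    region.y=10;
    if (SetImageRegionMask(image,WritePixelMask,&region,exception) == MagickFalse)
      CatchException(exception);
*/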
MagickExport MagickBooleanType SetImageRegionMask(Image *image,
const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
ssize_t
y;
/*
Set image mask as defined by the region.
*/
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (region == (const RectangleInfo *) NULL)
{
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels & ~ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels & ~WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel);
break;
}
}
return(SyncImagePixelCache(image,exception));
}
switch (type)
{
case ReadPixelMask:
{
image->channels=(ChannelType) (image->channels | ReadMaskChannel);
break;
}
case WritePixelMask:
{
image->channels=(ChannelType) (image->channels | WriteMaskChannel);
break;
}
default:
{
image->channels=(ChannelType) (image->channels | CompositeMaskChannel);
break;
}
}
if (SyncImagePixelCache(image,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
image->mask_trait=UpdatePixelTrait;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
Quantum
pixel;
pixel=QuantumRange;
if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) &&
((y >= region->y) && (y < (region->y+(ssize_t) region->height))))
pixel=(Quantum) 0;
switch (type)
{
case ReadPixelMask:
{
SetPixelReadMask(image,pixel,q);
break;
}
case WritePixelMask:
{
SetPixelWriteMask(image,pixel,q);
break;
}
default:
{
SetPixelCompositeMask(image,pixel,q);
break;
}
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image->mask_trait=UndefinedPixelTrait;
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i r t u a l P i x e l M e t h o d %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageVirtualPixelMethod() sets the "virtual pixels" method for the
% image and returns the previous setting. A virtual pixel is any pixel access
% that is outside the boundaries of the image cache.
%
% The format of the SetImageVirtualPixelMethod() method is:
%
% VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
% const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o virtual_pixel_method: choose the type of virtual pixel.
%
% o exception: return any errors or warnings in this structure.
%
*/
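/*
  A minimal usage sketch (illustrative): select tiled virtual pixels for an
  operation that samples outside the image boundaries, then restore the
  previous method.  TileVirtualPixelMethod is one member of the
  VirtualPixelMethod enumeration.

    VirtualPixelMethod
      previous_method;

    previous_method=SetImageVirtualPixelMethod(image,TileVirtualPixelMethod,
      exception);
    ...
    (void) SetImageVirtualPixelMethod(image,previous_method,exception);
*/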
MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image,
const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception)
{
assert(image != (const Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S m u s h I m a g e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SmushImages() takes all images from the current image pointer to the end
% of the image list and smushes them to each other top-to-bottom if the
% stack parameter is true, otherwise left-to-right.
%
%  The current gravity setting now affects how the image is justified in the
% final image.
%
% The format of the SmushImages method is:
%
%      Image *SmushImages(const Image *images,const MagickBooleanType stack,
%        const ssize_t offset,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o stack: A value other than 0 stacks the images top-to-bottom.
%
% o offset: minimum distance in pixels between images.
%
% o exception: return any errors or warnings in this structure.
%
*/
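/*
  A minimal usage sketch (illustrative): smush an image sequence
  left-to-right with a minimum gap of 5 pixels; pass stack=MagickTrue to
  smush top-to-bottom instead.

    Image
      *smushed;

    smushed=SmushImages(images,MagickFalse,5,exception);
    if (smushed != (Image *) NULL)
      smushed=DestroyImage(smushed);
*/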
static ssize_t SmushXGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*left_view,
*right_view;
const Image
*left_image,
*right_image;
RectangleInfo
left_geometry,
right_geometry;
register const Quantum
*p;
register ssize_t
i,
y;
size_t
gap;
ssize_t
x;
if (images->previous == (Image *) NULL)
return(0);
right_image=images;
SetGeometry(smush_image,&right_geometry);
GravityAdjustGeometry(right_image->columns,right_image->rows,
right_image->gravity,&right_geometry);
left_image=images->previous;
SetGeometry(smush_image,&left_geometry);
GravityAdjustGeometry(left_image->columns,left_image->rows,
left_image->gravity,&left_geometry);
gap=right_image->columns;
left_view=AcquireVirtualCacheView(left_image,exception);
right_view=AcquireVirtualCacheView(right_image,exception);
for (y=0; y < (ssize_t) smush_image->rows; y++)
{
for (x=(ssize_t) left_image->columns-1; x > 0; x--)
{
p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(left_image,p) != TransparentAlpha) ||
((left_image->columns-x-1) >= gap))
break;
}
i=(ssize_t) left_image->columns-x-1;
for (x=0; x < (ssize_t) right_image->columns; x++)
{
p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(right_image,p) != TransparentAlpha) ||
((x+i) >= (ssize_t) gap))
break;
}
if ((x+i) < (ssize_t) gap)
gap=(size_t) (x+i);
}
right_view=DestroyCacheView(right_view);
left_view=DestroyCacheView(left_view);
if (y < (ssize_t) smush_image->rows)
return(offset);
return((ssize_t) gap-offset);
}
static ssize_t SmushYGap(const Image *smush_image,const Image *images,
const ssize_t offset,ExceptionInfo *exception)
{
CacheView
*bottom_view,
*top_view;
const Image
*bottom_image,
*top_image;
RectangleInfo
bottom_geometry,
top_geometry;
register const Quantum
*p;
register ssize_t
i,
x;
size_t
gap;
ssize_t
y;
if (images->previous == (Image *) NULL)
return(0);
bottom_image=images;
SetGeometry(smush_image,&bottom_geometry);
GravityAdjustGeometry(bottom_image->columns,bottom_image->rows,
bottom_image->gravity,&bottom_geometry);
top_image=images->previous;
SetGeometry(smush_image,&top_geometry);
GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity,
&top_geometry);
gap=bottom_image->rows;
top_view=AcquireVirtualCacheView(top_image,exception);
bottom_view=AcquireVirtualCacheView(bottom_image,exception);
for (x=0; x < (ssize_t) smush_image->columns; x++)
{
for (y=(ssize_t) top_image->rows-1; y > 0; y--)
{
p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(top_image,p) != TransparentAlpha) ||
((top_image->rows-y-1) >= gap))
break;
}
i=(ssize_t) top_image->rows-y-1;
for (y=0; y < (ssize_t) bottom_image->rows; y++)
{
p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1,
exception);
if ((p == (const Quantum *) NULL) ||
(GetPixelAlpha(bottom_image,p) != TransparentAlpha) ||
((y+i) >= (ssize_t) gap))
break;
}
if ((y+i) < (ssize_t) gap)
gap=(size_t) (y+i);
}
bottom_view=DestroyCacheView(bottom_view);
top_view=DestroyCacheView(top_view);
if (x < (ssize_t) smush_image->columns)
return(offset);
return((ssize_t) gap-offset);
}
MagickExport Image *SmushImages(const Image *images,
const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception)
{
#define SmushImageTag "Smush/Image"
const Image
*image;
Image
*smush_image;
MagickBooleanType
proceed,
status;
MagickOffsetType
n;
PixelTrait
alpha_trait;
RectangleInfo
geometry;
register const Image
*next;
size_t
height,
number_images,
width;
ssize_t
x_offset,
y_offset;
/*
    Compute the maximum extent of the smushed image.
*/
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=images;
alpha_trait=image->alpha_trait;
number_images=1;
width=image->columns;
height=image->rows;
next=GetNextImageInList(image);
for ( ; next != (Image *) NULL; next=GetNextImageInList(next))
{
if (next->alpha_trait != UndefinedPixelTrait)
alpha_trait=BlendPixelTrait;
number_images++;
if (stack != MagickFalse)
{
if (next->columns > width)
width=next->columns;
height+=next->rows;
if (next->previous != (Image *) NULL)
height+=offset;
continue;
}
width+=next->columns;
if (next->previous != (Image *) NULL)
width+=offset;
if (next->rows > height)
height=next->rows;
}
/*
Smush images.
*/
smush_image=CloneImage(image,width,height,MagickTrue,exception);
if (smush_image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse)
{
smush_image=DestroyImage(smush_image);
return((Image *) NULL);
}
smush_image->alpha_trait=alpha_trait;
(void) SetImageBackgroundColor(smush_image,exception);
status=MagickTrue;
x_offset=0;
y_offset=0;
for (n=0; n < (MagickOffsetType) number_images; n++)
{
SetGeometry(smush_image,&geometry);
GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry);
if (stack != MagickFalse)
{
x_offset-=geometry.x;
y_offset-=SmushYGap(smush_image,image,offset,exception);
}
else
{
x_offset-=SmushXGap(smush_image,image,offset,exception);
y_offset-=geometry.y;
}
status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset,
y_offset,exception);
proceed=SetImageProgress(image,SmushImageTag,n,number_images);
if (proceed == MagickFalse)
break;
if (stack == MagickFalse)
{
x_offset+=(ssize_t) image->columns;
y_offset=0;
}
else
{
x_offset=0;
y_offset+=(ssize_t) image->rows;
}
image=GetNextImageInList(image);
}
if (stack == MagickFalse)
smush_image->columns=(size_t) x_offset;
else
smush_image->rows=(size_t) y_offset;
if (status == MagickFalse)
smush_image=DestroyImage(smush_image);
return(smush_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t r i p I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StripImage() strips an image of all profiles and comments.
%
% The format of the StripImage method is:
%
% MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
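/*
  A minimal usage sketch (illustrative): strip profiles and comments before
  writing an image, typically to reduce file size.  The image_info and
  exception values are assumptions for the example.

    (void) StripImage(image,exception);
    (void) WriteImage(image_info,image,exception);
*/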
MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception)
{
MagickBooleanType
status;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
(void) exception;
DestroyImageProfiles(image);
(void) DeleteImageProperty(image,"comment");
(void) DeleteImageProperty(image,"date:create");
(void) DeleteImageProperty(image,"date:modify");
status=SetImageArtifact(image,"png:exclude-chunk",
"bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date");
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ S y n c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImage() initializes the red, green, and blue intensities of each pixel
% as defined by the colormap index.
%
% The format of the SyncImage method is:
%
% MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
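/*
  A minimal usage sketch (illustrative): after editing the colormap of a
  PseudoClass image, call SyncImage() to propagate the new colors to the
  pixels.

    if (image->storage_class == PseudoClass)
      {
        image->colormap[0].red=0.0;  /* darken the first colormap entry */
        (void) SyncImage(image,exception);
      }
*/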
static inline Quantum PushColormapIndex(Image *image,const Quantum index,
MagickBooleanType *range_exception)
{
if ((size_t) index < image->colors)
return(index);
*range_exception=MagickTrue;
return((Quantum) 0);
}
MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
range_exception,
status,
taint;
ssize_t
y;
assert(image != (Image *) NULL);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
assert(image->signature == MagickCoreSignature);
if (image->ping != MagickFalse)
return(MagickTrue);
if (image->storage_class != PseudoClass)
return(MagickFalse);
assert(image->colormap != (PixelInfo *) NULL);
range_exception=MagickFalse;
status=MagickTrue;
taint=image->taint;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(range_exception,status) \
magick_number_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
Quantum
index;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception);
SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
}
image_view=DestroyCacheView(image_view);
image->taint=taint;
if ((image->ping == MagickFalse) && (range_exception != MagickFalse))
(void) ThrowMagickException(exception,GetMagickModule(),
CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S y n c I m a g e S e t t i n g s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SyncImageSettings() syncs any image_info global options into per-image
% attributes.
%
%  Note: in IMv6, free-form 'options' were always copied into per-image
%  'artifacts' so that operations and coders could find such settings.  In
%  IMv7, if a desired per-image artifact is not set, the lookup falls back
%  directly to the global option, so this copy is no longer needed; only
%  the link needs to be set up.
%
% The format of the SyncImageSettings method is:
%
% MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
% MagickBooleanType SyncImagesSettings(const ImageInfo *image_info,
% Image *image,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image_info: the image info.
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
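/*
  A minimal usage sketch (illustrative): define a global option on the
  image-info and sync it into the corresponding per-image attribute; the
  "quality" option below maps to image->quality as shown in the code that
  follows.

    (void) SetImageOption(image_info,"quality","85");
    (void) SyncImageSettings(image_info,image,exception);
*/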
MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info,
Image *images,ExceptionInfo *exception)
{
Image
*image;
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
image=images;
for ( ; image != (Image *) NULL; image=GetNextImageInList(image))
(void) SyncImageSettings(image_info,image,exception);
(void) DeleteImageOption(image_info,"page");
return(MagickTrue);
}
MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info,
Image *image,ExceptionInfo *exception)
{
const char
*option;
GeometryInfo
geometry_info;
MagickStatusType
flags;
ResolutionType
units;
/*
Sync image options.
*/
assert(image_info != (const ImageInfo *) NULL);
assert(image_info->signature == MagickCoreSignature);
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
option=GetImageOption(image_info,"background");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->background_color,
exception);
option=GetImageOption(image_info,"black-point-compensation");
if (option != (const char *) NULL)
image->black_point_compensation=(MagickBooleanType) ParseCommandOption(
MagickBooleanOptions,MagickFalse,option);
option=GetImageOption(image_info,"blue-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.blue_primary.x=geometry_info.rho;
image->chromaticity.blue_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x;
}
option=GetImageOption(image_info,"bordercolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->border_color,
exception);
/* FUTURE: do not sync compose to per-image compose setting here */
option=GetImageOption(image_info,"compose");
if (option != (const char *) NULL)
image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions,
MagickFalse,option);
/* -- */
option=GetImageOption(image_info,"compress");
if (option != (const char *) NULL)
image->compression=(CompressionType) ParseCommandOption(
MagickCompressOptions,MagickFalse,option);
option=GetImageOption(image_info,"debug");
if (option != (const char *) NULL)
image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
option=GetImageOption(image_info,"depth");
if (option != (const char *) NULL)
image->depth=StringToUnsignedLong(option);
option=GetImageOption(image_info,"endian");
if (option != (const char *) NULL)
image->endian=(EndianType) ParseCommandOption(MagickEndianOptions,
MagickFalse,option);
option=GetImageOption(image_info,"filter");
if (option != (const char *) NULL)
image->filter=(FilterType) ParseCommandOption(MagickFilterOptions,
MagickFalse,option);
option=GetImageOption(image_info,"fuzz");
if (option != (const char *) NULL)
image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0);
option=GetImageOption(image_info,"gravity");
if (option != (const char *) NULL)
image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions,
MagickFalse,option);
option=GetImageOption(image_info,"green-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.green_primary.x=geometry_info.rho;
image->chromaticity.green_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.green_primary.y=image->chromaticity.green_primary.x;
}
option=GetImageOption(image_info,"intent");
if (option != (const char *) NULL)
image->rendering_intent=(RenderingIntent) ParseCommandOption(
MagickIntentOptions,MagickFalse,option);
option=GetImageOption(image_info,"intensity");
if (option != (const char *) NULL)
image->intensity=(PixelIntensityMethod) ParseCommandOption(
MagickPixelIntensityOptions,MagickFalse,option);
option=GetImageOption(image_info,"interlace");
if (option != (const char *) NULL)
image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions,
MagickFalse,option);
option=GetImageOption(image_info,"interpolate");
if (option != (const char *) NULL)
image->interpolate=(PixelInterpolateMethod) ParseCommandOption(
MagickInterpolateOptions,MagickFalse,option);
option=GetImageOption(image_info,"loop");
if (option != (const char *) NULL)
image->iterations=StringToUnsignedLong(option);
option=GetImageOption(image_info,"mattecolor");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->matte_color,
exception);
option=GetImageOption(image_info,"orient");
if (option != (const char *) NULL)
image->orientation=(OrientationType) ParseCommandOption(
MagickOrientationOptions,MagickFalse,option);
option=GetImageOption(image_info,"page");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->page);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"quality");
if (option != (const char *) NULL)
image->quality=StringToUnsignedLong(option);
option=GetImageOption(image_info,"red-primary");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.red_primary.x=geometry_info.rho;
image->chromaticity.red_primary.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.red_primary.y=image->chromaticity.red_primary.x;
}
if (image_info->quality != UndefinedCompressionQuality)
image->quality=image_info->quality;
option=GetImageOption(image_info,"scene");
if (option != (const char *) NULL)
image->scene=StringToUnsignedLong(option);
option=GetImageOption(image_info,"taint");
if (option != (const char *) NULL)
image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions,
MagickFalse,option);
option=GetImageOption(image_info,"tile-offset");
if (option != (const char *) NULL)
{
char
*geometry;
geometry=GetPageGeometry(option);
flags=ParseAbsoluteGeometry(geometry,&image->tile_offset);
geometry=DestroyString(geometry);
}
option=GetImageOption(image_info,"transparent-color");
if (option != (const char *) NULL)
(void) QueryColorCompliance(option,AllCompliance,&image->transparent_color,
exception);
option=GetImageOption(image_info,"type");
if (option != (const char *) NULL)
image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse,
option);
option=GetImageOption(image_info,"units");
units=image_info->units;
if (option != (const char *) NULL)
units=(ResolutionType) ParseCommandOption(MagickResolutionOptions,
MagickFalse,option);
if (units != UndefinedResolution)
{
if (image->units != units)
switch (image->units)
{
case PixelsPerInchResolution:
{
if (units == PixelsPerCentimeterResolution)
{
image->resolution.x/=2.54;
image->resolution.y/=2.54;
}
break;
}
case PixelsPerCentimeterResolution:
{
if (units == PixelsPerInchResolution)
{
image->resolution.x=(double) ((size_t) (100.0*2.54*
image->resolution.x+0.5))/100.0;
image->resolution.y=(double) ((size_t) (100.0*2.54*
image->resolution.y+0.5))/100.0;
}
break;
}
default:
break;
}
image->units=units;
option=GetImageOption(image_info,"density");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->resolution.x=geometry_info.rho;
image->resolution.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->resolution.y=image->resolution.x;
}
}
option=GetImageOption(image_info,"virtual-pixel");
if (option != (const char *) NULL)
(void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod)
ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option),
exception);
option=GetImageOption(image_info,"white-point");
if (option != (const char *) NULL)
{
flags=ParseGeometry(option,&geometry_info);
image->chromaticity.white_point.x=geometry_info.rho;
image->chromaticity.white_point.y=geometry_info.sigma;
if ((flags & SigmaValue) == 0)
image->chromaticity.white_point.y=image->chromaticity.white_point.x;
}
/*
    Keep a pointer to the image-info so that a per-image artifact lookup can
    fall back to a global option setting/define.  This avoids duplicating
    global options into per-image artifacts, while ensuring only explicitly
    set per-image artifacts are preserved when a parenthesis ends.
*/
if (image->image_info != (ImageInfo *) NULL)
image->image_info=DestroyImageInfo(image->image_info);
image->image_info=CloneImageInfo(image_info);
return(MagickTrue);
}
|
pdgstrf.c |
/*! @file
* \brief Performs LU factorization in parallel
*
* <pre>
* -- Distributed SuperLU routine (version 4.0) --
* Lawrence Berkeley National Lab, Univ. of California Berkeley.
* October 1, 2014
*
* Modified:
* September 1, 1999
 *     February 7, 2001  use MPI_Isend/MPI_Irecv
* October 15, 2008 latency-reducing panel factorization
* July 12, 2011 static scheduling and arbitrary look-ahead
* March 13, 2013 change NTAGS to MPI_TAG_UB value
*
* Sketch of the algorithm
*
* =======================
*
* The following relations hold:
* * A_kk = L_kk * U_kk
 *   * L_ik = A_ik * U_kk^(-1)
* * U_kj = L_kk^(-1) * A_kj
*
* ----------------------------------
* | | |
* ----|-----------------------------
* | | \ U_kk| |
* | | \ | U_kj |
* | |L_kk \ | || |
* ----|-------|---------||----------
* | | | \/ |
* | | | |
* | | | |
* | | | |
* | | L_ik ==> A_ij |
* | | | |
* | | | |
* | | | |
* ----------------------------------
*
* Handle the first block of columns separately.
* * Factor diagonal and subdiagonal blocks and test for exact
* singularity. ( pdgstrf2(0), one column at a time )
* * Compute block row of U
* * Update trailing matrix
*
* Loop over the remaining blocks of columns.
* mycol = MYCOL( iam, grid );
* myrow = MYROW( iam, grid );
* N = nsupers;
* For (k = 1; k < N; ++k) {
* krow = PROW( k, grid );
* kcol = PCOL( k, grid );
* Pkk = PNUM( krow, kcol, grid );
*
* * Factor diagonal and subdiagonal blocks and test for exact
* singularity.
* if ( mycol == kcol ) {
* pdgstrf2(k), one column at a time
* }
*
* * Parallel triangular solve
* if ( iam == Pkk ) multicast L_k,k to this process row;
* if ( myrow == krow && mycol != kcol ) {
* Recv L_k,k from process Pkk;
* for (j = k+1; j < N; ++j)
* if ( PCOL( j, grid ) == mycol && A_k,j != 0 )
* U_k,j = L_k,k \ A_k,j;
* }
*
* * Parallel rank-k update
* if ( myrow == krow ) multicast U_k,k+1:N to this process column;
* if ( mycol == kcol ) multicast L_k+1:N,k to this process row;
* if ( myrow != krow ) {
* Pkj = PNUM( krow, mycol, grid );
* Recv U_k,k+1:N from process Pkj;
* }
* if ( mycol != kcol ) {
* Pik = PNUM( myrow, kcol, grid );
* Recv L_k+1:N,k from process Pik;
* }
 *        for (j = k+1; j < N; ++j) {
* for (i = k+1; i < N; ++i)
* if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid )
* && L_i,k != 0 && U_k,j != 0 )
* A_i,j = A_i,j - L_i,k * U_k,j;
* }
* }
*
* </pre>
*/
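/*
 * Illustrative note (an assumption drawn from the sketch above, not part
 * of the original source): on the Pr x Pc process grid, the block-cyclic
 * owner of a block is found with the grid macros, e.g.
 *
 *     krow = PROW( k, grid );           // process row owning block row k
 *     kcol = PCOL( k, grid );           // process column owning column k
 *     Pkk  = PNUM( krow, kcol, grid );  // rank holding diagonal block k,k
 *
 * so only processes with myrow == krow take part in the triangular solve
 * for U(k,:), and only those with mycol == kcol factor the panel L(:,k).
 */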
#include <math.h>
/*#include "mkl.h"*/
#include "superlu_ddefs.h"
#ifdef GPU_ACC
#include "cublas_utils.h"
/*#include "cublas_dgemm.h"*/
// #define NUM_CUDA_STREAMS 16
#endif
/* Various definitions */
/*
    Name     : SUPERNODE_PROFILE
    Purpose  : For supernode-level profiling of measurements such as
               gigaflops/sec obtained and bandwidth achieved.
    Overhead : Low
*/
// #define SUPERNODE_PROFILE
/*
    Name     : BASELINE
    Purpose  : Baseline to compare performance against.
    Overhead : NA : this won't be used for running experiments
*/
// #define BASELINE
/*
    Name     : PHI_FRAMEWORK
    Purpose  : To simulate and test the algorithm used for offloading to
               the Phi coprocessor.
    Overhead : NA : this won't be used for running experiments
*/
#define PHI_FRAMEWORK
#define PDGSTRF2 pdgstrf2_trsm
#define PDGSTRS2 pdgstrs2_omp
extern void PDGSTRF2 (superlu_options_t *, int_t, int_t, double,
Glu_persist_t *, gridinfo_t *, LocalLU_t *,
MPI_Request *, int, SuperLUStat_t *, int *);
#ifdef _CRAY
extern void PDGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *,
LocalLU_t *, SuperLUStat_t *, _fcd, _fcd, _fcd);
#else
extern void PDGSTRS2 (int_t, int_t, Glu_persist_t *, gridinfo_t *,
LocalLU_t *, SuperLUStat_t *);
#endif
#define ISORT /* Note: qsort() has bug on Mac */
#ifdef ISORT
extern void isort (int_t N, int_t * ARRAY1, int_t * ARRAY2);
extern void isort1 (int_t N, int_t * ARRAY);
#else
int
superlu_sort_perm (const void *arg1, const void *arg2)
{
const int_t *val1 = (const int_t *) arg1;
const int_t *val2 = (const int_t *) arg2;
    /* Return <0, 0, or >0 per the qsort() comparator contract; the
       original (*val2 < *val1) never returned a negative value. */
    return (*val1 > *val2) - (*val1 < *val2);
}
#endif
/************************************************************************/
#include "dscatter.c"
/************************************************************************/
/*! \brief
*
* <pre>
* Purpose
* =======
*
* PDGSTRF performs the LU factorization in parallel.
*
* Arguments
* =========
*
* options (input) superlu_options_t*
* The structure defines the input parameters to control
* how the LU decomposition will be performed.
* The following field should be defined:
* o ReplaceTinyPivot (yes_no_t)
* Specifies whether to replace the tiny diagonals by
* sqrt(epsilon)*norm(A) during LU factorization.
*
* m (input) int
* Number of rows in the matrix.
*
* n (input) int
* Number of columns in the matrix.
*
* anorm (input) double
* The norm of the original matrix A, or the scaled A if
* equilibration was done.
*
* LUstruct (input/output) LUstruct_t*
* The data structures to store the distributed L and U factors.
* The following fields should be defined:
*
* o Glu_persist (input) Glu_persist_t*
* Global data structure (xsup, supno) replicated on all processes,
* describing the supernode partition in the factored matrices
* L and U:
* xsup[s] is the leading column of the s-th supernode,
* supno[i] is the supernode number to which column i belongs.
*
* o Llu (input/output) LocalLU_t*
* The distributed data structures to store L and U factors.
* See superlu_ddefs.h for the definition of 'LocalLU_t'.
*
* grid (input) gridinfo_t*
* The 2D process mesh. It contains the MPI communicator, the number
* of process rows (NPROW), the number of process columns (NPCOL),
* and my process rank. It is an input argument to all the
* parallel routines.
* Grid can be initialized by subroutine SUPERLU_GRIDINIT.
* See superlu_ddefs.h for the definition of 'gridinfo_t'.
*
* stat (output) SuperLUStat_t*
* Record the statistics on runtime and floating-point operation count.
* See util.h for the definition of 'SuperLUStat_t'.
*
* info (output) int*
* = 0: successful exit
* < 0: if info = -i, the i-th argument had an illegal value
* > 0: if info = i, U(i,i) is exactly zero. The factorization has
* been completed, but the factor U is exactly singular,
* and division by zero will occur if it is used to solve a
* system of equations.
* </pre>
*/
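/*
 * Illustrative call sketch (assumes the usual SuperLU_DIST setup --
 * options, LUstruct, grid, and stat initialized and the matrix already
 * distributed -- and is not part of the original source):
 *
 *     int info;
 *     pdgstrf (&options, m, n, anorm, &LUstruct, &grid, &stat, &info);
 *     if (info > 0)
 *         fprintf (stderr, "U(%d,%d) is exactly zero\n", info, info);
 *     else if (info < 0)
 *         fprintf (stderr, "argument %d had an illegal value\n", -info);
 */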
int_t
pdgstrf(superlu_options_t * options, int m, int n, double anorm,
LUstruct_t * LUstruct, gridinfo_t * grid, SuperLUStat_t * stat, int *info)
{
#ifdef _CRAY
_fcd ftcs = _cptofcd ("N", strlen ("N"));
_fcd ftcs1 = _cptofcd ("L", strlen ("L"));
_fcd ftcs2 = _cptofcd ("N", strlen ("N"));
_fcd ftcs3 = _cptofcd ("U", strlen ("U"));
#endif
double zero = 0.0, alpha = 1.0, beta = 0.0;
int_t *xsup;
int_t *lsub, *lsub1, *usub, *Usub_buf;
int_t **Lsub_buf_2, **Usub_buf_2;
double **Lval_buf_2, **Uval_buf_2; /* pointers to starts of bufs */
double *lusup, *lusup1, *uval, *Uval_buf; /* pointer to current buf */
int_t fnz, i, ib, ijb, ilst, it, iukp, jb, jj, klst, knsupc,
lb, lib, ldv, ljb, lptr, lptr0, lptrj, luptr, luptr0, luptrj,
nlb, nub, nsupc, rel, rukp, il, iu;
int_t Pc, Pr;
int iam, kcol, krow, yourcol, mycol, myrow, pi, pj;
int j, k, lk, nsupers; /* k - current panel to work on */
int k0; /* counter of the next supernode to be factored */
int kk, kk0, kk1, kk2, jj0; /* panels in the look-ahead window */
int iukp0, rukp0, flag0, flag1;
int nsupr, nbrow, segsize;
int msg0, msg2;
int_t **Ufstnz_br_ptr, **Lrowind_bc_ptr;
double **Unzval_br_ptr, **Lnzval_bc_ptr;
int_t *index;
double *nzval;
int_t *iuip, *ruip; /* Pointers to U index/nzval; size ceil(NSUPERS/Pr). */
double *ucol;
int *indirect, *indirect2;
double *tempv, *tempv2d;
int iinfo;
int *ToRecv, *ToSendD, **ToSendR;
Glu_persist_t *Glu_persist = LUstruct->Glu_persist;
LocalLU_t *Llu = LUstruct->Llu;
superlu_scope_t *scp;
float s_eps;
double thresh;
double *tempU2d, *tempu;
int full, ldt, ldu, lead_zero, ncols, ncb, nrb, p, pr, pc, nblocks;
int_t *etree_supno_l, *etree_supno, *blocks, *blockr, *Ublock, *Urows,
*Lblock, *Lrows, *perm_u, *sf_block, *sf_block_l, *nnodes_l,
*nnodes_u, *edag_supno_l, *recvbuf, **edag_supno;
float edag_supno_l_bytes;
#ifdef ISORT
int_t *iperm_u;
#endif
int *msgcnt; /* Count the size of the message xfer'd in each buffer:
* 0 : transferred in Lsub_buf[]
* 1 : transferred in Lval_buf[]
* 2 : transferred in Usub_buf[]
* 3 : transferred in Uval_buf[]
*/
int **msgcnts, **msgcntsU;
int *factored, *factoredU, nnodes, *sendcnts, *sdispls, *recvcnts,
*rdispls, *srows, *rrows;
etree_node *head, *tail, *ptr;
int *num_child;
int num_look_aheads, look_id, *look_ahead;
int_t *perm_c_supno, *iperm_c_supno;
MPI_Request *recv_req, **recv_reqs, **send_reqs, **send_reqs_u,
**recv_reqs_u;
MPI_Request *send_req, *U_diag_blk_send_req = NULL;
MPI_Status status;
void *attr_val;
int flag;
int iword = sizeof (int_t);
int dword = sizeof (double);
double scatter_timer = 0;
double gemm_timer = 0;
    /* For measuring load imbalance in OpenMP threads */
double omp_load_imblc = 0.0;
double *omp_loop_time;
double CPUOffloadTimer = 0;
double CPUOffloadFlop = 0;
double CPUOffloadMop = 0;
double schur_flop_timer = 0.0;
double pdgstrf2_timer = 0.0;
double pdgstrs2_timer = 0.0;
double lookaheadupdatetimer = 0.0;
#if !defined( GPU_ACC )
    /* Counters for counting memory operations */
double scatter_mem_op_counter = 0.0;
double scatter_mem_op_timer = 0.0;
double scatterL_mem_op_counter = 0.0;
double scatterL_mem_op_timer = 0.0;
double scatterU_mem_op_counter = 0.0;
double scatterU_mem_op_timer = 0.0;
double LookAheadRowSepTimer = 0.0;
double LookAheadRowSepMOP = 0.0;
double GatherTimer = 0.0;
double GatherMOP = 0.0;
double LookAheadGEMMTimer = 0.0;
double LookAheadGEMMFlOp = 0.0;
double LookAheadScatterTimer = 0.0;
double LookAheadScatterMOP = 0.0;
double schur_flop_counter = 0.0;
#endif
#if ( DEBUGlevel>=2 )
int_t num_copy = 0, num_update = 0;
#endif
#if ( PRNTlevel==3 )
int zero_msg = 0, total_msg = 0;
#endif
#if ( PROFlevel>=1 )
double t1, t2;
float msg_vol = 0, msg_cnt = 0;
#endif
/* Test the input parameters. */
*info = 0;
if (m < 0)
*info = -2;
else if (n < 0)
*info = -3;
if (*info) {
pxerbla ("pdgstrf", grid, -*info);
return (-1);
}
/* Quick return if possible. */
if (m == 0 || n == 0) return 0;
/*
* Initialization.
*/
iam = grid->iam;
Pc = grid->npcol;
Pr = grid->nprow;
myrow = MYROW (iam, grid);
mycol = MYCOL (iam, grid);
nsupers = Glu_persist->supno[n - 1] + 1;
xsup = Glu_persist->xsup;
s_eps = slamch_ ("Epsilon");
thresh = s_eps * anorm;
MPI_Attr_get (MPI_COMM_WORLD, MPI_TAG_UB, &attr_val, &flag);
if (!flag) {
fprintf (stderr, "Could not get TAG_UB\n");
return (-1);
}
int tag_ub = *(int *) attr_val;
#if ( PRNTlevel>=1 )
if (!iam)
printf ("MPI tag upper bound = %d\n", tag_ub);
#endif
#if ( DEBUGlevel>=1 )
if (s_eps == 0.0)
printf (" ***** warning s_eps = %e *****\n", s_eps);
CHECK_MALLOC (iam, "Enter pdgstrf()");
#endif
stat->ops[FACT] = 0.0;
stat->current_buffer = 0.0;
stat->peak_buffer = 0.0;
stat->gpu_buffer = 0.0;
    /* clamp the look-ahead window to the range [0, MAX_LOOKAHEADS-1] */
num_look_aheads = SUPERLU_MAX(0, SUPERLU_MIN(options->num_lookaheads, MAX_LOOKAHEADS - 1));
if (Pr * Pc > 1) {
if (!(U_diag_blk_send_req =
(MPI_Request *) SUPERLU_MALLOC (Pr * sizeof (MPI_Request))))
ABORT ("Malloc fails for U_diag_blk_send_req[].");
/* flag no outstanding Isend */
U_diag_blk_send_req[myrow] = MPI_REQUEST_NULL; /* used 0 before */
/* allocating buffers for look-ahead */
i = Llu->bufmax[0];
if (i != 0) {
if ( !(Llu->Lsub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * ((size_t) i))) )
ABORT ("Malloc fails for Lsub_buf.");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Lsub_buf_2[jj + 1] = Llu->Lsub_buf_2[jj] + i;
}
i = Llu->bufmax[1];
if (i != 0) {
if (!(Llu->Lval_buf_2[0] = doubleMalloc_dist ((num_look_aheads + 1) * ((size_t) i))))
ABORT ("Malloc fails for Lval_buf[].");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Lval_buf_2[jj + 1] = Llu->Lval_buf_2[jj] + i;
}
i = Llu->bufmax[2];
if (i != 0) {
if (!(Llu->Usub_buf_2[0] = intMalloc_dist ((num_look_aheads + 1) * i)))
ABORT ("Malloc fails for Usub_buf_2[].");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Usub_buf_2[jj + 1] = Llu->Usub_buf_2[jj] + i;
}
i = Llu->bufmax[3];
if (i != 0) {
if (!(Llu->Uval_buf_2[0] = doubleMalloc_dist ((num_look_aheads + 1) * i)))
ABORT ("Malloc fails for Uval_buf_2[].");
for (jj = 0; jj < num_look_aheads; jj++)
Llu->Uval_buf_2[jj + 1] = Llu->Uval_buf_2[jj] + i;
}
}
log_memory( (Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1)
* iword +
(Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1)
* dword, stat );
/* creating pointers to the look-ahead buffers */
if (! (Lsub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *))))
ABORT ("Malloc fails for Lsub_buf_2[].");
if (! (Lval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (double *))))
ABORT ("Malloc fails for Lval_buf_2[].");
if (! (Usub_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int_t *))))
        ABORT ("Malloc fails for Usub_buf_2[].");
if (! (Uval_buf_2 = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (double *))))
        ABORT ("Malloc fails for Uval_buf_2[].");
for (i = 0; i <= num_look_aheads; i++) {
Lval_buf_2[i] = Llu->Lval_buf_2[i];
Lsub_buf_2[i] = Llu->Lsub_buf_2[i];
Uval_buf_2[i] = Llu->Uval_buf_2[i];
Usub_buf_2[i] = Llu->Usub_buf_2[i];
}
if (!(msgcnts = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *))))
ABORT ("Malloc fails for msgcnts[].");
if (!(msgcntsU = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (int *))))
ABORT ("Malloc fails for msgcntsU[].");
for (i = 0; i <= num_look_aheads; i++) {
if (!(msgcnts[i] = SUPERLU_MALLOC (4 * sizeof (int))))
ABORT ("Malloc fails for msgcnts[].");
if (!(msgcntsU[i] = SUPERLU_MALLOC (4 * sizeof (int))))
ABORT ("Malloc fails for msgcntsU[].");
}
if (! (recv_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for recv_reqs_u[].");
if (! (send_reqs_u = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for send_reqs_u[].");
if (! (send_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
        ABORT ("Malloc fails for send_reqs[].");
if (! (recv_reqs = SUPERLU_MALLOC ((1 + num_look_aheads) * sizeof (MPI_Request *))))
ABORT ("Malloc fails for recv_reqs[].");
for (i = 0; i <= num_look_aheads; i++) {
if (!(recv_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * sizeof (MPI_Request))))
ABORT ("Malloc fails for recv_req_u[i].");
if (!(send_reqs_u[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pr * sizeof (MPI_Request))))
ABORT ("Malloc fails for send_req_u[i].");
if (!(send_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (2 * Pc * sizeof (MPI_Request))))
ABORT ("Malloc fails for send_reqs[i].");
if (!(recv_reqs[i] = (MPI_Request *) SUPERLU_MALLOC (4 * sizeof (MPI_Request))))
ABORT ("Malloc fails for recv_req[].");
send_reqs[i][0] = send_reqs[i][1] = MPI_REQUEST_NULL;
recv_reqs[i][0] = recv_reqs[i][1] = MPI_REQUEST_NULL;
}
if (!(factored = SUPERLU_MALLOC (nsupers * sizeof (int_t))))
ABORT ("Malloc fails for factored[].");
if (!(factoredU = SUPERLU_MALLOC (nsupers * sizeof (int_t))))
ABORT ("Malloc fails for factoredU[].");
for (i = 0; i < nsupers; i++) factored[i] = factoredU[i] = -1;
log_memory(2 * nsupers * iword, stat);
int num_threads = 1;
#ifdef _OPENMP
#pragma omp parallel default(shared)
{
if (omp_get_thread_num () == 0) {
num_threads = omp_get_num_threads ();
}
}
#endif
#if 0
omp_loop_time = (double *) _mm_malloc (sizeof (double) * num_threads,64);
#else
omp_loop_time = (double *) doubleMalloc_dist(num_threads);
#endif
#if ( PRNTlevel>=1 )
if(!iam) printf(".. Starting with %d OpenMP threads \n", num_threads );
double tt1 = SuperLU_timer_ ();
#endif
nblocks = 0;
ncb = nsupers / Pc;
nrb = nsupers / Pr;
int nstreams = get_num_cuda_streams ();
/* int nstreams = NUM_CUDA_STREAMS; */
/* in order to have dynamic scheduling */
int *full_u_cols;
int *blk_ldu;
#if 0
full_u_cols = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64);
blk_ldu = (int_t *) _mm_malloc (sizeof (int_t) * ncb,64);
#else
full_u_cols = SUPERLU_MALLOC(ncb * sizeof(int));
blk_ldu = SUPERLU_MALLOC(ncb * sizeof(int));
#endif
log_memory(2 * ncb * iword, stat);
/* array holding last column blk for each partition,
used in SchCompUdt--CUDA.c */
#if 0
int *stream_end_col = (int_t *) _mm_malloc (sizeof (int_t) * nstreams,64);
#else
int *stream_end_col = SUPERLU_MALLOC( nstreams * sizeof(int) );
#endif
/* insert a check condition here */
#if 0 /* Sherry: not used? */
/* This bunch is used for static scheduling */
pair *full_col_count = (pair *) _mm_malloc (sizeof (pair) * ncb,64);
int_t *count_cols, *sum_cols, *partition;
count_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64);
sum_cols = (int_t *) _mm_malloc (sizeof (int_t) * num_threads,64);
partition = (int_t *) _mm_malloc (sizeof (int_t) * num_threads * ncb,64);
int_t ldp = ncb;
#endif
/* ##################################################################
* Compute a good static schedule based on the factorization task graph.
* ################################################################## */
perm_c_supno = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t));
iperm_c_supno = perm_c_supno + nsupers;
static_schedule(options, m, n, LUstruct, grid, stat,
perm_c_supno, iperm_c_supno, info);
#if ( DEBUGlevel >= 2 )
PrintInt10("schedule:perm_c_supno", nsupers, perm_c_supno);
printf("[%d] .. Turn off static schedule for debugging ..\n", iam);
/* Turn off static schedule */
for (i = 0; i < nsupers; ++i) perm_c_supno[i] = iperm_c_supno[i] = i;
#endif
/* ################################################################## */
/* constructing look-ahead table to indicate the last dependency */
int *look_ahead_l;
stat->num_look_aheads = num_look_aheads;
look_ahead_l = SUPERLU_MALLOC (nsupers * sizeof (int));
look_ahead = SUPERLU_MALLOC (nsupers * sizeof (int));
for (lb = 0; lb < nsupers; lb++) look_ahead_l[lb] = -1;
log_memory(3 * nsupers * iword, stat);
/* go through U-factor */
for (lb = 0; lb < nrb; ++lb) {
ib = lb * Pr + myrow;
index = Llu->Ufstnz_br_ptr[lb];
if (index) { /* Not an empty row */
k = BR_HEADER;
for (j = 0; j < index[0]; ++j) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += UB_DESCRIPTOR + SuperSize (index[k]);
}
}
}
if (myrow < nsupers % grid->nprow) {
ib = nrb * Pr + myrow;
index = Llu->Ufstnz_br_ptr[nrb];
if (index) { /* Not an empty row */
k = BR_HEADER;
for (j = 0; j < index[0]; ++j) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += UB_DESCRIPTOR + SuperSize (index[k]);
}
}
}
if (options->SymPattern == NO) {
/* go through L-factor */
for (lb = 0; lb < ncb; lb++) {
ib = lb * Pc + mycol;
index = Llu->Lrowind_bc_ptr[lb];
if (index) {
k = BC_HEADER;
for (j = 0; j < index[0]; j++) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += LB_DESCRIPTOR + index[k + 1];
}
}
}
if (mycol < nsupers % grid->npcol) {
ib = ncb * Pc + mycol;
index = Llu->Lrowind_bc_ptr[ncb];
if (index) {
k = BC_HEADER;
for (j = 0; j < index[0]; j++) {
jb = index[k];
if (jb != ib)
look_ahead_l[jb] =
SUPERLU_MAX (iperm_c_supno[ib], look_ahead_l[jb]);
k += LB_DESCRIPTOR + index[k + 1];
}
}
}
}
MPI_Allreduce (look_ahead_l, look_ahead, nsupers, MPI_INT, MPI_MAX, grid->comm);
SUPERLU_FREE (look_ahead_l);
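    /* At this point look_ahead[jb] holds, across all processes, the largest
       schedule position iperm_c_supno[ib] over all supernodes ib whose L or
       U blocks update supernode jb, i.e. the last dependency of panel jb.
       The main loop below may factor panel kk ahead of schedule once
       look_ahead[kk] < k0. */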
#ifdef ISORT
iperm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t));
perm_u = SUPERLU_MALLOC (nsupers * sizeof (int_t));
#else
perm_u = SUPERLU_MALLOC (2 * nsupers * sizeof (int_t));
#endif
log_memory(nsupers * iword, stat);
#if ( PRNTlevel>=1 )
if (grid->iam == 0)
printf (" * init: %e seconds\n", SuperLU_timer_ () - tt1);
#endif
k = sp_ienv_dist (3); /* max supernode size */
#if 0
if ( !(Llu->ujrow = doubleMalloc_dist(k*(k+1)/2)) )
ABORT("Malloc fails for ujrow[].");
#else
/* Instead of half storage, we'll do full storage */
if (!(Llu->ujrow = doubleMalloc_dist (k * k)))
ABORT ("Malloc fails for ujrow[].");
    log_memory(k * k * dword, stat);  /* ujrow[] holds doubles */
#endif
#if ( PRNTlevel>=1 )
if (!iam) {
printf (".. thresh = s_eps %e * anorm %e = %e\n", s_eps, anorm,
thresh);
printf
(".. Buffer size: Lsub %ld\tLval %ld\tUsub %ld\tUval %ld\tLDA %ld\n",
(long int) Llu->bufmax[0], (long int) Llu->bufmax[1],
(long int) Llu->bufmax[2], (long int) Llu->bufmax[3],
(long int) Llu->bufmax[4]);
}
#endif
Lrowind_bc_ptr = Llu->Lrowind_bc_ptr;
Lnzval_bc_ptr = Llu->Lnzval_bc_ptr;
Ufstnz_br_ptr = Llu->Ufstnz_br_ptr;
Unzval_br_ptr = Llu->Unzval_br_ptr;
ToRecv = Llu->ToRecv;
ToSendD = Llu->ToSendD;
ToSendR = Llu->ToSendR;
ldt = sp_ienv_dist (3); /* Size of maximum supernode */
k = CEILING (nsupers, Pr); /* Number of local block rows */
    /* The following loop finds the maximum block row size */
int local_max_row_size = 0;
int max_row_size;
for (int i = 0; i < nsupers; ++i) {
int tpc = PCOL (i, grid);
if (mycol == tpc) {
lk = LBj (i, grid);
lsub = Lrowind_bc_ptr[lk];
if (lsub != NULL) {
local_max_row_size = SUPERLU_MAX (local_max_row_size, lsub[1]);
}
}
}
    /* Max row size is a global reduction within the process row */
MPI_Allreduce (&local_max_row_size, &max_row_size, 1, MPI_INT, MPI_MAX, (grid->rscp.comm));
    /* Buffer size is the max over the look-ahead window */
/* int_t buffer_size =
SUPERLU_MAX (max_row_size * num_threads * ldt,
get_max_buffer_size ()); */
int cublas_nb = get_cublas_nb();
#ifdef GPU_ACC
int buffer_size = SUPERLU_MAX(max_row_size*nstreams*cublas_nb,get_max_buffer_size());
#else
int Threads_per_process = get_thread_per_process();
int buffer_size = SUPERLU_MAX(max_row_size*Threads_per_process*ldt,get_max_buffer_size());
#endif
/* symmetric assumption */
    /* Note that in the following expression the factor 8 can be any value
       as long as it is not too big */
int bigu_size = 8 * sp_ienv_dist (3) * (max_row_size);
#if ( PRNTlevel>=1 )
if(!iam) printf("[%d] .. BIG U size %d \n", iam, bigu_size);
#endif
#ifdef GPU_ACC
// printf("hello 1\n");
double* bigU;
if ( checkCuda(cudaHostAlloc((void**)&bigU, bigu_size * sizeof(double), cudaHostAllocDefault)) )
ABORT("Malloc fails for dgemm buffer U ");
int bigv_size = buffer_size;
#if ( PRNTlevel>=1 )
if (!iam) printf("[%d] .. BIG V size %d\n", iam, bigv_size);
#endif
double* bigV;
if ( checkCuda(cudaHostAlloc((void**)&bigV, bigv_size * sizeof(double) ,cudaHostAllocDefault)) )
ABORT("Malloc fails for dgemm buffer V");
DisplayHeader();
#if ( PRNTlevel>=1 )
printf(" Starting with %d Cuda Streams \n",nstreams );
#endif
cublasHandle_t *handle;
handle = (cublasHandle_t *) SUPERLU_MALLOC(sizeof(cublasHandle_t)*nstreams);
for(int i = 0; i < nstreams; i++) handle[i] = create_handle();
// creating streams
cudaStream_t *streams;
streams = (cudaStream_t *) SUPERLU_MALLOC(sizeof(cudaStream_t)*nstreams);
for (int i = 0; i < nstreams; ++i)
checkCuda( cudaStreamCreate(&streams[i]) );
// allocating data in device
double *dA, *dB, *dC;
cudaError_t cudaStat;
#if 0
// cudaStat = cudaMalloc( (void**)&dA, m*k*sizeof(double));
    // How large should dA be?
    // For the time being, just make it:
// cudaStat = cudaMalloc( (void**)&dA, ((max_row_size*sp_ienv_dist(3)))* sizeof(double));
#endif
cudaStat = cudaMalloc( (void**)&dA, max_row_size*sp_ienv_dist(3)* sizeof(double));
if (cudaStat!= cudaSuccess) {
fprintf(stderr, "!!!! Error in allocating A in the device %ld \n",m*k*sizeof(double) );
return 1;
}
// size of B should be max_supernode_size*buffer
cudaStat = cudaMalloc((void**)&dB, bigu_size * sizeof(double));
if (cudaStat!= cudaSuccess) {
fprintf(stderr, "!!!! Error in allocating B in the device %ld \n",n*k*sizeof(double));
return 1;
}
cudaStat = cudaMalloc((void**)&dC, buffer_size* sizeof(double) );
if (cudaStat!= cudaSuccess) {
fprintf(stderr, "!!!! Error in allocating C in the device \n" );
return 1;
}
stat->gpu_buffer += ( max_row_size * sp_ienv_dist(3)
+ bigu_size + buffer_size ) * dword;
#else /* not CUDA */
double* bigU;
if ( !(bigU = doubleMalloc_dist(bigu_size)) )
ABORT ("Malloc fails for dgemm u buff U");
    // Maximum size of bigU = sqrt(buffer_size)?
int bigv_size = 8 * ldt * ldt * num_threads;
#if ( PRNTlevel>=1 )
if (!iam) printf("[%d] .. BIG V size %d\n", iam, bigv_size);
#endif
double *bigV;
if ( !(bigV = doubleMalloc_dist(bigv_size)) )
ABORT ("Malloc failed for dgemm buffer V");
#endif
log_memory((bigv_size + bigu_size) * dword, stat);
// mlock(bigU,(bigu_size) * sizeof (double));
#if ( PRNTlevel>=1 )
if(!iam) {
printf (" Max row size is %d \n", max_row_size);
printf (" Using buffer_size of %d \n", buffer_size);
printf (" Threads per process %d \n", num_threads);
}
#endif
if (!(tempv2d = doubleCalloc_dist (2 * ((size_t) ldt) * ldt)))
ABORT ("Calloc fails for tempv2d[].");
tempU2d = tempv2d + ldt * ldt;
if (!(indirect = SUPERLU_MALLOC (ldt * num_threads * sizeof(int))))
ABORT ("Malloc fails for indirect[].");
if (!(indirect2 = SUPERLU_MALLOC (ldt * num_threads * sizeof(int))))
ABORT ("Malloc fails for indirect[].");
if (!(iuip = intMalloc_dist (k))) ABORT ("Malloc fails for iuip[].");
if (!(ruip = intMalloc_dist (k))) ABORT ("Malloc fails for ruip[].");
log_memory(2 * ldt *ldt * dword + 2 * ldt * num_threads * iword
+ 2 * k * iword, stat);
int_t *lookAheadFullRow,*lookAheadStRow,*lookAhead_lptr,*lookAhead_ib,
*RemainFullRow,*RemainStRow,*Remain_lptr,*Remain_ib;
lookAheadFullRow = intMalloc_dist( (num_look_aheads+1) );
lookAheadStRow = intMalloc_dist( (num_look_aheads+1) );
lookAhead_lptr = intMalloc_dist( (num_look_aheads+1) );
lookAhead_ib = intMalloc_dist( (num_look_aheads+1) );
int_t mrb= (nsupers+Pr-1) / Pr;
int_t mcb= (nsupers+Pc-1) / Pc;
RemainFullRow = intMalloc_dist(mrb);
RemainStRow = intMalloc_dist(mrb);
#if 0
Remain_lptr = (int *) _mm_malloc(sizeof(int)*mrb,1);
#else
Remain_lptr = intMalloc_dist(mrb);
#endif
// mlock(Remain_lptr, sizeof(int)*mrb );
Remain_ib = intMalloc_dist(mrb);
Remain_info_t *Remain_info;
#if 0
Remain_info = (Remain_info_t *) _mm_malloc(mrb*sizeof(Remain_info_t),64);
#else
Remain_info = (Remain_info_t *) SUPERLU_MALLOC(mrb*sizeof(Remain_info_t));
#endif
log_memory(4 * mrb * iword + mrb * sizeof(Remain_info_t), stat);
double *lookAhead_L_buff, *Remain_L_buff;
Ublock_info_t *Ublock_info;
ldt = sp_ienv_dist (3); /* max supernode size */
lookAhead_L_buff = doubleMalloc_dist(ldt*ldt* (num_look_aheads+1) );
log_memory(ldt * ldt * (num_look_aheads+1) * dword, stat);
#if 0
Remain_L_buff = (double *) _mm_malloc( sizeof(double)*(Llu->bufmax[1]),64);
Ublock_info = (Ublock_info_t *) _mm_malloc(mcb*sizeof(Ublock_info_t),64);
int * Ublock_info_iukp = (int *) _mm_malloc(mcb*sizeof(int),64);
int * Ublock_info_rukp = (int *) _mm_malloc(mcb*sizeof(int),64);
int * Ublock_info_jb = (int *) _mm_malloc(mcb*sizeof(int),64);
#else
Remain_L_buff = doubleMalloc_dist(Llu->bufmax[1]);
Ublock_info = (Ublock_info_t *) SUPERLU_MALLOC(mcb*sizeof(Ublock_info_t));
int *Ublock_info_iukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int));
int *Ublock_info_rukp = (int *) SUPERLU_MALLOC(mcb*sizeof(int));
int *Ublock_info_jb = (int *) SUPERLU_MALLOC(mcb*sizeof(int));
#endif
log_memory(Llu->bufmax[1] * dword, stat);
double NetSchurUpTimer = 0;
double pdgstrfTimer= SuperLU_timer_();
/* ##################################################################
** Handle first block column separately to start the pipeline. **
################################################################## */
look_id = 0;
msgcnt = msgcnts[0];
send_req = send_reqs[0];
recv_req = recv_reqs[0];
k0 = 0;
k = perm_c_supno[0];
kcol = PCOL (k, grid);
krow = PROW (k, grid);
if (mycol == kcol) {
double ttt1 = SuperLU_timer_();
/* panel factorization */
PDGSTRF2 (options, k0, k, thresh, Glu_persist, grid, Llu,
U_diag_blk_send_req, tag_ub, stat, info);
pdgstrf2_timer += SuperLU_timer_()-ttt1;
scp = &grid->rscp; /* The scope of process row. */
/* Multicasts numeric values of L(:,0) to process rows. */
lk = LBj (k, grid); /* Local block number. */
lsub = Lrowind_bc_ptr[lk];
lusup = Lnzval_bc_ptr[lk];
if (lsub) {
msgcnt[0] = lsub[1] + BC_HEADER + lsub[0] * LB_DESCRIPTOR;
msgcnt[1] = lsub[1] * SuperSize (k);
} else {
msgcnt[0] = msgcnt[1] = 0;
}
for (pj = 0; pj < Pc; ++pj) {
if (ToSendR[lk][pj] != EMPTY) {
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Isend (lsub, msgcnt[0], mpi_int_t, pj, SLU_MPI_TAG (0, 0) /* 0 */ ,
scp->comm, &send_req[pj]);
MPI_Isend (lusup, msgcnt[1], MPI_DOUBLE, pj, SLU_MPI_TAG (1, 0) /* 1 */ ,
scp->comm, &send_req[pj + Pc]);
#if ( DEBUGlevel>=2 )
            printf ("[%d] first block column Send L(:,%4d): lsub %4d, lusup %4d to Pc %2d\n",
iam, 0, msgcnt[0], msgcnt[1], pj);
#endif
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
msg_cnt += 2;
msg_vol += msgcnt[0] * iword + msgcnt[1] * dword;
#endif
} /* end if */
} /* end for pj ... */
} else { /* Post immediate receives. */
if (ToRecv[k] >= 1) { /* Recv block column L(:,0). */
scp = &grid->rscp; /* The scope of process row. */
MPI_Irecv (Lsub_buf_2[0], Llu->bufmax[0], mpi_int_t, kcol,
SLU_MPI_TAG (0, 0) /* 0 */ ,
scp->comm, &recv_req[0]);
MPI_Irecv (Lval_buf_2[0], Llu->bufmax[1], MPI_DOUBLE, kcol,
SLU_MPI_TAG (1, 0) /* 1 */ ,
scp->comm, &recv_req[1]);
}
} /* end if mycol == 0 */
factored[k] = 0; /* flag column k as factored. */
/* post receive of first U-row */
if (myrow != krow) {
if (ToRecv[k] == 2) { /* Recv block row U(k,:). */
scp = &grid->cscp; /* The scope of process column. */
Usub_buf = Llu->Usub_buf_2[0];
Uval_buf = Llu->Uval_buf_2[0];
MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow,
SLU_MPI_TAG (2, 0) /* 2%tag_ub */ ,
scp->comm, &recv_reqs_u[0][0]);
MPI_Irecv (Uval_buf, Llu->bufmax[3], MPI_DOUBLE, krow,
SLU_MPI_TAG (3, 0) /* 3%tag_ub */ ,
scp->comm, &recv_reqs_u[0][1]);
}
}
/* ##################################################################
**** MAIN LOOP ****
################################################################## */
for (k0 = 0; k0 < nsupers; ++k0) {
k = perm_c_supno[k0];
/* ============================================ *
* ======== look-ahead the new columns ======== *
* ============================================ */
/* tt1 = SuperLU_timer_(); */
if (k0 == 0) { /* look-ahead all the columns in the window */
kk1 = k0 + 1;
kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1);
} else { /* look-ahead one new column after the current window */
kk1 = k0 + num_look_aheads;
kk2 = SUPERLU_MIN (kk1, nsupers - 1);
}
for (kk0 = kk1; kk0 <= kk2; kk0++) {
/* loop through look-ahead window */
kk = perm_c_supno[kk0]; /* use the ordering from static schedule */
look_id = kk0 % (1 + num_look_aheads); /* which column in window */
if (look_ahead[kk] < k0) { /* does not depend on current column */
kcol = PCOL (kk, grid);
if (mycol == kcol) {
/* Panel factorization -- Factor diagonal and subdiagonal
L blocks and test for exact singularity. */
factored[kk] = 0; /* flag column kk as factored */
double ttt1 = SuperLU_timer_();
PDGSTRF2 (options, kk0, kk, thresh, Glu_persist,
grid, Llu, U_diag_blk_send_req, tag_ub, stat, info);
pdgstrf2_timer += SuperLU_timer_() - ttt1;
/* Multicasts numeric values of L(:,kk) to process rows. */
/* ttt1 = SuperLU_timer_(); */
msgcnt = msgcnts[look_id]; /* point to the proper count array */
send_req = send_reqs[look_id];
lk = LBj (kk, grid); /* Local block number. */
lsub1 = Lrowind_bc_ptr[lk];
if (lsub1) {
msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR;
msgcnt[1] = lsub1[1] * SuperSize (kk);
} else {
msgcnt[0] = 0;
msgcnt[1] = 0;
}
scp = &grid->rscp; /* The scope of process row. */
for (pj = 0; pj < Pc; ++pj) {
if (ToSendR[lk][pj] != EMPTY) {
lusup1 = Lnzval_bc_ptr[lk];
MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj,
SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &send_req[pj]);
MPI_Isend (lusup1, msgcnt[1], MPI_DOUBLE, pj,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &send_req[pj + Pc]);
#if ( DEBUGlevel>=2 )
printf ("[%d] -1- Send L(:,%4d): #lsub1 %4d, #lusup1 %4d right to Pj %2d\n",
iam, kk, msgcnt[0], msgcnt[1], pj);
#endif
}
}
/* stat->time9 += SuperLU_timer_() - ttt1; */
} else { /* Post Recv of block column L(:,kk). */
/* double ttt1 = SuperLU_timer_(); */
if (ToRecv[kk] >= 1) {
scp = &grid->rscp; /* The scope of process row. */
recv_req = recv_reqs[look_id];
MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0],
mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &recv_req[0]);
MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1],
MPI_DOUBLE, kcol,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &recv_req[1]);
}
/* stat->time10 += SuperLU_timer_() - ttt1; */
} /* end if mycol == Pc(kk) */
} /* end if look-ahead */
/* post irecv for U-row look-ahead */
krow = PROW (kk, grid);
if (myrow != krow) {
if (ToRecv[kk] == 2) { /* post iRecv block row U(k,:). */
scp = &grid->cscp; /* The scope of process column. */
Usub_buf = Llu->Usub_buf_2[look_id];
Uval_buf = Llu->Uval_buf_2[look_id];
MPI_Irecv (Usub_buf, Llu->bufmax[2], mpi_int_t, krow,
SLU_MPI_TAG (2, kk0) /* (4*kk0+2)%tag_ub */ ,
scp->comm, &recv_reqs_u[look_id][0]);
MPI_Irecv (Uval_buf, Llu->bufmax[3], MPI_DOUBLE, krow,
SLU_MPI_TAG (3, kk0) /* (4*kk0+3)%tag_ub */ ,
scp->comm, &recv_reqs_u[look_id][1]);
}
}
} /* end for each column in look-ahead window */
/* stat->time4 += SuperLU_timer_()-tt1; */
/* ================================= *
* == looking-ahead the U columns == *
* ================================= */
kk1 = k0;
kk2 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1);
for (kk0 = kk1; kk0 < kk2; kk0++) {
kk = perm_c_supno[kk0];
if (factoredU[kk0] != 1 && look_ahead[kk] < k0) {
kcol = PCOL (kk, grid);
krow = PROW (kk, grid);
lk = LBj (kk, grid); /* Local block number. */
look_id = kk0 % (1 + num_look_aheads);
msgcnt = msgcntsU[look_id];
recv_req = recv_reqs[look_id];
/* ================================================= *
* checking if diagonal block has been received *
* for panel factorization of U in look-ahead window *
* ================================================= */
if (mycol == kcol) {
flag0 = flag1 = 1;
msgcnt[0] = msgcnt[1] = -1;
} else {
flag0 = flag1 = 0;
if (ToRecv[kk] >= 1) {
if (recv_req[0] != MPI_REQUEST_NULL) {
MPI_Test (&recv_req[0], &flag0, &status);
if (flag0) {
MPI_Get_count (&status, mpi_int_t, &msgcnt[0]);
recv_req[0] = MPI_REQUEST_NULL;
}
} else flag0 = 1;
if (recv_req[1] != MPI_REQUEST_NULL) {
MPI_Test (&recv_req[1], &flag1, &status);
if (flag1) {
MPI_Get_count (&status, mpi_int_t, &msgcnt[1]);
recv_req[1] = MPI_REQUEST_NULL;
}
} else flag1 = 1;
} else msgcnt[0] = 0;
}
if (flag0 && flag1) {
/* tt1 = SuperLU_timer_(); */
scp = &grid->cscp; /* The scope of process column. */
if (myrow == krow) {
factoredU[kk0] = 1;
/* Parallel triangular solve across process row *krow* --
U(k,j) = L(k,k) \ A(k,j). */
/* double ttt2 = SuperLU_timer_(); */
double ttt2 = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel
#endif
{
PDGSTRS2 (kk0, kk, Glu_persist, grid, Llu,
stat);
}
pdgstrs2_timer += SuperLU_timer_()-ttt2;
/* stat->time8 += SuperLU_timer_()-ttt2; */
/* Multicasts U(k,:) to process columns. */
lk = LBi (kk, grid);
usub = Ufstnz_br_ptr[lk];
uval = Unzval_br_ptr[lk];
if (usub) {
msgcnt[2] = usub[2];
msgcnt[3] = usub[1];
} else {
msgcnt[2] = msgcnt[3] = 0;
}
if (ToSendD[lk] == YES) {
for (pi = 0; pi < Pr; ++pi) {
if (pi != myrow) {
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Isend (usub, msgcnt[2], mpi_int_t, pi,
SLU_MPI_TAG (2, kk0), /* (4*kk0+2)%tag_ub */
scp->comm, &send_reqs_u[look_id][pi]);
MPI_Isend (uval, msgcnt[3], MPI_DOUBLE,
pi, SLU_MPI_TAG (3, kk0), /* (4*kk0+3)%tag_ub */
scp->comm, &send_reqs_u[look_id][pi + Pr]);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
msg_cnt += 2;
msg_vol += msgcnt[2] * iword + msgcnt[3] * dword;
#endif
#if ( DEBUGlevel>=2 )
printf ("[%d] Send U(%4d,:) to Pr %2d\n",
iam, kk, pi);
#endif
} /* if pi ... */
} /* for pi ... */
} /* if ToSendD ... */
/* stat->time2 += SuperLU_timer_()-tt1; */
} /* end if myrow == krow */
} /* end if flag0 ... */
} /* end if factoredU[] ... */
} /* end for kk0 ... */
/* ============================================ *
 * == start processing the current panel k  == *
 * ============================================ */
knsupc = SuperSize (k);
krow = PROW (k, grid);
kcol = PCOL (k, grid);
/* tt1 = SuperLU_timer_(); */
look_id = k0 % (1 + num_look_aheads);
recv_req = recv_reqs[look_id];
send_req = send_reqs[look_id];
msgcnt = msgcnts[look_id];
Usub_buf = Llu->Usub_buf_2[look_id];
Uval_buf = Llu->Uval_buf_2[look_id];
if (mycol == kcol) {
lk = LBj (k, grid); /* Local block number. */
for (pj = 0; pj < Pc; ++pj) {
/* Wait for Isend to complete before using lsub/lusup. */
if (ToSendR[lk][pj] != EMPTY) {
MPI_Wait (&send_req[pj], &status);
MPI_Wait (&send_req[pj + Pc], &status);
}
}
lsub = Lrowind_bc_ptr[lk];
lusup = Lnzval_bc_ptr[lk];
} else {
if (ToRecv[k] >= 1) { /* Recv block column L(:,k). */
scp = &grid->rscp; /* The scope of process row. */
/* ============================================== *
 * wait for L(:,k) for the outer-product update:  *
 * if iam is in U(k,:), the diagonal block may    *
 * not have arrived in time for the panel         *
 * factorization of U(k,:)                        *
 * ============================================== */
#if ( PROFlevel>=1 )
TIC (t1);
#endif
if (recv_req[0] != MPI_REQUEST_NULL) {
MPI_Wait (&recv_req[0], &status);
MPI_Get_count (&status, mpi_int_t, &msgcnt[0]);
recv_req[0] = MPI_REQUEST_NULL;
} else {
msgcnt[0] = msgcntsU[look_id][0];
#if (DEBUGlevel>=2)
printf("\t[%d] k=%d, look_id=%d, recv_req[0] == MPI_REQUEST_NULL, msgcnt[0] = %d\n",
iam, k, look_id, msgcnt[0]);
#endif
}
if (recv_req[1] != MPI_REQUEST_NULL) {
MPI_Wait (&recv_req[1], &status);
MPI_Get_count (&status, MPI_DOUBLE, &msgcnt[1]);
recv_req[1] = MPI_REQUEST_NULL;
} else {
msgcnt[1] = msgcntsU[look_id][1];
#if (DEBUGlevel>=2)
printf("\t[%d] k=%d, look_id=%d, recv_req[1] == MPI_REQUEST_NULL, msgcnt[1] = %d\n",
iam, k, look_id, msgcnt[1]);
#endif
}
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
#endif
#if ( DEBUGlevel>=2 )
printf("[%d] Recv L(:,%4d): #lsub %4d, #lusup %4d from Pc %2d\n",
iam, k, msgcnt[0], msgcnt[1], kcol);
fflush (stdout);
#endif
#if ( PRNTlevel==3 )
++total_msg;
if (!msgcnt[0]) ++zero_msg;
#endif
} else {
msgcnt[0] = 0;
}
lsub = Lsub_buf_2[look_id];
lusup = Lval_buf_2[look_id];
} /* if mycol = Pc(k) */
/* stat->time1 += SuperLU_timer_()-tt1; */
scp = &grid->cscp; /* The scope of process column. */
/* tt1 = SuperLU_timer_(); */
if (myrow == krow) {
lk = LBi (k, grid);
usub = Ufstnz_br_ptr[lk];
uval = Unzval_br_ptr[lk];
if (factoredU[k0] == -1) {
/* Parallel triangular solve across process row *krow* --
U(k,j) = L(k,k) \ A(k,j). */
double ttt2 = SuperLU_timer_();
#ifdef _OPENMP
#pragma omp parallel
#endif
{
PDGSTRS2 (k0, k, Glu_persist, grid, Llu, stat);
}
pdgstrs2_timer += SuperLU_timer_() - ttt2;
/* Multicasts U(k,:) along process columns. */
if (usub) {
msgcnt[2] = usub[2];
msgcnt[3] = usub[1];
} else {
msgcnt[2] = msgcnt[3] = 0;
}
if (ToSendD[lk] == YES) {
for (pi = 0; pi < Pr; ++pi) {
if (pi != myrow) {
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Send (usub, msgcnt[2], mpi_int_t, pi,
SLU_MPI_TAG (2, k0), /* (4*k0+2)%tag_ub */
scp->comm);
MPI_Send (uval, msgcnt[3], MPI_DOUBLE, pi,
SLU_MPI_TAG (3, k0), /* (4*k0+3)%tag_ub */
scp->comm);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
msg_cnt += 2;
msg_vol += msgcnt[2] * iword + msgcnt[3] * dword;
#endif
#if ( DEBUGlevel>=2 )
printf ("[%d] Send U(%4d,:) down to Pr %2d\n", iam, k, pi);
#endif
} /* if pi ... */
} /* for pi ... */
} /* if ToSendD ... */
} else {
/* =========================================== *
* waiting for U(k,:) for outer-product update *
* =========================================== */
if (ToSendD[lk] == YES) {
for (pi = 0; pi < Pr; ++pi) {
if (pi != myrow) {
MPI_Wait (&send_reqs_u[look_id][pi], &status);
MPI_Wait (&send_reqs_u[look_id][pi + Pr], &status);
}
}
}
msgcnt[2] = msgcntsU[look_id][2];
msgcnt[3] = msgcntsU[look_id][3];
}
/* stat->time2 += SuperLU_timer_()-tt1; */
} else { /* myrow != krow */
/* ========================================= *
* wait for U(k,:) for outer-product updates *
* ========================================= */
if (ToRecv[k] == 2) { /* Recv block row U(k,:). */
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Wait (&recv_reqs_u[look_id][0], &status);
MPI_Get_count (&status, mpi_int_t, &msgcnt[2]);
MPI_Wait (&recv_reqs_u[look_id][1], &status);
MPI_Get_count (&status, MPI_DOUBLE, &msgcnt[3]);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
#endif
usub = Usub_buf;
uval = Uval_buf;
#if ( DEBUGlevel>=2 )
printf ("[%d] Recv U(%4d,:) from Pr %2d\n", iam, k, krow);
#endif
#if ( PRNTlevel==3 )
++total_msg;
if (!msgcnt[2]) ++zero_msg;
#endif
} else {
msgcnt[2] = 0;
}
/* stat->time6 += SuperLU_timer_()-tt1; */
} /* if myrow == Pr(k) */
/*
* Parallel rank-k update; pair up blocks L(i,k) and U(k,j).
* for (j = k+1; j < N; ++j) {
* for (i = k+1; i < N; ++i)
* if ( myrow == PROW( i, grid ) && mycol == PCOL( j, grid )
* && L(i,k) != 0 && U(k,j) != 0 )
* A(i,j) = A(i,j) - L(i,k) * U(k,j);
*/
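/*
 * For reference, a minimal serial sketch (not part of SuperLU_DIST) of the
 * dense update above; the mb x kb block L, the kb x nb block U, and the
 * column-major leading dimensions lda/ldu are hypothetical:
 *
 *   for (j = 0; j < nb; ++j)
 *       for (i = 0; i < mb; ++i)
 *           for (l = 0; l < kb; ++l)
 *               A[i + j * lda] -= L[i + l * lda] * U[l + j * ldu];
 *
 * The production code performs this block-wise with GEMM in
 * dlook_ahead_update.c and dSchCompUdt-2Ddynamic.c below.
 */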
msg0 = msgcnt[0];
msg2 = msgcnt[2];
/* tt1 = SuperLU_timer_(); */
if (msg0 && msg2) { /* L(:,k) and U(k,:) are not empty. */
nsupr = lsub[1]; /* LDA of lusup. */
if (myrow == krow) { /* Skip diagonal block L(k,k). */
lptr0 = BC_HEADER + LB_DESCRIPTOR + lsub[BC_HEADER + 1];
luptr0 = knsupc;
nlb = lsub[0] - 1;
} else {
lptr0 = BC_HEADER;
luptr0 = 0;
nlb = lsub[0];
}
iukp = BR_HEADER; /* Skip header; Pointer to index[] of U(k,:) */
rukp = 0; /* Pointer to nzval[] of U(k,:) */
nub = usub[0]; /* Number of blocks in the block row U(k,:) */
klst = FstBlockC (k + 1);
/* -------------------------------------------------------------
Update the look-ahead block columns A(:,k+1:k+num_look_ahead)
------------------------------------------------------------- */
iukp0 = iukp;
rukp0 = rukp;
/* Reorder the remaining columns bottom-up. */
/* TAU_STATIC_TIMER_START("LOOK_AHEAD_UPDATE"); */
for (jj = 0; jj < nub; jj++) {
#ifdef ISORT
iperm_u[jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). */
perm_u[jj] = jj;
#else
perm_u[2 * jj] = iperm_c_supno[usub[iukp]]; /* Global block number of block U(k,j). */
perm_u[2 * jj + 1] = jj;
#endif
jb = usub[iukp]; /* Global block number of block U(k,j). */
nsupc = SuperSize (jb);
iukp += UB_DESCRIPTOR; /* Start fstnz of block U(k,j). */
iukp += nsupc;
}
iukp = iukp0;
#ifdef ISORT
isort (nub, iperm_u, perm_u);
#else
qsort (perm_u, (size_t) nub, 2 * sizeof (int_t),
&superlu_sort_perm);
#endif
j = jj0 = 0;
/************************************************************************/
double ttx =SuperLU_timer_();
#include "dlook_ahead_update.c"
lookaheadupdatetimer += SuperLU_timer_() - ttx;
/************************************************************************/
/*ifdef OMP_LOOK_AHEAD */
/* TAU_STATIC_TIMER_STOP("LOOK_AHEAD_UPDATE"); */
} /* if L(:,k) and U(k,:) not empty */
/* stat->time3 += SuperLU_timer_()-tt1; */
/* ================== */
/* == post receive == */
/* ================== */
kk1 = SUPERLU_MIN (k0 + num_look_aheads, nsupers - 1);
for (kk0 = k0 + 1; kk0 <= kk1; kk0++) {
kk = perm_c_supno[kk0];
kcol = PCOL (kk, grid);
if (look_ahead[kk] == k0) {
if (mycol != kcol) {
if (ToRecv[kk] >= 1) {
scp = &grid->rscp; /* The scope of process row. */
look_id = kk0 % (1 + num_look_aheads);
recv_req = recv_reqs[look_id];
MPI_Irecv (Lsub_buf_2[look_id], Llu->bufmax[0],
mpi_int_t, kcol, SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &recv_req[0]);
MPI_Irecv (Lval_buf_2[look_id], Llu->bufmax[1],
MPI_DOUBLE, kcol,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &recv_req[1]);
}
} else {
lk = LBj (kk, grid); /* Local block number. */
lsub1 = Lrowind_bc_ptr[lk];
lusup1 = Lnzval_bc_ptr[lk];
if (factored[kk] == -1) {
/* Factor diagonal and subdiagonal blocks and
test for exact singularity. */
factored[kk] = 0; /* flag column kk as factored */
double ttt1 = SuperLU_timer_();
PDGSTRF2 (options, kk0, kk, thresh,
Glu_persist, grid, Llu, U_diag_blk_send_req,
tag_ub, stat, info);
pdgstrf2_timer += SuperLU_timer_() - ttt1;
/* Process column *kcol+1* multicasts numeric
values of L(:,k+1) to process rows. */
look_id = kk0 % (1 + num_look_aheads);
send_req = send_reqs[look_id];
msgcnt = msgcnts[look_id];
if (lsub1) {
msgcnt[0] = lsub1[1] + BC_HEADER + lsub1[0] * LB_DESCRIPTOR;
msgcnt[1] = lsub1[1] * SuperSize (kk);
} else {
msgcnt[0] = 0;
msgcnt[1] = 0;
}
scp = &grid->rscp; /* The scope of process row. */
for (pj = 0; pj < Pc; ++pj) {
if (ToSendR[lk][pj] != EMPTY) {
MPI_Isend (lsub1, msgcnt[0], mpi_int_t, pj,
SLU_MPI_TAG (0, kk0), /* (4*kk0)%tag_ub */
scp->comm, &send_req[pj]);
MPI_Isend (lusup1, msgcnt[1], MPI_DOUBLE, pj,
SLU_MPI_TAG (1, kk0), /* (4*kk0+1)%tag_ub */
scp->comm, &send_req[pj + Pc]);
}
}
} /* end if factored[kk] == -1 */
}
}
}
double tsch = SuperLU_timer_();
/************************************************************************/
#ifdef GPU_ACC
#include "dSchCompUdt-cuda.c"
#else
/*#include "SchCompUdt--Phi-2Ddynamic-alt.c"*/
#include "dSchCompUdt-2Ddynamic.c"
#endif
/*uncomment following to compare against SuperLU 3.3 baseline*/
/* #include "SchCompUdt--baseline.c" */
/************************************************************************/
NetSchurUpTimer += SuperLU_timer_()-tsch;
} /* for k0 = 0, ... */
/* ##################################################################
** END MAIN LOOP: for k0 = ...
################################################################## */
pdgstrfTimer= SuperLU_timer_()-pdgstrfTimer;
/* updating total flops */
#if ( PRNTlevel>=1 )
if (!iam) {
printf("Time in scattering %lf \n",scatter_timer );
printf("Time in dgemm %lf \n", gemm_timer );
printf("Total time spent in schur update is \t\t: %5.2lf seconds,\n",NetSchurUpTimer );
printf("Total Time in Factorization \t\t: %5.2lf seconds, \n", pdgstrfTimer);
printf("Time (other GEMM and Scatter) \t\t: %5.2lf seconds, \n", pdgstrfTimer-schur_flop_timer);
printf("Total time spent in schur update when offload \t\t: %5.2lf seconds,\n",CPUOffloadTimer );
}
#endif
#if ( DEBUGlevel>=2 )
for (i = 0; i < Pr * Pc; ++i) {
if (iam == i) {
dPrintLblocks(iam, nsupers, grid, Glu_persist, Llu);
dPrintUblocks(iam, nsupers, grid, Glu_persist, Llu);
printf ("(%d)\n", iam);
PrintInt10 ("Recv", nsupers, Llu->ToRecv);
}
MPI_Barrier (grid->comm);
}
#endif
// printf("Debug : MPI buffers 1\n");
/********************************************************
* Free memory *
********************************************************/
if (Pr * Pc > 1) {
SUPERLU_FREE (Lsub_buf_2[0]); /* also free Lsub_buf_2[1] */
SUPERLU_FREE (Lval_buf_2[0]); /* also free Lval_buf_2[1] */
if (Llu->bufmax[2] != 0)
SUPERLU_FREE (Usub_buf_2[0]);
if (Llu->bufmax[3] != 0)
SUPERLU_FREE (Uval_buf_2[0]);
if (U_diag_blk_send_req[myrow] != MPI_REQUEST_NULL) {
/* wait for last Isend requests to complete, deallocate objects */
for (krow = 0; krow < Pr; ++krow) {
if (krow != myrow)
MPI_Wait (U_diag_blk_send_req + krow, &status);
}
}
SUPERLU_FREE (U_diag_blk_send_req);
}
log_memory( -((Llu->bufmax[0] + Llu->bufmax[2]) * (num_look_aheads + 1) * iword +
(Llu->bufmax[1] + Llu->bufmax[3]) * (num_look_aheads + 1) * dword),
stat );
SUPERLU_FREE (Lsub_buf_2);
SUPERLU_FREE (Lval_buf_2);
SUPERLU_FREE (Usub_buf_2);
SUPERLU_FREE (Uval_buf_2);
SUPERLU_FREE (perm_c_supno);
SUPERLU_FREE (perm_u);
#ifdef ISORT
SUPERLU_FREE (iperm_u);
#endif
SUPERLU_FREE (look_ahead);
SUPERLU_FREE (factoredU);
SUPERLU_FREE (factored);
log_memory(-(6 * nsupers * iword), stat);
for (i = 0; i <= num_look_aheads; i++) {
SUPERLU_FREE (msgcnts[i]);
SUPERLU_FREE (msgcntsU[i]);
}
SUPERLU_FREE (msgcnts);
SUPERLU_FREE (msgcntsU);
for (i = 0; i <= num_look_aheads; i++) {
SUPERLU_FREE (send_reqs_u[i]);
SUPERLU_FREE (recv_reqs_u[i]);
SUPERLU_FREE (send_reqs[i]);
SUPERLU_FREE (recv_reqs[i]);
}
SUPERLU_FREE (recv_reqs_u);
SUPERLU_FREE (send_reqs_u);
SUPERLU_FREE (recv_reqs);
SUPERLU_FREE (send_reqs);
// printf("Debug : MPI buffers 3\n");
#ifdef GPU_ACC
checkCuda (cudaFreeHost (bigV));
checkCuda (cudaFreeHost (bigU));
cudaFree( (void*)dA ); /* Sherry added */
cudaFree( (void*)dB );
cudaFree( (void*)dC );
SUPERLU_FREE( handle );
SUPERLU_FREE( streams );
#else
SUPERLU_FREE (bigV);
SUPERLU_FREE (bigU);
#endif
log_memory(-(bigv_size + bigu_size) * dword, stat);
// printf("Debug : MPI buffers 5\n");
SUPERLU_FREE (Llu->ujrow);
SUPERLU_FREE (tempv2d);
SUPERLU_FREE (indirect);
SUPERLU_FREE (indirect2); /* Sherry added */
SUPERLU_FREE (iuip);
SUPERLU_FREE (ruip);
ldt = sp_ienv_dist(3);
log_memory( -(3 * ldt *ldt * dword + 2 * ldt * num_threads * iword
+ 2 * k * iword), stat );
/* Sherry added */
SUPERLU_FREE(omp_loop_time);
SUPERLU_FREE(full_u_cols);
SUPERLU_FREE(blk_ldu);
log_memory(-2 * ncb * dword, stat);
SUPERLU_FREE(stream_end_col);
SUPERLU_FREE(lookAheadFullRow);
SUPERLU_FREE(lookAheadStRow);
SUPERLU_FREE(lookAhead_lptr);
SUPERLU_FREE(lookAhead_ib);
SUPERLU_FREE(RemainFullRow);
SUPERLU_FREE(RemainStRow);
SUPERLU_FREE(Remain_lptr);
SUPERLU_FREE(Remain_ib);
SUPERLU_FREE(Remain_info);
SUPERLU_FREE(lookAhead_L_buff);
SUPERLU_FREE(Remain_L_buff);
log_memory( -(4 * mrb * iword + mrb * sizeof(Remain_info_t) +
ldt * ldt * (num_look_aheads + 1) * dword +
Llu->bufmax[1] * dword), stat );
SUPERLU_FREE(Ublock_info);
SUPERLU_FREE(Ublock_info_iukp);
SUPERLU_FREE(Ublock_info_rukp);
SUPERLU_FREE(Ublock_info_jb);
/* Prepare error message. */
if (*info == 0)
*info = n + 1;
#if ( PROFlevel>=1 )
TIC (t1);
#endif
MPI_Allreduce (info, &iinfo, 1, MPI_INT, MPI_MIN, grid->comm);
#if ( PROFlevel>=1 )
TOC (t2, t1);
stat->utime[COMM] += t2;
{
float msg_vol_max, msg_vol_sum, msg_cnt_max, msg_cnt_sum;
MPI_Reduce (&msg_cnt, &msg_cnt_sum,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm);
MPI_Reduce (&msg_cnt, &msg_cnt_max,
1, MPI_FLOAT, MPI_MAX, 0, grid->comm);
MPI_Reduce (&msg_vol, &msg_vol_sum,
1, MPI_FLOAT, MPI_SUM, 0, grid->comm);
MPI_Reduce (&msg_vol, &msg_vol_max,
1, MPI_FLOAT, MPI_MAX, 0, grid->comm);
if (!iam) {
printf ("\tPDGSTRF comm stat:"
"\tAvg\tMax\t\tAvg\tMax\n"
"\t\t\tCount:\t%.0f\t%.0f\tVol(MB)\t%.2f\t%.2f\n",
msg_cnt_sum / Pr / Pc, msg_cnt_max,
msg_vol_sum / Pr / Pc * 1e-6, msg_vol_max * 1e-6);
}
}
#endif
if (iinfo == n + 1)
*info = 0;
else
*info = iinfo;
// printf("test out\n");
#if ( PRNTlevel==3 )
MPI_Allreduce (&zero_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm);
if (!iam)
printf (".. # msg of zero size\t%d\n", iinfo);
MPI_Allreduce (&total_msg, &iinfo, 1, MPI_INT, MPI_SUM, grid->comm);
if (!iam)
printf (".. # total msg\t%d\n", iinfo);
#endif
#if ( DEBUGlevel>=2 )
for (i = 0; i < Pr * Pc; ++i) {
if (iam == i) {
dPrintLblocks (iam, nsupers, grid, Glu_persist, Llu);
dPrintUblocks (iam, nsupers, grid, Glu_persist, Llu);
printf ("(%d)\n", iam);
PrintInt10 ("Recv", nsupers, Llu->ToRecv);
}
MPI_Barrier (grid->comm);
}
#endif
#if ( DEBUGlevel>=3 )
printf ("(%d) num_copy=%d, num_update=%d\n", iam, num_copy, num_update);
#endif
#if ( DEBUGlevel>=1 )
CHECK_MALLOC (iam, "Exit pdgstrf()");
#endif
return 0;
} /* PDGSTRF */
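/*
 * A minimal sketch (not part of SuperLU_DIST proper) of the message-tag
 * scheme used throughout PDGSTRF above: each panel k0 owns four tags --
 * 0 = L index, 1 = L value, 2 = U index, 3 = U value -- cycled modulo
 * tag_ub so that panels inside the look-ahead window never reuse a live
 * tag. SLU_MPI_TAG(type, k0) expands to essentially:
 *
 *   int slu_mpi_tag_sketch (int type, int k0, int tag_ub)
 *   {
 *       return (4 * k0 + type) % tag_ub;
 *   }
 */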
|
tasks_mpi_openmp.c | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#ifdef _MPI
#include "mpi.h"
#endif
#define STRINGIZE_MACRO(A) #A
#define STRINGIZE(A) STRINGIZE_MACRO(A)
int main(int argc, char *argv[])
{
int size = 1;
int rank = 0;
int tid = 0;
FILE *outputfile;
if (argc > 1)
{
outputfile = fopen(argv[1], "a");
if (outputfile == NULL) {
fprintf(stderr, "Error. Unable to open output file %s. Falling back to stdout.\n", argv[1]);
outputfile = stdout;
}
}
else
{
outputfile = stdout;
}
#ifdef _MPI
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &size);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#endif
#ifdef _OPENMP
#pragma omp parallel default(shared) private(tid)
#endif
{
#ifdef _OPENMP
int nthreads = omp_get_num_threads();
tid = omp_get_thread_num();
#else
int nthreads = 1;
#endif
// sleep long enough to make it possible to distinguish whether we are running in parallel or not;
// if GREASY is running correctly, the test should take approximately this amount of time to run
sleep(atoi(STRINGIZE(SLEEP_TIME)));
fprintf(outputfile, "Hello, World from thread %d out of %d from process %d out of %d\n",
tid, nthreads, rank, size);
}
#ifdef _MPI
MPI_Finalize();
#endif
if (outputfile != stdout) {
fclose(outputfile);
}
return 0;
}
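/* Example build/run (hypothetical file name and launcher flags; adjust to
 * your toolchain). -fopenmp defines _OPENMP automatically, while _MPI and
 * SLEEP_TIME (stringized above and converted back with atoi) must be passed
 * explicitly:
 *
 *   mpicc -fopenmp -D_MPI -DSLEEP_TIME=10 tasks_mpi_openmp.c -o tasks_mpi_openmp
 *   OMP_NUM_THREADS=4 mpirun -np 2 ./tasks_mpi_openmp output.txt
 */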
|
re_model_template.h | /*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_RE_MODEL_TEMPLATE_H_
#define GPB_RE_MODEL_TEMPLATE_H_
#define _USE_MATH_DEFINES // for M_PI
#include <cmath>
#include <GPBoost/type_defs.h>
#include <GPBoost/re_comp.h>
#include <GPBoost/sparse_matrix_utils.h>
#include <GPBoost/Vecchia_utils.h>
#include <GPBoost/GP_utils.h>
#include <GPBoost/likelihoods.h>
//#include <Eigen/src/misc/lapack.h>
#include <memory>
#include <mutex>
#include <vector>
#include <algorithm> // std::shuffle
#include <random> // std::default_random_engine
//#include <typeinfo> // Only needed for debugging
#include <chrono> // only needed for debugging
#include <thread> // only needed for debugging
//std::this_thread::sleep_for(std::chrono::milliseconds(200));// Only for debugging
//std::chrono::steady_clock::time_point begin = std::chrono::steady_clock::now();// Only for debugging
//std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now();// Only for debugging
//double el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count()) / 1000000.;// Only for debugging
//Log::REInfo("Time for : %g", el_time);// Only for debugging
#ifndef M_PI
#define M_PI 3.1415926535897932384626433832795029
#endif
#include <LightGBM/utils/log.h>
using LightGBM::Log;
namespace GPBoost {
/*!
* \brief Template class used in the wrapper class REModel
* The template parameters <T_mat, T_chol> can be either <den_mat_t, chol_den_mat_t> or <sp_mat_t, chol_sp_mat_t>
* depending on whether dense or sparse linear matrix algebra is used
*/
template<typename T_mat, typename T_chol>
class REModelTemplate {
public:
/*! \brief Null constructor */
REModelTemplate();
/*!
* \brief Constructor
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of random effects / Gaussian processes (same values = same process realization)
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param num_re_group Number of grouped (intercept) random effects
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1.
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param num_gp Number of (intercept) Gaussian processes
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian process. We follow the notation and parametrization of Diggle and Ribeiro (2007) except for the Matern covariance where we follow Rasmussen and Williams (2006)
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance, irrelevant for some covariance functions such as the exponential or Gaussian)
* \param vecchia_approx If true, the Vecchia approximation is used for the Gaussian process
* \param num_neighbors The number of neighbors used in the Vecchia approximation
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions
* \param likelihood Likelihood function for the observed response variable. Default = "gaussian"
*/
REModelTemplate(data_size_t num_data,
const gp_id_t* cluster_ids_data = nullptr,
const char* re_group_data = nullptr,
data_size_t num_re_group = 0,
const double* re_group_rand_coef_data = nullptr,
const int32_t* ind_effect_group_rand_coef = nullptr,
data_size_t num_re_group_rand_coef = 0,
data_size_t num_gp = 0,
const double* gp_coords_data = nullptr,
int dim_gp_coords = 2,
const double* gp_rand_coef_data = nullptr,
data_size_t num_gp_rand_coef = 0,
const char* cov_fct = nullptr,
double cov_fct_shape = 0.,
bool vecchia_approx = false,
int num_neighbors = 30,
const char* vecchia_ordering = nullptr,
const char* vecchia_pred_type = nullptr,
int num_neighbors_pred = 30,
const char* likelihood = nullptr) {
CHECK(num_data > 0);
num_data_ = num_data;
vecchia_approx_ = vecchia_approx;
//Set up likelihood
string_t likelihood_strg;
if (likelihood == nullptr) {
likelihood_strg = "gaussian";
}
else {
likelihood_strg = std::string(likelihood);
}
gauss_likelihood_ = likelihood_strg == "gaussian";
//Set up GP IDs
SetUpGPIds(num_data_, cluster_ids_data, num_data_per_cluster_, data_indices_per_cluster_, unique_clusters_, num_clusters_);
num_comps_total_ = 0;
//Do some checks for grouped RE components and set meta data (number of components etc.)
std::vector<std::vector<string_t>> re_group_levels;//Matrix with group levels for the grouped random effects (re_group_levels[j] contains the levels for RE number j)
if (num_re_group > 0) {
if (vecchia_approx) {
Log::REFatal("The Veccia approximation cannot be used when there are grouped random effects (in the current implementation).");
}
num_re_group_ = num_re_group;
CHECK(re_group_data != nullptr);
if (num_re_group_rand_coef > 0) {
num_re_group_rand_coef_ = num_re_group_rand_coef;
CHECK(re_group_rand_coef_data != nullptr);
CHECK(ind_effect_group_rand_coef != nullptr);
for (int j = 0; j < num_re_group_rand_coef_; ++j) {
CHECK(0 < ind_effect_group_rand_coef[j] && ind_effect_group_rand_coef[j] <= num_re_group_);
}
ind_effect_group_rand_coef_ = std::vector<int>(ind_effect_group_rand_coef, ind_effect_group_rand_coef + num_re_group_rand_coef_);
}
num_re_group_total_ = num_re_group_ + num_re_group_rand_coef_;
num_comps_total_ += num_re_group_total_;
// Convert characters in 'const char* re_group_data' to matrix (num_re_group_ x num_data_) with strings of group labels
re_group_levels = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_));
if (num_re_group_ > 0) {
ConvertCharToStringGroupLevels(num_data_, num_re_group_, re_group_data, re_group_levels);
}
}
//Do some checks for GP components and set meta data (number of components etc.)
if (num_gp > 0) {
if (num_gp > 1) {
Log::REFatal("num_gp can only be either 0 or 1 in the current implementation");
}
num_gp_ = num_gp;
ind_intercept_gp_ = num_comps_total_;
CHECK(dim_gp_coords > 0);
CHECK(gp_coords_data != nullptr);
CHECK(cov_fct != nullptr);
dim_gp_coords_ = dim_gp_coords;
cov_fct_ = std::string(cov_fct);
cov_fct_shape_ = cov_fct_shape;
if (vecchia_approx) {
Log::REInfo("Starting nearest neighbor search for Vecchia approximation");
CHECK(num_neighbors > 0);
num_neighbors_ = num_neighbors;
CHECK(num_neighbors_pred > 0);
num_neighbors_pred_ = num_neighbors_pred;
if (vecchia_ordering == nullptr) {
vecchia_ordering_ = "none";
}
else {
vecchia_ordering_ = std::string(vecchia_ordering);
CHECK(vecchia_ordering_ == "none" || vecchia_ordering_ == "random");
}
if (vecchia_pred_type == nullptr) {
vecchia_pred_type_ = "order_obs_first_cond_obs_only";
}
else {
vecchia_pred_type_ = std::string(vecchia_pred_type);
if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) {
Log::REFatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_.c_str());
}
}
}
if (num_gp_rand_coef > 0) {//Random slopes
CHECK(gp_rand_coef_data != nullptr);
num_gp_rand_coef_ = num_gp_rand_coef;
}
num_gp_total_ = num_gp_ + num_gp_rand_coef_;
num_comps_total_ += num_gp_total_;
if (vecchia_approx) {
double num_mem_d = ((double)num_gp_total_) * ((double)num_data_) * ((double)num_neighbors_) * ((double)num_neighbors_);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
if (mem_size > 8000) {
Log::REWarning("The current implementation of the Vecchia approximation is not optimized for memory usage. In your case (num. obs. = %d and num. neighbors = %d), at least approximately %d mb of memory is needed. If this is a problem, contact the developer of this package and ask to implement this feature.", num_data_, num_neighbors_, mem_size);
}
}
}
DetermineSpecialCasesModelsEstimationPrediction();
//Create RE/GP component models
for (const auto& cluster_i : unique_clusters_) {
std::vector<std::shared_ptr<RECompBase<T_mat>>> re_comps_cluster_i;
if (vecchia_approx_) {
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<Triplet_t> entries_init_B_cluster_i;
std::vector<Triplet_t> entries_init_B_grad_cluster_i;
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
CreateREComponentsVecchia(num_data_,
data_indices_per_cluster_,
cluster_i,
num_data_per_cluster_,
gp_coords_data,
dim_gp_coords_,
gp_rand_coef_data,
num_gp_rand_coef_,
cov_fct_,
cov_fct_shape_,
re_comps_cluster_i,
nearest_neighbors_cluster_i,
dist_obs_neighbors_cluster_i,
dist_between_neighbors_cluster_i,
entries_init_B_cluster_i,
entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i,
vecchia_ordering_,
num_neighbors_);
nearest_neighbors_.insert({ cluster_i, nearest_neighbors_cluster_i });
dist_obs_neighbors_.insert({ cluster_i, dist_obs_neighbors_cluster_i });
dist_between_neighbors_.insert({ cluster_i, dist_between_neighbors_cluster_i });
entries_init_B_.insert({ cluster_i, entries_init_B_cluster_i });
entries_init_B_grad_.insert({ cluster_i, entries_init_B_grad_cluster_i });
z_outer_z_obs_neighbors_.insert({ cluster_i, z_outer_z_obs_neighbors_cluster_i });
}//end vecchia_approx_
else {//not vecchia_approx_
CreateREComponents(num_data_,
num_re_group_,
data_indices_per_cluster_,
cluster_i,
re_group_levels,
num_data_per_cluster_,
num_re_group_rand_coef_,
re_group_rand_coef_data,
ind_effect_group_rand_coef_,
num_gp_, gp_coords_data,
dim_gp_coords_,
gp_rand_coef_data,
num_gp_rand_coef_,
cov_fct_,
cov_fct_shape_,
ind_intercept_gp_,
!only_grouped_REs_use_woodbury_identity_,
re_comps_cluster_i);
}//end not vecchia_approx_
re_comps_.insert({ cluster_i, re_comps_cluster_i });
}//end loop over clusters
//Create matrices Z and ZtZ if Woodbury identity is used (used only if there are only grouped REs and no GPs)
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
InitializeMatricesForOnlyGroupedREsUseWoodburyIdentity();
}
InitializeIdentityMatricesForGaussianData();
if (vecchia_approx_) {
Log::REInfo("Nearest neighbors for Vecchia approximation found");
}
CheckCompatibilitySpecialOptions();
InitializeLikelihoods(likelihood_strg);
DetermineCovarianceParameterIndicesNumCovPars();
////Following only prints things for debugging
//Log::REInfo("********************** Meta data ********************************");
//Log::REInfo("num_data_ : %d", num_data_);
//Log::REInfo("num_clusters_ : %d", num_clusters_);
//Log::REInfo("num_re_group_ : %d", num_re_group_);
//Log::REInfo("num_re_group_rand_coef_ : %d", num_re_group_rand_coef_);
//Log::REInfo("num_re_group_total_ : %d", num_re_group_total_);
//Log::REInfo("num_gp_rand_coef_ : %d", num_gp_rand_coef_);
//Log::REInfo("num_gp_total_ : %d", num_gp_total_);
//Log::REInfo("num_cov_par_: %d", num_cov_par_);
//for (unsigned i = 0; i < ind_par_.size(); i++) { Log::REInfo("ind_par_[%d]: %d", i, ind_par_[i]); }
//Log::REInfo("******************************************************");
//int ii = 0;
//for (const auto& cluster_i : unique_clusters_) {
// Log::REInfo("unique_clusters_[%d]: %d", ii, cluster_i);
// Log::REInfo("num_data_per_cluster_[%d]: %d", cluster_i, num_data_per_cluster_[cluster_i]);
// //for (int j = 0; j < std::min((int)data_indices_per_cluster_[cluster_i].size(), 10); ++j) { Log::REInfo("data_indices_per_cluster_[%d][%d]: %d", cluster_i, j, data_indices_per_cluster_[cluster_i][j]); }
// if (num_re_group_ > 0) {
// Log::REInfo("*********************** Grouped REs *******************************");
// //Log::REInfo("re_comps_[cluster_i] %s ", typeid(re_comps_[cluster_i]).name());
// //Log::REInfo("re_comps_[cluster_i].size(): %d", re_comps_[cluster_i].size());
// //for (const auto& re_comp : re_comps_[cluster_i]) {
// for (int j = 0; j < re_comps_[cluster_i].size(); ++j) {
// std::shared_ptr<RECompGroup<T_mat>> re_comp_group = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][j]);
// //for (const auto& el : re_comp_group->group_data_) { Log::REInfo("re_comps_[%d][j].group_data_[i]: %d", cluster_i, el); }
// if (!re_comp_group->is_rand_coef_) {
// for (int i = 0; i < std::min((int)(*re_comp_group->group_data_).size(), 10); i++) { Log::REInfo("re_comps_[%d][%d].group_data_[%d]: %s", cluster_i, j, i, (*re_comp_group->group_data_)[i]); }
// }
// else if (re_comp_group->is_rand_coef_) {
// for (int i = 0; i < std::min(num_data_per_cluster_[cluster_i], 10); i++) { Log::REInfo("re_comps_[%d][%d].group_data_ref_[%d]: %s", cluster_i, j, i, (*re_comp_group->group_data_)[i]); }
// for (int i = 0; i < std::min(num_data_per_cluster_[cluster_i], 10); i++) { Log::REInfo("re_comps_[%d][%d].rand_coef_data_[%d]: %g", cluster_i, j, i, re_comp_group->rand_coef_data_[i]); }
// }
// }
// }
// ii++;
//}
}//end REModelTemplate
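// Example instantiation (a sketch only; in practice the wrapper class REModel
// does this wiring, and 'group_labels' below is a hypothetical character
// array of null-terminated group labels):
//   REModelTemplate<den_mat_t, chol_den_mat_t> model(
//       num_data,
//       /*cluster_ids_data=*/nullptr,
//       /*re_group_data=*/group_labels,
//       /*num_re_group=*/1);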
/*! \brief Destructor */
~REModelTemplate() {
}
/*! \brief Disable copy */
REModelTemplate& operator=(const REModelTemplate&) = delete;
/*! \brief Disable copy */
REModelTemplate(const REModelTemplate&) = delete;
/*!
* \brief Returns the type of likelihood
*/
string_t GetLikelihood() {
return(likelihood_[unique_clusters_[0]]->GetLikelihood());
}
/*!
* \brief Set / change the type of likelihood
* \param likelihood Likelihood name
*/
void SetLikelihood(const string_t& likelihood) {
bool gauss_likelihood_before = gauss_likelihood_;
bool only_one_grouped_RE_calculations_on_RE_scale_before = only_one_grouped_RE_calculations_on_RE_scale_;
bool only_grouped_REs_use_woodbury_identity_before = only_grouped_REs_use_woodbury_identity_;
gauss_likelihood_ = likelihood == "gaussian";
DetermineSpecialCasesModelsEstimationPrediction();
CheckCompatibilitySpecialOptions();
//Make adaptions in re_comps_ for special options when switching between Gaussian and non-Gaussian likelihoods
if (gauss_likelihood_before && !gauss_likelihood_) {
if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
for (const auto& cluster_i : unique_clusters_) {
re_comps_[cluster_i][0]->DropZ();
}
}
}
else if (!gauss_likelihood_before && gauss_likelihood_) {
if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
for (const auto& cluster_i : unique_clusters_) {
re_comps_[cluster_i][0]->AddZ();
}
}
}
//Matrices used when only_grouped_REs_use_woodbury_identity_==true
if ((only_grouped_REs_use_woodbury_identity_ && !only_grouped_REs_use_woodbury_identity_before) ||
(only_grouped_REs_use_woodbury_identity_ && only_one_grouped_RE_calculations_on_RE_scale_before && !only_one_grouped_RE_calculations_on_RE_scale_)) {
InitializeMatricesForOnlyGroupedREsUseWoodburyIdentity();
}
else if (!only_grouped_REs_use_woodbury_identity_) {
//Delete not required matrices
Zt_ = std::map<gp_id_t, sp_mat_t>();
ZtZ_ = std::map<gp_id_t, sp_mat_t>();
cum_num_rand_eff_ = std::map<gp_id_t, std::vector<data_size_t>>();
Zj_square_sum_ = std::map<gp_id_t, std::vector<double>>();
ZtZj_ = std::map<gp_id_t, std::vector<sp_mat_t>>();
}
//Identity matrices for Gaussian data
if (!gauss_likelihood_before && gauss_likelihood_) {
InitializeIdentityMatricesForGaussianData();
}
else if (gauss_likelihood_before && !gauss_likelihood_) {
//Delete not required matrices
Id_ = std::map<gp_id_t, T_mat>();
Id_cs_ = std::map<gp_id_t, cs>();
}
InitializeLikelihoods(likelihood);
DetermineCovarianceParameterIndicesNumCovPars();
}
/*!
* \brief Find linear regression coefficients and covariance parameters that minimize the negative log-likelihood (=MLE) using (Nesterov accelerated) gradient descent
* Note: You should pre-allocate memory for optim_cov_pars and optim_coef. Their lengths equal the number of covariance parameters and the number of regression coefficients, respectively
* If calc_std_dev=true, you also need to pre-allocate memory for std_dev_cov_par and std_dev_coef of the same length for the standard deviations
* \param y_data Response variable data
* \param covariate_data Covariate data (=independent variables, features). Set to nullptr if there is no covariate data
* \param num_covariates Number of covariates
* \param[out] optim_cov_pars Optimal covariance parameters
* \param[out] optim_coef Optimal regression coefficients
* \param[out] num_it Number of iterations
* \param init_cov_pars Initial values for covariance parameters of RE components
* \param init_coef Initial values for the regression coefficients
* \param lr_coef Learning rate for fixed-effect linear coefficients
* \param lr_cov Learning rate for covariance parameters. If lr_cov < 0, default values are used. Default value = 0.1 for "gradient_descent" and 1. for "fisher_scoring"
* \param acc_rate_coef Acceleration rate for coefficients for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param max_iter Maximal number of iterations
* \param delta_rel_conv Convergence criterion: stop iteration if relative change in parameters is below this value
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true, only used for "gradient_descent"
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param optimizer_cov Optimizer for covariance parameters. Options: "gradient_descent" or "fisher_scoring" (default)
* \param optimizer_coef Optimizer for coefficients. Options: "gradient_descent" or "wls" (coordinate descent using weighted least squares, default)
* \param[out] std_dev_cov_par Standard deviations for the covariance parameters
* \param[out] std_dev_coef Standard deviations for the coefficients
* \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information
* \param convergence_criterion The convergence criterion used for terminating the optimization algorithm. Options: "relative_change_in_log_likelihood" (default) or "relative_change_in_parameters"
* \param fixed_effects Fixed effects component of location parameter (only used for non-Gaussian data)
* \param learn_covariance_parameters If true, covariance parameters are estimated (default = true)
*/
void OptimLinRegrCoefCovPar(const double* y_data,
const double* covariate_data,
int num_covariates,
double* optim_cov_pars,
double* optim_coef,
int& num_it,
double* init_cov_pars,
double* init_coef = nullptr,
double lr_coef = 0.1,
double lr_cov = -1.,
double acc_rate_coef = 0.5,
double acc_rate_cov = 0.5,
int momentum_offset = 2,
int max_iter = 1000,
double delta_rel_conv = 1.0e-6,
bool use_nesterov_acc = true,
int nesterov_schedule_version = 0,
string_t optimizer_cov = "fisher_scoring",
string_t optimizer_coef = "wls",
double* std_dev_cov_par = nullptr,
double* std_dev_coef = nullptr,
bool calc_std_dev = false,
string_t convergence_criterion = "relative_change_in_log_likelihood",
const double* fixed_effects = nullptr,
bool learn_covariance_parameters = true) {
// Some checks
if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer_cov) == SUPPORTED_OPTIM_COV_PAR_.end()) {
Log::REFatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer_cov.c_str());
}
if (SUPPORTED_CONV_CRIT_.find(convergence_criterion) == SUPPORTED_CONV_CRIT_.end()) {
Log::REFatal("Convergence criterion '%s' is not supported.", convergence_criterion.c_str());
}
if (!gauss_likelihood_) {
if (optimizer_cov != "gradient_descent") {
Log::REFatal("Optimizer option '%s' is not supported for covariance parameters for non-Gaussian data. Only 'gradient_descent' is supported.", optimizer_cov.c_str());
}
if (calc_std_dev) {
Log::REFatal("Calculation of standard deviations is not supported for non-Gaussian data.");
}
}
if (covariate_data != nullptr) {
if (SUPPORTED_OPTIM_COEF_.find(optimizer_coef) == SUPPORTED_OPTIM_COEF_.end()) {
Log::REFatal("Optimizer option '%s' is not supported for regression coefficients.", optimizer_coef.c_str());
}
if (!gauss_likelihood_ && optimizer_coef != "gradient_descent") {
Log::REFatal("Optimizer option '%s' is not supported for linear regression coefficients for non-Gaussian data. Only 'gradient_descent' is supported.", optimizer_coef.c_str());
}
}
if (gauss_likelihood_ && fixed_effects != nullptr) {
Log::REFatal("Additional external fixed effects in 'fixed_effects' can currently only be used for non-Gaussian data");
}
// Initialization of variables
if (covariate_data == nullptr) {
has_covariates_ = false;
}
else {
has_covariates_ = true;
}
bool use_nesterov_acc_coef = use_nesterov_acc;
if (optimizer_cov != "gradient_descent") {
use_nesterov_acc = false;//Nesterov acceleration is only used for gradient descent, not for Fisher scoring
}
if (optimizer_coef != "gradient_descent") {
use_nesterov_acc_coef = false;//Nesterov acceleration is only used for gradient descent, not for wls
}
bool terminate_optim = false;
num_it = max_iter;
bool profile_out_marginal_variance = (optimizer_cov == "gradient_descent" && gauss_likelihood_);
// Profiling out sigma (=use closed-form expression for error / nugget variance) is better for gradient descent for Gaussian data (the parameters usually live on different scales and the nugget needs a small learning rate while the others do not...)
const double* fixed_effects_ptr = fixed_effects;
// Initialization of covariance parameters related variables
if (lr_cov < 0.) {//a value below 0 indicates that the default values should be used
if (optimizer_cov == "fisher_scoring") {
lr_cov = 1.;
}
else if (optimizer_cov == "gradient_descent") {
lr_cov = 0.1;
}
}
vec_t cov_pars = Eigen::Map<const vec_t>(init_cov_pars, num_cov_par_);
vec_t cov_pars_lag1 = vec_t(num_cov_par_);//used only if convergence_criterion == "relative_change_in_parameters"
vec_t cov_pars_after_grad_aux;//auxiliary variable used only if use_nesterov_acc == true
vec_t cov_pars_after_grad_aux_lag1 = cov_pars;//auxiliary variable used only if use_nesterov_acc == true
// Set response variable data (if needed)
if ((!has_covariates_ || !gauss_likelihood_) && y_data != nullptr) {
SetY(y_data);
}
if (!has_covariates_ || !gauss_likelihood_) {
CHECK(y_has_been_set_);//response variable data needs to have been set at this point for non-Gaussian data and for Gaussian data without covariates
}
// Initialization of linear regression coefficients related variables
vec_t beta, beta_lag1, beta_after_grad_aux, beta_after_grad_aux_lag1, resid, fixed_effects_vec;
if (has_covariates_) {
num_coef_ = num_covariates;
X_ = Eigen::Map<const den_mat_t>(covariate_data, num_data_, num_coef_);
//Check whether one of the columns contains only 1's and, if not, issue a warning
vec_t vec_ones(num_data_);
vec_ones.setOnes();
bool has_intercept = false;
for (int icol = 0; icol < num_coef_; ++icol) {
if ((X_.col(icol) - vec_ones).cwiseAbs().sum() < 0.001) {
has_intercept = true;
break;
}
}
if (!has_intercept) {
Log::REWarning("The covariate data contains no column of ones. This means that there is no intercept included.");
}
beta = vec_t(num_covariates);
if (init_coef == nullptr) {
beta.setZero();
}
else {
beta = Eigen::Map<const vec_t>(init_coef, num_covariates);
}
beta_after_grad_aux_lag1 = beta;
if (gauss_likelihood_) {
CHECK(y_data != nullptr);
// Copy of response data (used only for Gaussian data and if there are also linear covariates since then y_ is modified during the optimization algorithm and this contains the original data)
y_vec_ = Eigen::Map<const vec_t>(y_data, num_data_);
y_has_been_set_ = true;
resid = y_vec_ - (X_ * beta);
SetY(resid.data());
}
else {
fixed_effects_vec = X_ * beta;
if (fixed_effects != nullptr) {//add external fixed effects to linear predictor
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
fixed_effects_vec[i] += fixed_effects[i];
}
}
fixed_effects_ptr = fixed_effects_vec.data();
}
}//end if has_covariates_
Log::REDebug("Initial covariance parameters");
for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::REDebug("cov_pars[%d]: %g", i, cov_pars[i]); }
if (has_covariates_) {
Log::REDebug("Initial linear regression coefficients");
for (int i = 0; i < std::min((int)beta.size(), 3); ++i) { Log::REDebug("beta[%d]: %g", i, beta[i]); }
}
// Initialize optimizer:
// - factorize the covariance matrix (Gaussian data) or calculate the posterior mode of the random effects for use in the Laplace approximation (non-Gaussian data)
// - calculate initial value of objective function
CalcCovFactorOrModeAndNegLL(cov_pars, fixed_effects_ptr);
// TODO: for likelihood evaluation we don't need y_aux = Psi^-1 * y but only Psi^-0.5 * y. So, if has_covariates_==true, we might skip this step here and save some time
if (gauss_likelihood_) {
Log::REDebug("Initial negative log-likelihood: %g", neg_log_likelihood_);
}
else {
Log::REDebug("Initial approximate negative marginal log-likelihood: %g", neg_log_likelihood_);
}
// Start optimization
for (int it = 0; it < max_iter; ++it) {
neg_log_likelihood_lag1_ = neg_log_likelihood_;
cov_pars_lag1 = cov_pars;
// Update linear regression coefficients using gradient descent or generalized least squares (the latter option only for Gaussian data)
if (has_covariates_) {
beta_lag1 = beta;
if (optimizer_coef == "gradient_descent") {// one step of gradient descent
vec_t grad_beta;
// Calculate gradient for linear regression coefficients
CalcLinCoefGrad(cov_pars[0], beta, grad_beta, fixed_effects_ptr);
// Update linear regression coefficients, apply step size safeguard, and recalculate mode for Laplace approx. (only for non-Gaussian data)
UpdateLinCoef(beta, grad_beta, lr_coef, cov_pars, use_nesterov_acc_coef, it, beta_after_grad_aux, beta_after_grad_aux_lag1,
acc_rate_coef, nesterov_schedule_version, momentum_offset, fixed_effects, fixed_effects_vec);
fixed_effects_ptr = fixed_effects_vec.data();
}
else if (optimizer_coef == "wls") {// coordinate descent using generalized least squares (only for Gaussian data)
CHECK(gauss_likelihood_);
SetY(y_vec_.data());
CalcYAux();
UpdateCoefGLS(X_, beta);
// Set resid for updating covariance parameters
resid = y_vec_ - (X_ * beta);
SetY(resid.data());
// Calculate y_aux = Psi^-1 * y (if not only_grouped_REs_use_woodbury_identity_) or y_tilde and y_tilde2 (if only_grouped_REs_use_woodbury_identity_) for covariance parameter update (only for Gaussian data)
if (only_grouped_REs_use_woodbury_identity_) {
CalcYtilde<T_mat>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else {
CalcYAux();//y_aux = Psi^-1 * y
}
EvalNegLogLikelihood(nullptr, cov_pars.data(), neg_log_likelihood_after_lin_coef_update_, true, true, true);
}
}
else {
neg_log_likelihood_after_lin_coef_update_ = neg_log_likelihood_lag1_;
}
// end update regression coefficients
// Update covariance parameters using one step of gradient descent or Fisher scoring
if (learn_covariance_parameters) {
// Calculate gradient or natural gradient = FI^-1 * grad (for Fisher scoring)
vec_t nat_grad; // nat_grad = grad for gradient descent and nat_grad = FI^-1 * grad for Fisher scoring (="natural" gradient)
if (optimizer_cov == "gradient_descent") {//gradient descent
if (gauss_likelihood_) {
// First, profile out sigma (=use closed-form expression for error / nugget variance) since this is better for gradient descent (the parameters usually live on different scales and the nugget needs a small learning rate while the others do not...)
CalcYTPsiIInvY<T_mat>(cov_pars[0], true, 1, true, true);
cov_pars[0] /= num_data_;
sigma2_ = cov_pars[0];
}
CalcCovParGrad(cov_pars, nat_grad, false, false, fixed_effects_ptr);
}
else if (optimizer_cov == "fisher_scoring") {//Fisher scoring
// We don't profile out sigma (=don't use closed-form expression for error / nugget variance) since this is better for Fisher scoring (otherwise much more iterations are needed)
vec_t grad;
den_mat_t FI;
CalcCovParGrad(cov_pars, grad, true, true, fixed_effects_ptr);
CalcFisherInformation(cov_pars, FI, true, true, true);
nat_grad = FI.llt().solve(grad);
}
// Update covariance parameters, apply step size safeguard, factorize covariance matrix, and calculate new value of objective function
UpdateCovPars(cov_pars, nat_grad, lr_cov, profile_out_marginal_variance, use_nesterov_acc, it, optimizer_cov,
cov_pars_after_grad_aux, cov_pars_after_grad_aux_lag1, acc_rate_cov, nesterov_schedule_version, momentum_offset, fixed_effects_ptr);
// Check for NA or Inf
if (std::isnan(cov_pars[0]) || std::isinf(cov_pars[0])) {
Log::REFatal("NaN or Inf occurred in covariance parameters. If this is a problem, consider doing the following. If you have used Fisher scoring, try using gradient descent. If you have used gradient descent, consider using a smaller learning rate.");
}
}
else {
neg_log_likelihood_ = neg_log_likelihood_after_lin_coef_update_;
}
// end update covariance parameters
// Check convergence
bool likelihood_is_na = std::isnan(neg_log_likelihood_) || std::isinf(neg_log_likelihood_);//if the likelihood is NA, we monitor the parameters instead of the likelihood
if (convergence_criterion == "relative_change_in_parameters" || likelihood_is_na) {
if (has_covariates_) {
if (((beta - beta_lag1).norm() < delta_rel_conv * beta_lag1.norm()) && ((cov_pars - cov_pars_lag1).norm() < delta_rel_conv * cov_pars_lag1.norm())) {
terminate_optim = true;
}
}
else {
if ((cov_pars - cov_pars_lag1).norm() < delta_rel_conv * cov_pars_lag1.norm()) {
terminate_optim = true;
}
}
}
else if (convergence_criterion == "relative_change_in_log_likelihood") {
if (std::abs(neg_log_likelihood_ - neg_log_likelihood_lag1_) < delta_rel_conv * std::abs(neg_log_likelihood_lag1_)) {
terminate_optim = true;
}
}
// Output for debugging
if (it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) {
Log::REDebug("GPModel parameter optimization iteration number %d", it + 1);
for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::REDebug("cov_pars[%d]: %g", i, cov_pars[i]); }
for (int i = 0; i < std::min((int)beta.size(), 5); ++i) { Log::REDebug("beta[%d]: %g", i, beta[i]); }
if (has_covariates_ && beta.size() > 5) {
Log::REDebug("Note: only the first 5 linear regression coefficients are shown");
}
if (gauss_likelihood_) {
Log::REDebug("Negative log-likelihood: %g", neg_log_likelihood_);
}
else {
Log::REDebug("Approximate negative marginal log-likelihood: %g", neg_log_likelihood_);
}
}
// Check whether to terminate
if (terminate_optim) {
num_it = it + 1;
break;
}
}//end for loop for optimization
if (num_it == max_iter) {
Log::REDebug("GPModel: no convergence after the maximal number of iterations");
}
else {
Log::REDebug("GPModel parameter estimation finished after %d iteration", num_it);
}
for (int i = 0; i < num_cov_par_; ++i) {
optim_cov_pars[i] = cov_pars[i];
}
if (calc_std_dev) {
vec_t std_dev_cov(num_cov_par_);
CalcStdDevCovPar(cov_pars, std_dev_cov);//TODO: maybe another call to CalcCovFactor can be avoided in CalcStdDevCovPar (need to take care of cov_pars[0])
for (int i = 0; i < num_cov_par_; ++i) {
std_dev_cov_par[i] = std_dev_cov[i];
}
}
if (has_covariates_) {
for (int i = 0; i < num_covariates; ++i) {
optim_coef[i] = beta[i];
}
if (calc_std_dev) {
vec_t std_dev_beta(num_covariates);
CalcStdDevCoef(cov_pars, X_, std_dev_beta);
for (int i = 0; i < num_covariates; ++i) {
std_dev_coef[i] = std_dev_beta[i];
}
}
}
}//end OptimLinRegrCoefCovPar
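// For reference, a minimal sketch (hypothetical variable names; the actual
// logic lives in UpdateCovPars / UpdateLinCoef) of one Nesterov-accelerated
// step as used above for nesterov_schedule_version == 0:
//   theta_after_grad = theta - lr * grad;                  // plain gradient step
//   double mu = (it <= momentum_offset) ? 0. : acc_rate;   // no momentum at the start
//   theta = theta_after_grad + mu * (theta_after_grad - theta_after_grad_lag1);
//   theta_after_grad_lag1 = theta_after_grad;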
/*!
* \brief Calculate the value of the negative log-likelihood
* \param y_data Response variable data
* \param cov_pars Values for covariance parameters of RE components
* \param[out] negll Negative log-likelihood
* \param CalcCovFactor_already_done If true, it is assumed that the covariance matrix has already been factorized
* \param CalcYAux_already_done If true, it is assumed that y_aux_=Psi^-1y_ has already been calculated (only relevant if not only_grouped_REs_use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for only_grouped_REs_use_woodbury_identity_)
*/
void EvalNegLogLikelihood(const double* y_data, const double* cov_pars, double& negll,
bool CalcCovFactor_already_done = false, bool CalcYAux_already_done = false, bool CalcYtilde_already_done = false) {
CHECK(!(CalcYAux_already_done && !CalcCovFactor_already_done));// CalcYAux_already_done && !CalcCovFactor_already_done makes no sense
if (y_data != nullptr) {
SetY(y_data);
}
if (!CalcCovFactor_already_done) {
const vec_t cov_pars_vec = Eigen::Map<const vec_t>(cov_pars, num_cov_par_);
SetCovParsComps(cov_pars_vec);
CalcCovFactor(false, true, 1., false);//Create covariance matrix and factorize it
}
//Calculate quadratic form y^T Psi^-1 y
double yTPsiInvy;
CalcYTPsiIInvY<T_mat>(yTPsiInvy, true, 1, CalcYAux_already_done, CalcYtilde_already_done);
//Calculate log determinant
double log_det = 0;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
log_det -= D_inv_[cluster_i].diagonal().array().log().sum();
}
else {
if (only_grouped_REs_use_woodbury_identity_) {
log_det += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
for (int j = 0; j < num_comps_total_; ++j) {
int num_rand_eff = cum_num_rand_eff_[cluster_i][j + 1] - cum_num_rand_eff_[cluster_i][j];
log_det += (num_rand_eff * std::log(re_comps_[cluster_i][j]->cov_pars_[0]));
}
}
else {
log_det += (2. * chol_facts_[cluster_i].diagonal().array().log().sum());
}
}
}
negll = yTPsiInvy / 2. / cov_pars[0] + log_det / 2. + num_data_ / 2. * (std::log(cov_pars[0]) + std::log(2 * M_PI));
}//end EvalNegLogLikelihood
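// For reference, a minimal dense-matrix sketch (not used by the class) of the
// quantity computed above, negll = y^T Psi^-1 y / (2 sigma2) + log|Psi| / 2
// + n / 2 * (log(sigma2) + log(2 pi)), with log|Psi| = 2 * sum(log(diag(L)))
// for a Cholesky factorization Psi = L L^T:
//   Eigen::LLT<den_mat_t> chol(psi);
//   double log_det = 2. * chol.matrixLLT().diagonal().array().log().sum();
//   double negll = y.dot(chol.solve(y)) / (2. * sigma2) + log_det / 2.
//       + num_data / 2. * (std::log(sigma2) + std::log(2. * M_PI));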
/*!
* \brief Calculate the value of the approximate negative marginal log-likelihood obtained when using the Laplace approximation
* \param y_data Response variable data
* \param cov_pars Values for covariance parameters of RE components
* \param[out] negll Approximate negative marginal log-likelihood
* \param fixed_effects Fixed effects component of location parameter
* \param InitializeModeCovMat If true, posterior mode is initialized to 0 and the covariance matrix is calculated. Otherwise, existing values are used
* \param CalcModePostRandEff_already_done If true, it is assumed that the posterior mode of the random effects has already been calculated
*/
void EvalLAApproxNegLogLikelihood(const double* y_data, const double* cov_pars, double& negll,
const double* fixed_effects = nullptr, bool InitializeModeCovMat = true, bool CalcModePostRandEff_already_done = false) {
if (y_data != nullptr) {
SetY(y_data);
}
else {
if (!CalcModePostRandEff_already_done) {
CHECK(y_has_been_set_);
}
}
if (InitializeModeCovMat) {
CHECK(cov_pars != nullptr);
}
if (CalcModePostRandEff_already_done) {
negll = neg_log_likelihood_;//Whenever the mode is calculated that likelihood is calculated as well. So we might as well just return the saved neg_log_likelihood_
}
else {//not CalcModePostRandEff_already_done
if (InitializeModeCovMat) {
//We reset the initial modes to 0. This is done to avoid that different calls to EvalLAApproxNegLogLikelihood lead to (very small) differences.
for (const auto& cluster_i : unique_clusters_) {
likelihood_[cluster_i]->InitializeModeAvec();//TODO: maybe omit this step?
}
const vec_t cov_pars_vec = Eigen::Map<const vec_t>(cov_pars, num_cov_par_);
SetCovParsComps(cov_pars_vec);
if (vecchia_approx_) {
CalcCovFactor(true, true, 1., false);
}
else {
CalcSigmaComps();
CalcCovMatrixNonGauss();
}
}//end InitializeModeCovMat
negll = -CalcModePostRandEff(fixed_effects);
}//end not CalcModePostRandEff_already_done
}//end EvalLAApproxNegLogLikelihood
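// For reference, the standard form of the Laplace approximation evaluated
// above (see, e.g., Rasmussen and Williams, 2006, for the GP case): with
// posterior mode b_hat of the random effects b and W = -d^2 log p(y|b) / db^2
// at b_hat,
//   log p(y) ~= log p(y | b_hat) - b_hat^T Sigma^-1 b_hat / 2
//               - log det(I + W^{1/2} Sigma W^{1/2}) / 2,
// and negll is the negative of this quantity.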
/*!
* \brief Set the data used for making predictions (useful if the same data is used repeatedly, e.g., in validation of GPBoost)
* \param num_data_pred Number of data points for which predictions are made
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
*/
void SetPredictionData(int num_data_pred,
const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr,
const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr,
const double* gp_rand_coef_data_pred = nullptr, const double* covariate_data_pred = nullptr) {
CHECK(num_data_pred > 0);
if (cluster_ids_data_pred == nullptr) {
cluster_ids_data_pred_.clear();
}
else {
cluster_ids_data_pred_ = std::vector<gp_id_t>(cluster_ids_data_pred, cluster_ids_data_pred + num_data_pred);
}
if (re_group_data_pred == nullptr) {
re_group_levels_pred_.clear();
}
else {
//For grouped random effects: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_pred) with strings of group levels from characters in 'const char* re_group_data_pred'
re_group_levels_pred_ = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred));
ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred_);
}
if (re_group_rand_coef_data_pred == nullptr) {
re_group_rand_coef_data_pred_.clear();
}
else {
re_group_rand_coef_data_pred_ = std::vector<double>(re_group_rand_coef_data_pred, re_group_rand_coef_data_pred + num_data_pred * num_re_group_rand_coef_);
}
if (gp_coords_data_pred == nullptr) {
gp_coords_data_pred_.clear();
}
else {
gp_coords_data_pred_ = std::vector<double>(gp_coords_data_pred, gp_coords_data_pred + num_data_pred * dim_gp_coords_);
}
if (gp_rand_coef_data_pred == nullptr) {
gp_rand_coef_data_pred_.clear();
}
else {
gp_rand_coef_data_pred_ = std::vector<double>(gp_rand_coef_data_pred, gp_rand_coef_data_pred + num_data_pred * num_gp_rand_coef_);
}
if (covariate_data_pred == nullptr) {
covariate_data_pred_.clear();
}
else {
covariate_data_pred_ = std::vector<double>(covariate_data_pred, covariate_data_pred + num_data_pred * num_coef_);
}
num_data_pred_ = num_data_pred;
}//end SetPredictionData
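// Example (hypothetical usage sketch, not from this library's documentation; 'model' denotes an
// instance of this class): for a purely spatial GP with dim_gp_coords_ == 2 and two prediction
// points, coordinates are passed in column-major format (first all values of the first dimension,
// then all values of the second):
//   double gp_coords_pred[] = { 0.1, 0.3,   // first coordinate dimension
//                               0.7, 0.9 }; // second coordinate dimension
//   model.SetPredictionData(2, nullptr, nullptr, nullptr, gp_coords_pred);
//   // subsequent calls to Predict(..., use_saved_data = true, ...) then reuse these points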
/*!
* \brief Make predictions: calculate conditional mean and variances or covariance matrix
* Note: You should pre-allocate memory for out_predict
* Its length is equal to num_data_pred if only the conditional mean is predicted (predict_cov_mat==false && predict_var==false)
* or num_data_pred * (1 + num_data_pred) if the predictive covariance matrix is also calculated (predict_cov_mat==true)
* or num_data_pred * 2 if predictive variances are also calculated (predict_var==true)
* \param cov_pars_pred Covariance parameters of components
* \param y_obs Response variable for observed data
* \param num_data_pred Number of data points for which predictions are made
* \param[out] out_predict Predictive/conditional mean at prediction points followed by the predictive covariance matrix in column-major format (if predict_cov_mat==true) or the predictive variances (if predict_var==true)
* \param calc_cov_factor If true, the covariance matrix of the observed data is factorized otherwise a previously done factorization is used (default=true)
* \param predict_cov_mat If true, the predictive/conditional covariance matrix is calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param predict_var If true, the predictive/conditional variances are calculated (default=false) (predict_var and predict_cov_mat cannot be both true)
* \param predict_response If true, the response variable (label) is predicted, otherwise the latent random effects (this is only relevant for non-Gaussian data) (default=false)
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
* \param coef_pred Coefficients for linear covariates
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param use_saved_data If true, saved data is used and some arguments are ignored
* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions (-1 means that the value already set at initialization is used)
* \param fixed_effects Fixed effects component of location parameter for observed data (only used for non-Gaussian data)
* \param fixed_effects_pred Fixed effects component of location parameter for predicted data (only used for non-Gaussian data)
*/
void Predict(const double* cov_pars_pred, const double* y_obs, data_size_t num_data_pred,
double* out_predict, bool calc_cov_factor = true, bool predict_cov_mat = false, bool predict_var = false, bool predict_response = false,
const double* covariate_data_pred = nullptr, const double* coef_pred = nullptr,
const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr,
const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr,
const double* gp_rand_coef_data_pred = nullptr, bool use_saved_data = false,
const char* vecchia_pred_type = nullptr, int num_neighbors_pred = -1,
const double* fixed_effects = nullptr, const double* fixed_effects_pred = nullptr) {
//First check whether previously set data should be used and load it if required
std::vector<std::vector<string_t>> re_group_levels_pred;//Matrix with group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
if (use_saved_data) {
if (num_data_pred > 0) {
CHECK(num_data_pred == num_data_pred_);
}
else {
num_data_pred = num_data_pred_;
}
re_group_levels_pred = re_group_levels_pred_;
if (cluster_ids_data_pred_.empty()) {
cluster_ids_data_pred = nullptr;
}
else {
cluster_ids_data_pred = cluster_ids_data_pred_.data();
}
if (re_group_rand_coef_data_pred_.empty()) {
re_group_rand_coef_data_pred = nullptr;
}
else {
re_group_rand_coef_data_pred = re_group_rand_coef_data_pred_.data();
}
if (gp_coords_data_pred_.empty()) {
gp_coords_data_pred = nullptr;
}
else {
gp_coords_data_pred = gp_coords_data_pred_.data();
}
if (gp_rand_coef_data_pred_.empty()) {
gp_rand_coef_data_pred = nullptr;
}
else {
gp_rand_coef_data_pred = gp_rand_coef_data_pred_.data();
}
if (covariate_data_pred_.empty()) {
covariate_data_pred = nullptr;
}
else {
covariate_data_pred = covariate_data_pred_.data();
}
}
else {
if (re_group_data_pred != nullptr) {
//For grouped random effects: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_) with strings of group levels from characters in 'const char* re_group_data_pred'
re_group_levels_pred = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred));
ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred);
}
}
//Some checks
CHECK(num_data_pred > 0);
//Check whether required data is missing
if (re_group_rand_coef_data_pred == nullptr && num_re_group_rand_coef_ > 0) {
Log::REFatal("Missing covariate data for random coefficients for grouped random effects for making predictions");
}
if (gp_coords_data_pred == nullptr && num_gp_ > 0) {
Log::REFatal("Missing coordinate data for Gaussian process for making predictions");
}
if (gp_rand_coef_data_pred == nullptr && num_gp_rand_coef_ > 0) {
Log::REFatal("Missing covariate data for random coefficients for Gaussian process for making predictions");
}
if (cluster_ids_data_pred == nullptr && num_clusters_ > 1) {
Log::REFatal("Missing cluster_id data for making predictions");
}
if (!gauss_likelihood_ && predict_response && predict_cov_mat) {
Log::REFatal("Calculation of the predictive covariance matrix is not supported when predicting the response variable (label) for non-Gaussian data");
}
if (predict_cov_mat && predict_var) {
Log::REFatal("Calculation of both the predictive covariance matrix and variances is not supported. Choose one of these option (predict_cov_mat or predict_var)");
}
if (vecchia_approx_ && gauss_likelihood_ && predict_var) {
Log::REDebug("Calculation of only predictive variances is currently not optimized for the Vecchia approximation. If you need only variances and this takes too much time or memory, contact the developer or open a GitHub issue.");
}
if (has_covariates_) {
CHECK(covariate_data_pred != nullptr);
CHECK(coef_pred != nullptr);
}
if (y_obs == nullptr) {
if (!y_has_been_set_) {
Log::REFatal("Response variable data is not provided and has not been set before");
}
}
if (num_data_pred > 10000 && predict_cov_mat) {
double num_mem_d = ((double)num_data_pred) * ((double)num_data_pred);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
Log::REWarning("The covariance matrix can be very large for large sample sizes which might lead to memory limitations. In your case (n = %d), the covariance needs at least approximately %d mb of memory. If you only need variances or covariances for linear combinations, contact the developer of this package or open a GitHub issue and ask to implement this feature.", num_data_pred, mem_size);
}
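// Worked example for the warning above: with n = 20000 prediction points, the dense predictive
// covariance matrix has n * n = 4e8 entries of 8 bytes each, i.e. approximately
// 4e8 * 8 / 1e6 = 3200 MB, which is why requesting only variances is usually preferable.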
if (vecchia_approx_) {
if (vecchia_pred_type != nullptr) {
string_t vecchia_pred_type_S = std::string(vecchia_pred_type);
if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_S) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) {
Log::REFatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_S.c_str());
}
vecchia_pred_type_ = vecchia_pred_type_S;
}
if (num_neighbors_pred > 0) {
num_neighbors_pred_ = num_neighbors_pred;
}
}
// Initialize linear predictor related terms and covariance parameters
vec_t coef, mu;//mu = linear regression predictor
if (has_covariates_) {//calculate linear regression term
coef = Eigen::Map<const vec_t>(coef_pred, num_coef_);
den_mat_t X_pred = Eigen::Map<const den_mat_t>(covariate_data_pred, num_data_pred, num_coef_);
mu = X_pred * coef;
}
vec_t cov_pars = Eigen::Map<const vec_t>(cov_pars_pred, num_cov_par_);
//Set up cluster IDs
std::map<gp_id_t, int> num_data_per_cluster_pred;
std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_pred;
std::vector<gp_id_t> unique_clusters_pred;
data_size_t num_clusters_pred;
SetUpGPIds(num_data_pred, cluster_ids_data_pred, num_data_per_cluster_pred,
data_indices_per_cluster_pred, unique_clusters_pred, num_clusters_pred);
//Check whether predictions are made for existing clusters or only for new independent clusters
bool pred_for_observed_data = false;
for (const auto& cluster_i : unique_clusters_pred) {
if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) != unique_clusters_.end()) {
pred_for_observed_data = true;
break;
}
}
//Factorize covariance matrix and calculate Psi^{-1}y_obs or calculate Laplace approximation (if required)
const double* fixed_effects_ptr = fixed_effects;
vec_t fixed_effects_vec;
if (pred_for_observed_data) {//TODO (low prio): this actually needs to be done only for the GP realizations for which predictions are made (currently it is done for all of them in unique_clusters_pred)
// Set response data and fixed effects
if (gauss_likelihood_) {
if (has_covariates_ || fixed_effects != nullptr) {
vec_t resid;
if (y_obs != nullptr) {
resid = Eigen::Map<const vec_t>(y_obs, num_data_);
}
else {
resid = y_vec_;
}
if (has_covariates_) {
resid -= X_ * coef;
}
//add external fixed effects to linear predictor
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
resid[i] -= fixed_effects[i];
}
}
SetY(resid.data());
}//end if has_covariates_
else {//no covariates
if (y_obs != nullptr) {
SetY(y_obs);
}
}//end no covariates
}//end if gauss_likelihood_
else {//if not gauss_likelihood_
if (has_covariates_) {
fixed_effects_vec = X_ * coef;
//add external fixed effects to linear predictor
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
fixed_effects_vec[i] += fixed_effects[i];
}
}
fixed_effects_ptr = fixed_effects_vec.data();
}
if (y_obs != nullptr) {
SetY(y_obs);
}
}//end if not gauss_likelihood_
SetCovParsComps(cov_pars);
if (!(vecchia_approx_ && gauss_likelihood_)) {// no need to call CalcCovFactor here for the Vecchia approximation for Gaussian data, this is done in the prediction steps below
if (calc_cov_factor) {
if (gauss_likelihood_) {
CalcCovFactor(false, true, 1., false);// Create covariance matrix and factorize it
}
else {//not gauss_likelihood_
//We reset the initial modes to 0 so that different calls to the prediction function do not lead to (very small) differences,
// as the mode would otherwise be calculated from different starting values.
// If one is willing to accept these (very) small differences, this could be disabled, with the advantage of faster predictions
// since the mode does not need to be found anew.
for (const auto& cluster_i : unique_clusters_) {
likelihood_[cluster_i]->InitializeModeAvec();
}
if (vecchia_approx_) {
CalcCovFactor(false, true, 1., false);
}
else {
CalcSigmaComps();
CalcCovMatrixNonGauss();
}
CalcModePostRandEff(fixed_effects_ptr);
}//end not gauss_likelihood_
}//end if calc_cov_factor
if (gauss_likelihood_) {
CalcYAux();//note: in some cases a call to CalcYAux() could be avoided (e.g. no covariates and not GPBoost algorithm)...
}
}//end not (vecchia_approx_ && gauss_likelihood_)
}//end if pred_for_observed_data (factorization of covariance matrix)
// Loop over different clusters to calculate predictions
for (const auto& cluster_i : unique_clusters_pred) {
//Case 1: no data observed for this Gaussian process with ID 'cluster_i'
if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) == unique_clusters_.end()) {
T_mat psi;
std::vector<std::shared_ptr<RECompBase<T_mat>>> re_comps_cluster_i;
int num_REs_pred = num_data_per_cluster_pred[cluster_i];
//Calculate covariance matrix if needed
if (predict_cov_mat || predict_var || predict_response) {
if (vecchia_approx_) {
//TODO: move this code out into another function for better readability
// Initialize RE components
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<Triplet_t> entries_init_B_cluster_i;
std::vector<Triplet_t> entries_init_B_grad_cluster_i;
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
CreateREComponentsVecchia(num_data_pred, data_indices_per_cluster_pred, cluster_i, num_data_per_cluster_pred,
gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i, "none", num_neighbors_pred_);//TODO: maybe also use ordering for making predictions? (need to check that there are not errors)
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
re_comps_cluster_i[j]->SetCovPars(pars);
}
// Calculate a Cholesky factor
sp_mat_t B_cluster_i;
sp_mat_t D_inv_cluster_i;
std::vector<sp_mat_t> B_grad_cluster_i;//not used, but needs to be passed to function
std::vector<sp_mat_t> D_grad_cluster_i;//not used, but needs to be passed to function
CalcCovFactorVecchia(num_data_per_cluster_pred[cluster_i], false, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i,
B_cluster_i, D_inv_cluster_i, B_grad_cluster_i, D_grad_cluster_i);
//Calculate Psi
sp_mat_t D_sqrt(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
D_sqrt.setIdentity();
D_sqrt.diagonal().array() = D_inv_cluster_i.diagonal().array().pow(-0.5);
sp_mat_t B_inv_D_sqrt;
eigen_sp_Lower_sp_RHS_cs_solve(B_cluster_i, D_sqrt, B_inv_D_sqrt, true);
psi = B_inv_D_sqrt * B_inv_D_sqrt.transpose();
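// Algebra behind the lines above: the Vecchia approximation yields the precision matrix
// Psi^-1 = B^T * D^-1 * B, hence Psi = B^-1 * D * B^-T. With D_sqrt = D^(1/2) (recovered from
// the stored D^-1 via elementwise pow(-0.5)), Psi = (B^-1 * D_sqrt) * (B^-1 * D_sqrt)^T as computed here.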
}//end vecchia_approx_
else {//not vecchia_approx_
CreateREComponents(num_data_pred,
num_re_group_,
data_indices_per_cluster_pred,
cluster_i,
re_group_levels_pred,
num_data_per_cluster_pred,
num_re_group_rand_coef_,
re_group_rand_coef_data_pred,
ind_effect_group_rand_coef_,
num_gp_,
gp_coords_data_pred,
dim_gp_coords_,
gp_rand_coef_data_pred,
num_gp_rand_coef_,
cov_fct_,
cov_fct_shape_,
ind_intercept_gp_,
true,
re_comps_cluster_i);
if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
num_REs_pred = re_comps_cluster_i[0]->GetNumUniqueREs();
}
else {
num_REs_pred = num_data_per_cluster_pred[cluster_i];
}
psi.resize(num_REs_pred, num_REs_pred);
if (gauss_likelihood_) {
psi.setIdentity();//nugget effect
}
else {
psi.setZero();
}
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
re_comps_cluster_i[j]->SetCovPars(pars);
re_comps_cluster_i[j]->CalcSigma();
psi += (*(re_comps_cluster_i[j]->GetZSigmaZt().get()));
}
}//end not vecchia_approx_
if (gauss_likelihood_) {
psi *= cov_pars[0];//back-transform
}
}//end calculation of covariance matrix
// Add external fixed_effects
vec_t mean_pred_id = vec_t::Zero(num_data_per_cluster_pred[cluster_i]);
if (fixed_effects_pred != nullptr) {//add externally provided fixed effects
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] += fixed_effects_pred[data_indices_per_cluster_pred[cluster_i][i]];
}
}
if (has_covariates_) {//add linear regression predictor
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] += mu[data_indices_per_cluster_pred[cluster_i][i]];
}
}
bool predict_var_or_response = predict_var || (predict_response && !gauss_likelihood_);
vec_t var_pred_id;
if (predict_var_or_response) {
var_pred_id = psi.diagonal();
}
//map predictions from the random effects scale b to the "data scale" Zb
if (only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_) {
if (predict_var_or_response) {
vec_t var_pred_id_on_RE_scale = var_pred_id;
var_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
var_pred_id[i] = var_pred_id_on_RE_scale[(re_comps_cluster_i[0]->random_effects_indices_of_data_)[i]];
}
}
if (predict_cov_mat) {
T_mat cov_mat_pred_id_on_RE_scale = psi;
sp_mat_t Zpred(num_data_per_cluster_pred[cluster_i], num_REs_pred);
std::vector<Triplet_t> triplets(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
triplets[i] = Triplet_t(i, (re_comps_cluster_i[0]->random_effects_indices_of_data_)[i], 1.);
}
Zpred.setFromTriplets(triplets.begin(), triplets.end());
psi = Zpred * cov_mat_pred_id_on_RE_scale * Zpred.transpose();
}
}//end only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_
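// The mapping above applies the incidence matrix Z relating data points to unique random effects:
// variances are gathered elementwise via random_effects_indices_of_data_, and covariance matrices
// are transformed as Psi_Zb = Z * Psi_b * Z^T.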
// Transform to response scale for non-Gaussian data if needed
if (!gauss_likelihood_ && predict_response) {
likelihood_[unique_clusters_[0]]->PredictResponse(mean_pred_id, var_pred_id, predict_var);
}
//write on output
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i];
}
//Write covariance / variance on output
if (!predict_response || gauss_likelihood_) {//this is not done if predict_response==true for non-Gaussian data
if (predict_cov_mat) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = psi.coeff(j, i);
}
}
}//end predict_cov_mat
if (predict_var) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i] + num_data_pred] = var_pred_id[i];
}
}//end predict_var
}//end !predict_response || gauss_likelihood_
else { // predict_response && !gauss_likelihood_
if (predict_var) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i] + num_data_pred] = var_pred_id[i];
}
}//end predict_var
}//end write covariance / variance on output
}//end cluster_i with no observed data
else {
//Case 2: there exists observed data for this cluster_i (= the typical case)
den_mat_t gp_coords_mat_pred;
std::vector<data_size_t> random_effects_indices_of_data_pred;
int num_REs_pred = num_data_per_cluster_pred[cluster_i];
if (num_gp_ > 0) {
std::vector<double> gp_coords_pred;
for (int j = 0; j < dim_gp_coords_; ++j) {
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
gp_coords_pred.push_back(gp_coords_data_pred[j * num_data_pred + id]);
}
}
gp_coords_mat_pred = Eigen::Map<den_mat_t>(gp_coords_pred.data(), num_data_per_cluster_pred[cluster_i], dim_gp_coords_);
}
if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
random_effects_indices_of_data_pred = std::vector<data_size_t>(num_data_per_cluster_pred[cluster_i]);
std::vector<string_t> re_group_levels_pred_unique;
std::map<re_group_t, int> map_group_label_index_pred;
int num_group_pred = 0;
int ii = 0;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
if (map_group_label_index_pred.find(re_group_levels_pred[0][id]) == map_group_label_index_pred.end()) {
map_group_label_index_pred.insert({ re_group_levels_pred[0][id], num_group_pred });
re_group_levels_pred_unique.push_back(re_group_levels_pred[0][id]);
random_effects_indices_of_data_pred[ii] = num_group_pred;
num_group_pred += 1;
}
else {
random_effects_indices_of_data_pred[ii] = map_group_label_index_pred[re_group_levels_pred[0][id]];
}
ii += 1;
}
re_group_levels_pred[0] = re_group_levels_pred_unique;
num_REs_pred = (int)re_group_levels_pred[0].size();
}//end only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_
else if (only_one_GP_calculations_on_RE_scale_) {
random_effects_indices_of_data_pred = std::vector<data_size_t>(num_data_per_cluster_pred[cluster_i]);
std::vector<int> uniques;//unique points
std::vector<int> unique_idx;//used for constructing incidence matrix Z_ if there are duplicates
DetermineUniqueDuplicateCoords(gp_coords_mat_pred, num_data_per_cluster_pred[cluster_i], uniques, unique_idx);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
random_effects_indices_of_data_pred[i] = unique_idx[i];
}
den_mat_t gp_coords_mat_pred_unique = gp_coords_mat_pred(uniques, Eigen::all);
gp_coords_mat_pred = gp_coords_mat_pred_unique;
num_REs_pred = (int)gp_coords_mat_pred.rows();
}//end only_one_GP_calculations_on_RE_scale_
// Initialize predictive mean and covariance
vec_t mean_pred_id;
if (only_one_GP_calculations_on_RE_scale_ ||
only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
mean_pred_id = vec_t(num_REs_pred);
}
else {
mean_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]);
}
T_mat cov_mat_pred_id;
vec_t var_pred_id;
// Calculate predictions
//Special case: Vecchia approximation for Gaussian data
if (vecchia_approx_ && gauss_likelihood_) {//TODO: move this code to another function for better readability
std::shared_ptr<RECompGP<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_[cluster_i][ind_intercept_gp_]);
int num_data_tot = num_data_per_cluster_[cluster_i] + num_data_per_cluster_pred[cluster_i];
double num_mem_d = ((double)num_neighbors_pred_) * ((double)num_neighbors_pred_) * (double)num_data_tot + (double)num_neighbors_pred_ * (double)num_data_tot;
int mem_size = (int)(num_mem_d * 8. / 1000000.);
if (mem_size > 4000) {
Log::REDebug("The current implementation of the Vecchia approximation needs a lot of memory if the number of neighbors is large. In your case (nb. of neighbors = %d, nb. of observations = %d, nb. of predictions = %d), this needs at least approximately %d mb of memory. If this is a problem for you, contact the developer of this package or open a GitHub issue and ask to change this.", num_neighbors_pred_, num_data_per_cluster_[cluster_i], num_data_per_cluster_pred[cluster_i], mem_size);
}
//TODO: implement a more efficient version when only predictive variances are required and not full covariance matrices
bool predict_var_or_cov_mat = predict_var || predict_cov_mat;
if (vecchia_pred_type_ == "order_obs_first_cond_obs_only") {
CalcPredVecchiaObservedFirstOrder(true, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "order_obs_first_cond_all") {
CalcPredVecchiaObservedFirstOrder(false, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "order_pred_first") {
CalcPredVecchiaPredictedFirstOrder(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "latent_order_obs_first_cond_obs_only") {
CalcPredVecchiaLatentObservedFirstOrder(true, cluster_i, num_data_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "latent_order_obs_first_cond_all") {
CalcPredVecchiaLatentObservedFirstOrder(false, cluster_i, num_data_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, predict_var_or_cov_mat, mean_pred_id, cov_mat_pred_id);
}
if (predict_var) {
var_pred_id = cov_mat_pred_id.diagonal();
if (!predict_cov_mat) {
cov_mat_pred_id.resize(0, 0);
}
}
}//end (vecchia_approx_ && gauss_likelihood_)
else {// not vecchia_approx_ or not gauss_likelihood_
//General case: either non-Gaussian data or Gaussian data without the Vecchia approximation
//NOTE: if vecchia_approx_==true and gauss_likelihood_==false, the cross-covariance matrix Sigma_{1,2} = cov(x_pred,x) is not approximated but the exact version is used
bool predict_var_or_response = predict_var || (predict_response && !gauss_likelihood_);//variance needs to be available for response prediction for non-Gaussian data
CalcPred(cluster_i,
num_data_pred,
num_data_per_cluster_pred,
data_indices_per_cluster_pred,
re_group_levels_pred,
re_group_rand_coef_data_pred,
gp_coords_mat_pred,
gp_rand_coef_data_pred,
predict_cov_mat,
predict_var_or_response,
mean_pred_id,
cov_mat_pred_id,
var_pred_id);
//map predictions from the random effects scale b to the "data scale" Zb
if (only_one_GP_calculations_on_RE_scale_ ||
only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
vec_t mean_pred_id_on_RE_scale = mean_pred_id;
mean_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] = mean_pred_id_on_RE_scale[random_effects_indices_of_data_pred[i]];
}
if (predict_var_or_response) {
vec_t var_pred_id_on_RE_scale = var_pred_id;
var_pred_id = vec_t(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
var_pred_id[i] = var_pred_id_on_RE_scale[random_effects_indices_of_data_pred[i]];
}
}
if (predict_cov_mat) {
T_mat cov_mat_pred_id_on_RE_scale = cov_mat_pred_id;
sp_mat_t Zpred(num_data_per_cluster_pred[cluster_i], num_REs_pred);
std::vector<Triplet_t> triplets(num_data_per_cluster_pred[cluster_i]);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
triplets[i] = Triplet_t(i, random_effects_indices_of_data_pred[i], 1.);
}
Zpred.setFromTriplets(triplets.begin(), triplets.end());
cov_mat_pred_id = Zpred * cov_mat_pred_id_on_RE_scale * Zpred.transpose();
}
}//end only_one_GP_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_
}//end not vecchia_approx_ or not gauss_likelihood_
//add externally provided fixed effects
if (fixed_effects_pred != nullptr) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] += fixed_effects_pred[data_indices_per_cluster_pred[cluster_i][i]];
}
}
//add linear regression predictor
if (has_covariates_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
mean_pred_id[i] += mu[data_indices_per_cluster_pred[cluster_i][i]];
}
}
if (!gauss_likelihood_ && predict_response) {
likelihood_[unique_clusters_[0]]->PredictResponse(mean_pred_id, var_pred_id, predict_var);
}
//write on output
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i];
}
//Write covariance / variance on output
if (predict_cov_mat) {
if (gauss_likelihood_) {
cov_mat_pred_id *= cov_pars[0];
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = cov_mat_pred_id.coeff(j, i);
}
}
}//end predict_cov_mat
if (predict_var) {
if (gauss_likelihood_) {
var_pred_id *= cov_pars[0];
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i] + num_data_pred] = var_pred_id[i];
}
}//end predict_var
//end write covariance / variance on output
}//end cluster_i with data
}//end loop over cluster
//Set cross-covariances between different independent clusters to 0
if (predict_cov_mat && unique_clusters_pred.size() > 1 && (!predict_response || gauss_likelihood_)) {
for (const auto& cluster_i : unique_clusters_pred) {
for (const auto& cluster_j : unique_clusters_pred) {
if (cluster_i != cluster_j) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_j]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_j][j] + num_data_pred] = 0.;
}
}
}
}
}
}
}//end Predict
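// Example (hypothetical usage sketch illustrating the memory layout of 'out_predict' described in
// the doc comment above; 'model', 'cov_pars', 'y_obs', and 'n' = num_data_pred are assumed to be
// defined by the caller): out_predict[0..n-1] holds the predictive means; if predict_var == true,
// out_predict[n..2n-1] holds the variances; if predict_cov_mat == true, out_predict[n + i * n + j]
// holds entry (j, i) (column-major) of the predictive covariance matrix:
//   std::vector<double> out_predict(2 * static_cast<size_t>(n));
//   model.Predict(cov_pars, y_obs, n, out_predict.data(),
//                 true /*calc_cov_factor*/, false /*predict_cov_mat*/, true /*predict_var*/);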
/*!
* \brief Find "reasonable" default values for the initial values of the covariance parameters (on transformed scale)
* Note: You should pre-allocate memory for init_cov_pars (length = number of covariance parameters)
* \param y_data Response variable data
* \param[out] init_cov_pars Initial values for covariance parameters of RE components
*/
void FindInitCovPar(const double* y_data, double* init_cov_pars) {
double mean = 0;
double var = 0;
int ind_par;
if (gauss_likelihood_) {
//determine initial value for nugget effect
for (int i = 0; i < num_data_; ++i) {//TODO: run in parallel
mean += y_data[i];
}
mean /= num_data_;
for (int i = 0; i < num_data_; ++i) {
var += (y_data[i] - mean) * (y_data[i] - mean);
}
var /= (num_data_ - 1);
init_cov_pars[0] = var;
ind_par = 1;
}//end Gaussian data
else {//non-Gaussian data
ind_par = 0;
}
if (vecchia_approx_) {//Neither distances nor coordinates are saved for random coefficient GPs in the Vecchia approximation -> cannot find initial parameters -> just copy the ones from the intercept GP
// find initial values for intercept process
int num_par_j = ind_par_[1] - ind_par_[0];
vec_t pars = vec_t(num_par_j);
re_comps_[unique_clusters_[0]][0]->FindInitCovPar(pars);
for (int jj = 0; jj < num_par_j; ++jj) {
init_cov_pars[ind_par] = pars[jj];
ind_par++;
}
//set the same values to random coefficient processes
for (int j = 1; j < num_gp_total_; ++j) {
num_par_j = ind_par_[j + 1] - ind_par_[j];
for (int jj = 0; jj < num_par_j; ++jj) {
init_cov_pars[ind_par] = pars[jj];
ind_par++;
}
}
}
else {
for (int j = 0; j < num_comps_total_; ++j) {
int num_par_j = ind_par_[j + 1] - ind_par_[j];
vec_t pars = vec_t(num_par_j);
re_comps_[unique_clusters_[0]][j]->FindInitCovPar(pars);
for (int jj = 0; jj < num_par_j; ++jj) {
init_cov_pars[ind_par] = pars[jj];
ind_par++;
}
}
}
}//end FindInitCovPar
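// In equation form, the Gaussian nugget is initialized above to the sample variance
//   init_cov_pars[0] = sum_i (y_i - mean(y))^2 / (num_data_ - 1),
// and the remaining entries are filled component-wise via each component's FindInitCovPar.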
int num_cov_par() {
return(num_cov_par_);
}
/*!
* \brief Calculate the leaf values when performing a Newton update step after the tree structure has been found in tree-boosting
* Note: only used in GPBoost for combined Gaussian process tree-boosting (this is called from 'objective_function_->NewtonUpdateLeafValues'). It is assumed that 'CalcYAux' has been called before (from 'objective_function_->GetGradients').
* \param data_leaf_index Leaf index for every data point (array of size num_data)
* \param num_leaves Number of leaves
* \param[out] leaf_values Leaf values when performing a Newton update step (array of size num_leaves)
* \param marg_variance The marginal variance. Default = 1. Can be used to multiply values by it since Newton updates do not depend on it but 'CalcYAux' might have been called using marg_variance!=1.
*/
void NewtonUpdateLeafValues(const int* data_leaf_index,
const int num_leaves, double* leaf_values, double marg_variance = 1.) {
if (!gauss_likelihood_) {
Log::REFatal("Newton updates for leaf values is only supported for Gaussian data");
}
CHECK(y_aux_has_been_calculated_);//y_aux_ has already been calculated when calculating the gradient for finding the tree structure from 'GetGradients' in 'regression_objective.hpp'
den_mat_t HTPsiInvH(num_leaves, num_leaves);
vec_t HTYAux(num_leaves);
HTPsiInvH.setZero();
HTYAux.setZero();
for (const auto& cluster_i : unique_clusters_) {
//Entries for matrix H_cluster_i = incidence matrix H that relates tree leaves to observations for cluster_i
std::vector<Triplet_t> entries_H_cluster_i(num_data_per_cluster_[cluster_i]);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
entries_H_cluster_i[i] = Triplet_t(i, data_leaf_index[data_indices_per_cluster_[cluster_i][i]], 1.);
}
den_mat_t HTPsiInvH_cluster_i;
if (vecchia_approx_) {
sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);//row major format is needed for Vecchia approx.
H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
sp_mat_t BH = B_[cluster_i] * H_cluster_i;
HTPsiInvH_cluster_i = den_mat_t(BH.transpose() * D_inv_[cluster_i] * BH);
}
else {
sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);
H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
if (only_grouped_REs_use_woodbury_identity_) {
sp_mat_t ZtH_cluster_i = Zt_[cluster_i] * H_cluster_i;
T_mat MInvSqrtZtH;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
MInvSqrtZtH = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtH_cluster_i;
}
else {
CalcPsiInvSqrtH(ZtH_cluster_i, MInvSqrtZtH, cluster_i, true);
}
HTPsiInvH_cluster_i = H_cluster_i.transpose() * H_cluster_i - MInvSqrtZtH.transpose() * MInvSqrtZtH;
}
else {
T_mat PsiInvSqrtH;
CalcPsiInvSqrtH(H_cluster_i, PsiInvSqrtH, cluster_i, true);
HTPsiInvH_cluster_i = PsiInvSqrtH.transpose() * PsiInvSqrtH;
}
}
HTPsiInvH += HTPsiInvH_cluster_i;
}
HTYAux *= marg_variance;
vec_t new_leaf_values = HTPsiInvH.llt().solve(HTYAux);
for (int i = 0; i < num_leaves; ++i) {
leaf_values[i] = new_leaf_values[i];
}
}//end NewtonUpdateLeafValues
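// In matrix form, the loop above assembles and solves the generalized least squares system
//   (H^T * Psi^-1 * H) * leaf_values = marg_variance * H^T * Psi^-1 * (y - F),
// where H is the 0/1 incidence matrix mapping observations to leaves; the minus sign on HTYAux
// compensates for y_aux_ having been computed on the gradient F - y.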
private:
// RESPONSE DATA
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief If true, the response variables have a Gaussian likelihood, otherwise not */
bool gauss_likelihood_ = true;
/*! \brief Likelihood objects */
std::map<gp_id_t, std::unique_ptr<Likelihood<T_chol>>> likelihood_;
/*! \brief Value of negative log-likelihood or approximate marginal negative log-likelihood for non-Gaussian data */
double neg_log_likelihood_;
/*! \brief Value of negative log-likelihood or approximate marginal negative log-likelihood for non-Gaussian data of previous iteration in optimization used for convergence checking */
double neg_log_likelihood_lag1_;
/*! \brief Value of negative log-likelihood or approximate marginal negative log-likelihood for non-Gaussian data after linear regression coefficients are update (this equals neg_log_likelihood_lag1_ if there are no regression coefficients). This is used for step-size checking for the covariance parameters */
double neg_log_likelihood_after_lin_coef_update_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: data y */
std::map<gp_id_t, vec_t> y_;
/*! \brief Copy of response data (used only for Gaussian data and if there are also linear covariates since then y_ is modified during the optimization algorithm and this contains the original data) */
vec_t y_vec_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: data y of integer type (used only for non-Gaussian likelihood) */
std::map<gp_id_t, vec_int_t> y_int_;
// Note: the response variable data is saved in y_ / y_int_ (depending on the likelihood type) for Gaussian data with no covariates and for all non-Gaussian data.
// For Gaussian data with covariates, the response variable is saved in y_vec_ and y_ is replaced by y - X * beta during the optimization
/*! \brief Key: labels of independent realizations of REs/GPs, value: Psi^-1*y_ (used for various computations) */
std::map<gp_id_t, vec_t> y_aux_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (used for various computations when only_grouped_REs_use_woodbury_identity_==true) */
std::map<gp_id_t, vec_t> y_tilde_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (used for various computations when only_grouped_REs_use_woodbury_identity_==true) */
std::map<gp_id_t, vec_t> y_tilde2_;
/*! \brief Indicates whether y_aux_ has been calculated */
bool y_aux_has_been_calculated_ = false;
/*! \brief If true, the response variable data has been set (otherwise y_ is empty) */
bool y_has_been_set_ = false;
// GROUPED RANDOM EFFECTS
/*! \brief Number of grouped (intercept) random effects */
data_size_t num_re_group_ = 0;
/*! \brief Number of grouped random coefficients */
data_size_t num_re_group_rand_coef_ = 0;
/*! \brief Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1 (and ends at the number of base intercept random effects). Length of vector = num_re_group_rand_coef_. */
std::vector<int> ind_effect_group_rand_coef_;
/*! \brief Total number of grouped random effects (random intercepts plus random coefficients (slopes)) */
data_size_t num_re_group_total_ = 0;
// GAUSSIAN PROCESS
/*! \brief 1 if there is a Gaussian process, 0 otherwise */
data_size_t num_gp_ = 0;
/*! \brief Type of GP. 0 = classical (spatial) GP, 1 = spatio-temporal GP */ //TODO: remove?
int8_t GP_type_ = 0;
/*! \brief Number of random coefficient GPs */
data_size_t num_gp_rand_coef_ = 0;
/*! \brief Total number of GPs (random intercepts plus random coefficients) */
data_size_t num_gp_total_ = 0;
/*! \brief Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs */
int ind_intercept_gp_;
/*! \brief Dimension of the coordinates (=number of features) for Gaussian process */
int dim_gp_coords_ = 2;//required to save since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Type of covariance(kernel) function for Gaussian processes */
string_t cov_fct_ = "exponential";//required to also save here since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Shape parameter of covariance function (=smoothness parameter for Matern covariance) */
double cov_fct_shape_ = 0.;
// RANDOM EFFECT / GP COMPONENTS
/*! \brief Keys: labels of independent realizations of REs/GPs, values: vectors with individual RE/GP components */
std::map<gp_id_t, std::vector<std::shared_ptr<RECompBase<T_mat>>>> re_comps_;
/*! \brief Indices of parameters of RE components in global parameter vector cov_pars. ind_par_[i] and ind_par_[i+1] -1 are the indices of the first and last parameter of component number i (counting starts at 1) */
std::vector<data_size_t> ind_par_;
/*! \brief Number of covariance parameters */
data_size_t num_cov_par_;
/*! \brief Total number of random effect components (grouped REs plus other GPs) */
data_size_t num_comps_total_ = 0;
// SPECIAL CASES OF RE MODELS FOR FASTER CALCULATIONS
/*! \brief If true, the Woodbury, Sherman and Morrison matrix inversion formula is used for calculating the inverse of the covariance matrix (only used if there are only grouped REs and no Gaussian processes) */
bool only_grouped_REs_use_woodbury_identity_ = false;
/*! \brief True if there is only one grouped random effect component, and (all) calculations are done on the b-scale instead of the Zb-scale (currently used only for non-Gaussian data) */
bool only_one_grouped_RE_calculations_on_RE_scale_ = false;
/*! \brief True if there is only one grouped random effect component for Gaussian data, and calculations for predictions (only) are done on the b-scale instead of the Zb-scale */
bool only_one_grouped_RE_calculations_on_RE_scale_for_prediction_ = false;
/*! \brief True if there is only one GP random effect component, and calculations are done on the b-scale instead of the Zb-scale (currently used only for non-Gaussian data) */
bool only_one_GP_calculations_on_RE_scale_ = false;
// COVARIANCE MATRIX AND CHOLESKY FACTORS OF IT
/*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky decomposition solver of covariance matrices Psi (for Gaussian data) */
std::map<gp_id_t, T_chol> chol_facts_solve_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky factors of Psi matrices */ //TODO: above needed or can pattern be saved somewhere else?
std::map<gp_id_t, T_mat> chol_facts_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Identity matrices used for calculation of inverse covariance matrix */ //TODO: remove and construct on demand?
std::map<gp_id_t, T_mat> Id_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Identity matrices used for calculation of inverse covariance matrix */
std::map<gp_id_t, cs> Id_cs_;
/*! \brief If true, a symbolic decomposition is first done when calculating the Cholesky factor of the covariance matrix (only for sparse matrices) */
bool do_symbolic_decomposition_ = true;
/*! \brief Collects inverse covariance matrices Psi^{-1} (usually not saved, but used e.g. in Fisher scoring without the Vecchia approximation) */
std::map<gp_id_t, T_mat> psi_inv_;
/*! \brief Inverse covariance matrices Sigma^-1 of random effects. This is only used if only_grouped_REs_use_woodbury_identity_==true (if there are only grouped REs) */
std::map<gp_id_t, sp_mat_t> SigmaI_;
/*! \brief Pointer to covariance matrix of the random effects (sum of all components). This is only used for non-Gaussian data and if only_grouped_REs_use_woodbury_identity_==false. In the Gaussian case this need not be saved */
std::map<gp_id_t, std::shared_ptr<T_mat>> ZSigmaZt_;
// COVARIATE DATA FOR LINEAR REGRESSION TERM
/*! \brief If true, the model includes covariates in a linear regression term */
bool has_covariates_ = false;
/*! \brief Number of covariates */
int num_coef_;
/*! \brief Covariate data */
den_mat_t X_;
// OPTIMIZER PROPERTIES
/*! \brief List of supported optimizers for covariance parameters */
const std::set<string_t> SUPPORTED_OPTIM_COV_PAR_{ "gradient_descent", "fisher_scoring" };
/*! \brief List of supported optimizers for regression coefficients */
const std::set<string_t> SUPPORTED_OPTIM_COEF_{ "gradient_descent", "wls" };
/*! \brief List of supported convergence criteria used for terminating the optimization algorithm */
const std::set<string_t> SUPPORTED_CONV_CRIT_{ "relative_change_in_parameters", "relative_change_in_log_likelihood" };
/*! \brief Maximal number of steps for which step halving for the learning rate is done */
int MAX_NUMBER_HALVING_STEPS_ = 30;
// WOODBURY IDENTITY FOR GROUPED RANDOM EFFECTS ONLY
/*! \brief Collects matrices Z^T (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the independent RE components) */
std::map<gp_id_t, sp_mat_t> Zt_;
/*! \brief Collects matrices Z^TZ (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise these matrices are saved only in the independent RE components) */
std::map<gp_id_t, sp_mat_t> ZtZ_;
/*! \brief Collects vectors Z^Ty (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects) */
std::map<gp_id_t, vec_t> Zty_;
/*! \brief Cumulative number of random effects for components (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects, otherwise this information is saved only in the independent RE components) */
std::map<gp_id_t, std::vector<data_size_t>> cum_num_rand_eff_;//The random effects of component j start at cum_num_rand_eff_[0][j]+1 and end at cum_num_rand_eff_[0][j+1]
/*! \brief Sum of squared entries of Z_j for every random effect component (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects) */
std::map<gp_id_t, std::vector<double>> Zj_square_sum_;
/*! \brief Collects matrices Z^T * Z_j for every random effect component (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects) */
std::map<gp_id_t, std::vector<sp_mat_t>> ZtZj_;
/*! \brief Collects matrices L^-1 * Z^T * Z_j for every random effect component (usually not saved, only saved when only_grouped_REs_use_woodbury_identity_=true i.e. when there are only grouped random effects and when Fisher scoring is done) */
std::map<gp_id_t, std::vector<T_mat>> LInvZtZj_;
// VECCHIA APPROXIMATION for GP
/*! \brief If true, the Vecchia approximation is used for the Gaussian process */
bool vecchia_approx_ = false;
/*! \brief If true, a memory optimized version of the Vecchia approximation is used (at the expense of being slightly slower). THIS IS CURRENTLY NOT IMPLEMENTED */
bool vecchia_approx_optim_memory = false;
/*! \brief The number of neighbors used in the Vecchia approximation */
int num_neighbors_;
/*! \brief Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering */
string_t vecchia_ordering_ = "none";
/*! \brief The number of neighbors used in the Vecchia approximation for making predictions */
int num_neighbors_pred_;
/*! \brief Ordering used in the Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions */
string_t vecchia_pred_type_ = "order_obs_first_cond_obs_only";//This is saved here and not simply set in the prediction function since it needs to be used repeatedly in the GPBoost algorithm when making predictions in "regression_metric.hpp" and the way predictions are done for the Vecchia approximation should be decoupled from the boosting algorithm
/*! \brief List of supported prediction types for the Vecchia approximation */
const std::set<string_t> SUPPORTED_VECCHIA_PRED_TYPES_{ "order_obs_first_cond_obs_only",
"order_obs_first_cond_all", "order_pred_first",
"latent_order_obs_first_cond_obs_only", "latent_order_obs_first_cond_all" };
/*! \brief Collects indices of nearest neighbors (used for Vecchia approximation) */
std::map<gp_id_t, std::vector<std::vector<int>>> nearest_neighbors_;
/*! \brief Distances between locations and their nearest neighbors (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_obs_neighbors_;
/*! \brief Distances between nearest neighbors for all locations (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_between_neighbors_;//TODO: this contains duplicate information (i.e. distances might be saved redundantly several times). But there is a trade-off between storage and computational speed. I currently don't see a way for saving unique distances without copying them when using them.
/*! \brief Outer product of covariate vector at observations and neighbors with itself. First index = cluster, second index = data point i, third index = GP number j (this is used only if the Vecchia approximation is used, otherwise this is handled directly in the GP component using Z_) */
std::map<gp_id_t, std::vector<std::vector<den_mat_t>>> z_outer_z_obs_neighbors_;
/*! \brief Collects matrices B = I - A (=Cholesky factor of inverse covariance) for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> B_;
/*! \brief Collects diagonal matrices D^-1 for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> D_inv_;
/*! \brief Collects derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> B_grad_;
/*! \brief Collects derivatives of matrices D for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> D_grad_;
/*! \brief Triplets for initializing the matrices B */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_;
/*! \brief Triplets for initializing the matrices B_grad */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_grad_;
// CLUSTERs of INDEPENDENT REALIZATIONS
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points */
std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_;
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization */
std::map<gp_id_t, int> num_data_per_cluster_;
/*! \brief Number of independent realizations of the REs/GPs */
data_size_t num_clusters_;
/*! \brief Unique labels of independent realizations */
std::vector<gp_id_t> unique_clusters_;
/*! \brief Variance of idiosyncratic error term (nugget effect) */
double sigma2_;
// PREDICTION
/*! \brief Cluster IDs for prediction */
std::vector<gp_id_t> cluster_ids_data_pred_;
/*! \brief Levels of grouped RE for prediction */
std::vector<std::vector<string_t>> re_group_levels_pred_;
/*! \brief Covariate data for grouped random RE for prediction */
std::vector<double> re_group_rand_coef_data_pred_;
/*! \brief Coordinates for GP for prediction */
std::vector<double> gp_coords_data_pred_;
/*! \brief Covariate data for random GP for prediction */
std::vector<double> gp_rand_coef_data_pred_;
/*! \brief Covariate data for linear regression term */
std::vector<double> covariate_data_pred_;
/*! \brief Number of prediction points */
data_size_t num_data_pred_;
/*! \brief Nesterov schedule */
double NesterovSchedule(int iter, int momentum_schedule_version = 0,
double nesterov_acc_rate = 0.5, int momentum_offset = 2) {
if (iter < momentum_offset) {
return(0.);
}
else {
if (momentum_schedule_version == 0) {
return(nesterov_acc_rate);
}
else if (momentum_schedule_version == 1) {
return(1. - (3. / (6. + iter)));
}
else {
return(0.);
}
}
}
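// Example values for momentum_schedule_version == 1, i.e. mu(t) = 1 - 3 / (6 + t):
// mu(2) = 0.625, mu(10) = 0.8125, and mu(t) -> 1 as t -> infinity, mirroring the classical
// Nesterov momentum schedule mu(t) ~ 1 - 3 / (t + 5).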
/*! \brief mutex for threading safe call */
std::mutex mutex_;
/*! \brief Constructs identity matrices if sparse matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
int dim_I = only_grouped_REs_use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
T3 I(dim_I, dim_I);//identity matrix for calculating precision matrix
I.setIdentity();
Id_.insert({ cluster_i, I });
cs Id_cs = cs();//same for cs type //TODO: construct this independently of Id_, but then care needs to be taken when deleting the pointer objects.
Id_cs.nzmax = dim_I;
Id_cs.m = dim_I;
Id_cs.n = dim_I;
Id_[cluster_i].makeCompressed();
Id_cs.p = reinterpret_cast<csi*>(Id_[cluster_i].outerIndexPtr());
Id_cs.i = reinterpret_cast<csi*>(Id_[cluster_i].innerIndexPtr());
Id_cs.x = Id_[cluster_i].valuePtr();
Id_cs.nz = -1;
Id_cs_.insert({ cluster_i, Id_cs });
}
/*! \brief Constructs identity matrices if dense matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
int dim_I = only_grouped_REs_use_woodbury_identity_ ? cum_num_rand_eff_[cluster_i][num_comps_total_] : num_data_per_cluster_[cluster_i];
T3 I(dim_I, dim_I);//identity matrix for calculating precision matrix
I.setIdentity();
Id_.insert({ cluster_i, I });
}
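// Note: the two ConstructI overloads above are selected at compile time via SFINAE
// (std::enable_if on std::is_same); only the sparse version additionally builds a CSparse
// ('cs') view onto the Eigen matrix' internal index/value arrays. A minimal sketch of the
// same dispatch pattern (with a hypothetical member function 'Foo'):
//   template <class T3, typename std::enable_if<std::is_same<sp_mat_t, T3>::value>::type* = nullptr>
//   void Foo() { /* code that is only valid for sparse matrices */ }
//   template <class T3, typename std::enable_if<std::is_same<den_mat_t, T3>::value>::type* = nullptr>
//   void Foo() { /* code that is only valid for dense matrices */ }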
/*!
* \brief Set response variable data y_ (and calculate Z^T * y if only_grouped_REs_use_woodbury_identity_ == true)
* \param y_data Response variable data
*/
void SetY(const double* y_data) {
if (gauss_likelihood_) {
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {
y_[unique_clusters_[0]] = Eigen::Map<const vec_t>(y_data, num_data_);//TODO: Is there a more efficient way that avoids copying?
}
else {
for (const auto& cluster_i : unique_clusters_) {
y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
}
}
}
if (only_grouped_REs_use_woodbury_identity_) {
CalcZtY();
}
}//end gauss_likelihood_
else {//not gauss_likelihood_
(*likelihood_[unique_clusters_[0]]).template CheckY<double>(y_data, num_data_);
if (likelihood_[unique_clusters_[0]]->label_type() == "int") {
for (const auto& cluster_i : unique_clusters_) {
y_int_[cluster_i] = vec_int_t(num_data_per_cluster_[cluster_i]);
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_int_[cluster_i][j] = static_cast<int>(y_data[data_indices_per_cluster_[cluster_i][j]]);
}
(*likelihood_[cluster_i]).template CalculateNormalizingConstant<int>(y_int_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
}
}
else if (likelihood_[unique_clusters_[0]]->label_type() == "double") {
for (const auto& cluster_i : unique_clusters_) {
y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
}
(*likelihood_[cluster_i]).template CalculateNormalizingConstant<double>(y_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
}
}
}//end not gauss_likelihood_
y_has_been_set_ = true;
}
/*!
* \brief Set response variable data y_ if data is of type float (used for GPBoost algorithm since labels are float)
* \param y_data Response variable data
*/
void SetY(const float* y_data) {
if (gauss_likelihood_) {
Log::REFatal("SetY is not implemented for Gaussian data and lables of type float (since it is not needed)");
}//end gauss_likelihood_
else {//not gauss_likelihood_
(*likelihood_[unique_clusters_[0]]).template CheckY<float>(y_data, num_data_);
if (likelihood_[unique_clusters_[0]]->label_type() == "int") {
for (const auto& cluster_i : unique_clusters_) {
y_int_[cluster_i] = vec_int_t(num_data_per_cluster_[cluster_i]);
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_int_[cluster_i][j] = static_cast<int>(y_data[data_indices_per_cluster_[cluster_i][j]]);
}
(*likelihood_[cluster_i]).template CalculateNormalizingConstant<int>(y_int_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
}
}
else if (likelihood_[unique_clusters_[0]]->label_type() == "double") {
for (const auto& cluster_i : unique_clusters_) {
y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_[cluster_i][j] = static_cast<double>(y_data[data_indices_per_cluster_[cluster_i][j]]);
}
(*likelihood_[cluster_i]).template CalculateNormalizingConstant<double>(y_[cluster_i].data(), num_data_per_cluster_[cluster_i]);
}
}
}
y_has_been_set_ = true;
}
/*!
* \brief Return (last used) response variable data
* \param[out] y Response variable data (memory needs to be preallocated)
*/
void GetY(double* y) {
if (!y_has_been_set_) {
Log::REFatal("Respone variable data has not been set");
}
if (has_covariates_ && gauss_likelihood_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
y[i] = y_vec_[i];
}
}
else if (likelihood_[unique_clusters_[0]]->label_type() == "double") {
for (const auto& cluster_i : unique_clusters_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
y[data_indices_per_cluster_[cluster_i][i]] = y_[cluster_i][i];
}
}
}
else if (likelihood_[unique_clusters_[0]]->label_type() == "int") {
for (const auto& cluster_i : unique_clusters_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
y[data_indices_per_cluster_[cluster_i][i]] = y_int_[cluster_i][i];
}
}
}
}
/*!
* \brief Return covariate data
* \param[out] covariate_data covariate data
*/
void GetCovariateData(double* covariate_data) {
if (!has_covariates_) {
Log::REFatal("Model does not have covariates for a linear predictor");
}
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_ * num_coef_; ++i) {
covariate_data[i] = X_.data()[i];
}
}
/*!
* \brief Calculate Z^T*y (use only when only_grouped_REs_use_woodbury_identity_ == true)
*/
void CalcZtY() {
for (const auto& cluster_i : unique_clusters_) {
Zty_[cluster_i] = Zt_[cluster_i] * y_[cluster_i];
}
}
/*!
* \brief Get y_aux = Psi^-1*y
	* \param[out] y_aux Psi^-1*y (=y_aux_). The array needs to be pre-allocated with length num_data_
*/
void GetYAux(double* y_aux) {
CHECK(y_aux_has_been_calculated_);
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_; ++j) {
y_aux[j] = y_aux_[unique_clusters_[0]][j];
}
}
else {
for (const auto& cluster_i : unique_clusters_) {
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_aux[data_indices_per_cluster_[cluster_i][j]] = y_aux_[cluster_i][j];
}
}
}
}
/*!
* \brief Get y_aux = Psi^-1*y
	* \param[out] y_aux Psi^-1*y (=y_aux_). This vector needs to be pre-allocated with length num_data_
*/
void GetYAux(vec_t& y_aux) {
CHECK(y_aux_has_been_calculated_);
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {
y_aux = y_aux_[unique_clusters_[0]];
}
else {
for (const auto& cluster_i : unique_clusters_) {
y_aux(data_indices_per_cluster_[cluster_i]) = y_aux_[cluster_i];
}
}
}
/*!
* \brief Calculate the gradient of the Laplace-approximated negative log-likelihood with respect to the fixed effects F (only used for non-Gaussian data)
	* \param[out] grad_F Gradient of the Laplace-approximated negative log-likelihood with respect to the fixed effects F. This vector needs to be pre-allocated with length num_data_
* \param fixed_effects Fixed effects component of location parameter
*/
void CalcGradFLaplace(double* grad_F, const double* fixed_effects = nullptr) {
const double* fixed_effects_cluster_i_ptr = nullptr;
vec_t fixed_effects_cluster_i;
for (const auto& cluster_i : unique_clusters_) {
vec_t grad_F_cluster_i(num_data_per_cluster_[cluster_i]);
//map fixed effects to clusters (if needed)
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
fixed_effects_cluster_i_ptr = fixed_effects;
}
else if (fixed_effects != nullptr) {//more than one cluster and order of samples matters
fixed_effects_cluster_i = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
fixed_effects_cluster_i[j] = fixed_effects[data_indices_per_cluster_[cluster_i][j]];
}
fixed_effects_cluster_i_ptr = fixed_effects_cluster_i.data();
}
if (vecchia_approx_) {//vecchia_approx_
likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxVecchia(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
B_[cluster_i],
D_inv_[cluster_i],
B_grad_[cluster_i],
D_grad_[cluster_i],
false,
true,
nullptr,
grad_F_cluster_i,
false);
}//end vecchia_approx_
else {//not vecchia_approx_
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxGroupedRE<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
SigmaI_[cluster_i],
Zt_[cluster_i],
cum_num_rand_eff_[cluster_i],
false,
true,
nullptr,
grad_F_cluster_i,
false);
}
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->cov_pars_[0],
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
false,
true,
nullptr,
grad_F_cluster_i,
false);
}
else if (only_one_GP_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
re_comps_[cluster_i],
false,
true,
nullptr,
grad_F_cluster_i,
false);
}
else {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxStable<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i],
re_comps_[cluster_i],
false,
true,
nullptr,
grad_F_cluster_i,
false);
}
}//end not vecchia_approx_
//write on output
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
#pragma omp parallel for schedule(static)//write on output
for (int j = 0; j < num_data_; ++j) {
grad_F[j] = grad_F_cluster_i[j];
}
}
else {//more than one cluster and order of samples matters
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
grad_F[data_indices_per_cluster_[cluster_i][j]] = grad_F_cluster_i[j];
}
} // end more than one cluster
}//end loop over cluster
}//end CalcGradFLaplace
/*!
* \brief Do Cholesky decomposition and save in chol_facts_ (actual matrix) and chol_facts_solve_ (Eigen solver) if sparse matrices are used
* \param psi Covariance matrix for which the Cholesky decomposition should be done
* \param cluster_i Cluster index for which the Cholesky factor is calculated
* \param analyze_pattern If true, the pattern is analyzed as well (only for sparse matrices)
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
if (analyze_pattern) {
chol_facts_solve_[cluster_i].analyzePattern(psi);
}
chol_facts_solve_[cluster_i].factorize(psi);
chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
chol_facts_[cluster_i].makeCompressed();
}
/*!
* \brief Do Cholesky decomposition and save in chol_facts_ (actual matrix) and chol_facts_solve_ (Eigen solver) if dense matrices are used
* \param psi Covariance matrix for which the Cholesky decomposition should be done
* \param cluster_i Cluster index for which the Cholesky factor is calculated
* \param analyze_pattern If true, the pattern is analyzed as well (only for sparse matrices)
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
if (analyze_pattern) {
Log::REWarning("Pattern of Cholesky factor is not analyzed when dense matrices are used.");
}
chol_facts_solve_[cluster_i].compute(psi);
chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
}
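	// Note on the two CalcChol overloads: the unnamed 'std::enable_if' template parameter selects
	// the matching overload at compile time (SFINAE), so only the version for the matrix type T3
	// actually used is instantiated. Illustrative call, assuming T_mat is the active matrix type:
	//   CalcChol<T_mat>(psi, cluster_i, do_symbolic_decomposition_);
	// For sparse matrices, analyzePattern() can be reused across factorizations that share the
	// same sparsity structure; for dense matrices there is no pattern to analyze.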
/*!
	* \brief Calculate Psi^(-1) if sparse matrices are used
	* \param[out] psi_inv Inverse covariance matrix
	* \param cluster_i Cluster index for which Psi^(-1) is calculated
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
if (only_grouped_REs_use_woodbury_identity_) {
sp_mat_t MInvSqrtZt;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
MInvSqrtZt = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * Zt_[cluster_i];
}
else {
sp_mat_t L_inv;
eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], Id_[cluster_i], L_inv, true);
MInvSqrtZt = L_inv * Zt_[cluster_i];
////Alternative option (crashes when eigen_sp_Lower_sp_RHS_cs_solve uses sp_Lower_sp_RHS_cs_solve / cs_spsolve due to Eigen bug)
//eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], Zt_[cluster_i], MInvSqrtZt, true);
}
psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt;//this is slow since n can be large (O(n^2*m))
psi_inv.diagonal().array() += 1.0;
}
else {
//Using CSparse function 'cs_spsolve'
cs L_cs = cs();//Prepare LHS
L_cs.nzmax = (int)chol_facts_[cluster_i].nonZeros();
L_cs.m = num_data_per_cluster_[cluster_i];
L_cs.n = num_data_per_cluster_[cluster_i];
L_cs.p = reinterpret_cast<csi*>(chol_facts_[cluster_i].outerIndexPtr());
L_cs.i = reinterpret_cast<csi*>(chol_facts_[cluster_i].innerIndexPtr());
L_cs.x = chol_facts_[cluster_i].valuePtr();
L_cs.nz = -1;
//Invert Cholesky factor
sp_mat_t L_inv;
sp_Lower_sp_RHS_cs_solve(&L_cs, &Id_cs_[cluster_i], L_inv, true);
psi_inv = L_inv.transpose() * L_inv;
////Version 2: doing sparse solving "by hand" but ignoring sparse RHS
//const double* val = chol_facts_[cluster_i].valuePtr();
//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
//den_mat_t L_inv_dens = den_mat_t(Id_[cluster_i]);
//for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], L_inv_dens.data() + j * num_data_per_cluster_[cluster_i]);
//}
//const sp_mat_t L_inv = L_inv_dens.sparseView();
//psi_inv = L_inv.transpose() * L_inv;
////Version 1: let Eigen do the solving
			//psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
}
}
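	// Illustrative derivation for the Woodbury branch above: with the error variance profiled out,
	// Psi = I + Z*Sigma*Z^T, and the Woodbury identity gives
	//   Psi^(-1) = I - Z * (Sigma^(-1) + Z^T*Z)^(-1) * Z^T = I - (L^(-1)*Z^T)^T * (L^(-1)*Z^T),
	// where L*L^T = Sigma^(-1) + Z^T*Z is the Cholesky factorization stored in chol_facts_.
	// 'MInvSqrtZt' corresponds to L^(-1)*Z^T, which is why psi_inv is assembled as
	// -MInvSqrtZt^T * MInvSqrtZt with 1.0 added on the diagonal.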
/*!
	* \brief Calculate Psi^(-1) if dense matrices are used
	* \param[out] psi_inv Inverse covariance matrix
	* \param cluster_i Cluster index for which Psi^(-1) is calculated
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
if (only_grouped_REs_use_woodbury_identity_) {//typically currently not called as only_grouped_REs_use_woodbury_identity_ is only true for grouped REs only i.e. sparse matrices
T3 MInvSqrtZt;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
MInvSqrtZt = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * Zt_[cluster_i];
}
else {
MInvSqrtZt = Zt_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
for (int j = 0; j < (int)MInvSqrtZt.cols(); ++j) {
					L_solve(chol_facts_[cluster_i].data(), (int)chol_facts_[cluster_i].cols(), MInvSqrtZt.data() + j * (int)MInvSqrtZt.rows());//column j starts at offset j * rows() in column-major storage
}
}
psi_inv = -MInvSqrtZt.transpose() * MInvSqrtZt;
psi_inv.diagonal().array() += 1.0;
}
else {
////Version 1
//psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
//Version 2: solving by hand
T3 L_inv = Id_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], L_inv.data() + j * num_data_per_cluster_[cluster_i]);
}
//chol_facts_[cluster_i].triangularView<Eigen::Lower>().solveInPlace(L_inv); //slower
psi_inv = L_inv.transpose() * L_inv;
// Using dpotri from LAPACK does not work since LAPACK is not installed
//int info = 0;
//int n = num_data_per_cluster_[cluster_i];
//int lda = num_data_per_cluster_[cluster_i];
//char* uplo = "L";
//den_mat_t M = chol_facts_[cluster_i];
//BLASFUNC(dpotri)(uplo, &n, M.data(), &lda, &info);
}
}
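	// The dense 'Version 2' above obtains L^(-1) by forward-solving L*x = e_j for every column e_j
	// of the identity matrix Id_ (one L_solve call per column, parallelized over the columns) and
	// then uses Psi^(-1) = (L*L^T)^(-1) = L^(-T)*L^(-1) = L_inv^T * L_inv.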
/*!
	* \brief Calculate Psi^(-0.5)H if sparse matrices are used. Used in 'NewtonUpdateLeafValues' and if only_grouped_REs_use_woodbury_identity_ == true
	* \param H Right-hand side matrix H
	* \param[out] PsiInvSqrtH Psi^(-0.5)H = solve(chol(Psi),H)
* \param cluster_i Cluster index for which Psi^(-0.5)H is calculated
* \param lower true if chol_facts_[cluster_i] is a lower triangular matrix
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(sp_mat_t& H, T3& PsiInvSqrtH, gp_id_t cluster_i, bool lower = true) {
eigen_sp_Lower_sp_RHS_solve(chol_facts_[cluster_i], H, PsiInvSqrtH, lower);
//TODO: use eigen_sp_Lower_sp_RHS_cs_solve -> faster? (currently this crashes due to Eigen bug, see the definition of sp_Lower_sp_RHS_cs_solve for more details)
}
/*!
	* \brief Calculate Psi^(-0.5)H if dense matrices are used. Used in 'NewtonUpdateLeafValues' and if only_grouped_REs_use_woodbury_identity_ == true
	* \param H Right-hand side matrix H
	* \param[out] PsiInvSqrtH Psi^(-0.5)H = solve(chol(Psi),H)
* \param cluster_i Cluster index for which Psi^(-0.5)H is calculated
* \param lower true if chol_facts_[cluster_i] is a lower triangular matrix
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(sp_mat_t& H, T3& PsiInvSqrtH, gp_id_t cluster_i, bool lower = true) {
PsiInvSqrtH = den_mat_t(H);
#pragma omp parallel for schedule(static)
		for (int j = 0; j < (int)H.cols(); ++j) {
if (lower) {
L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
}
else {
L_t_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
}
}
}
///*!
	//* \brief Calculate X^TPsi^(-1)X
//* \param X Covariate data matrix X
//* \param[out] XT_psi_inv_X X^TPsi^(-1)X
//*/
// template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
// void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
// den_mat_t BX;
// if (num_clusters_ == 1) {
// gp_id_t cluster0 = unique_clusters_[0];
// if (vecchia_approx_) {
// BX = B_[cluster0] * X;
// XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
// }
// else {
// BX = X;
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
// L_solve(chol_facts_[cluster0].data(), num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
// }
// XT_psi_inv_X = BX.transpose() * BX;
// }
// }
// else {
// XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
// XT_psi_inv_X.setZero();
// for (const auto& cluster_i : unique_clusters_) {
// if (vecchia_approx_) {
// BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
// XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
// }
// else {
// BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
// }
// XT_psi_inv_X += (BX.transpose() * BX);
// }
// }
// }
// }
// //same for sparse matrices
// template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
// void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
// den_mat_t BX;
// if (num_clusters_ == 1) {
// gp_id_t cluster0 = unique_clusters_[0];
// if (vecchia_approx_) {
// BX = B_[cluster0] * X;
// XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
// }
// else {
// BX = X;
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
// sp_L_solve(chol_facts_[cluster0].valuePtr(), chol_facts_[cluster0].innerIndexPtr(), chol_facts_[cluster0].outerIndexPtr(),
// num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
// }
// XT_psi_inv_X = BX.transpose() * BX;
// }
// }
// else {
// XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
// XT_psi_inv_X.setZero();
// for (const auto& cluster_i : unique_clusters_) {
// if (vecchia_approx_) {
// BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
// XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
// }
// else {
// BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// sp_L_solve(chol_facts_[cluster_i].valuePtr(), chol_facts_[cluster_i].innerIndexPtr(), chol_facts_[cluster_i].outerIndexPtr(),
// num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
// }
// XT_psi_inv_X += (BX.transpose() * BX);
// }
// }
// }
// }
/*!
	* \brief Calculate X^TPsi^(-1)X
* \param X Covariate data matrix X
* \param[out] XT_psi_inv_X X^TPsi^(-1)X
*/
void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
		if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent GP realization
if (vecchia_approx_) {
den_mat_t BX = B_[unique_clusters_[0]] * X;
XT_psi_inv_X = BX.transpose() * D_inv_[unique_clusters_[0]] * BX;
}
else {
if (only_grouped_REs_use_woodbury_identity_) {
den_mat_t ZtX = Zt_[unique_clusters_[0]] * X;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
den_mat_t MInvSqrtZtX = chol_facts_[unique_clusters_[0]].diagonal().array().inverse().matrix().asDiagonal() * ZtX;
XT_psi_inv_X = X.transpose() * X - MInvSqrtZtX.transpose() * MInvSqrtZtX;
}
else {
						//TODO: use only one forward solve (sp_L_solve for sparse and L_solve for dense matrices) instead of using Eigen's solver, which does two solves. But this requires a template function since the Cholesky factor is of type T_mat
XT_psi_inv_X = X.transpose() * X - ZtX.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(ZtX);
}
}
else {
XT_psi_inv_X = X.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(X);
}
}
		}//end only one cluster / independent GP realization
else {//more than one cluster and order of samples matters
XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
XT_psi_inv_X.setZero();
den_mat_t BX;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
}
else {
if (only_grouped_REs_use_woodbury_identity_) {
den_mat_t ZtX = Zt_[cluster_i] * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all);
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
den_mat_t MInvSqrtZtX = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtX;
XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all) -
MInvSqrtZtX.transpose() * MInvSqrtZtX;
}
else {
XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * (den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all) -
ZtX.transpose() * chol_facts_solve_[cluster_i].solve(ZtX);
}
}
else {
XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * chol_facts_solve_[cluster_i].solve((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all));
}
}
}
}//end more than one cluster
}
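	// Summary of the algebra used in CalcXTPsiInvX above (illustrative):
	//  - Vecchia approximation: Psi^(-1) = B^T * D^(-1) * B, hence
	//      X^T * Psi^(-1) * X = (B*X)^T * D^(-1) * (B*X).
	//  - Grouped REs with Woodbury identity: Psi^(-1) = I - Z * (Sigma^(-1) + Z^T*Z)^(-1) * Z^T, hence
	//      X^T * Psi^(-1) * X = X^T*X - (Z^T*X)^T * (Sigma^(-1) + Z^T*Z)^(-1) * (Z^T*X).
	//  - General case: X^T * Psi^(-1) * X = X^T * solve(Psi, X) via the Cholesky solver.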
/*!
* \brief Initialize data structures for handling independent realizations of the Gaussian processes. Answers written on arguments.
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization)
* \param[out] num_data_per_cluster Keys: labels of independent clusters, values: number of data points per independent realization
	* \param[out] data_indices_per_cluster Keys: labels of independent clusters, values: vectors with indices of the data points that belong to every cluster
* \param[out] unique_clusters Unique labels of independent realizations
* \param[out] num_clusters Number of independent clusters
*/
void SetUpGPIds(data_size_t num_data, const gp_id_t* cluster_ids_data,
std::map<gp_id_t, int>& num_data_per_cluster, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
std::vector<gp_id_t>& unique_clusters, data_size_t& num_clusters) {
if (cluster_ids_data != nullptr) {
for (int i = 0; i < num_data; ++i) {
if (num_data_per_cluster.find(cluster_ids_data[i]) == num_data_per_cluster.end()) {//first occurrence of cluster_ids_data[i]
unique_clusters.push_back(cluster_ids_data[i]);
num_data_per_cluster.insert({ cluster_ids_data[i], 1 });
std::vector<int> id;
id.push_back(i);
data_indices_per_cluster.insert({ cluster_ids_data[i], id });
}
else {
num_data_per_cluster[cluster_ids_data[i]] += 1;
data_indices_per_cluster[cluster_ids_data[i]].push_back(i);
}
}
num_clusters = (data_size_t)unique_clusters.size();
}
else {
unique_clusters.push_back(0);
num_data_per_cluster.insert({ 0, num_data });
num_clusters = 1;
std::vector<int> gp_id_vec(num_data);
for (int i = 0; i < num_data; ++i) {
gp_id_vec[i] = i;
}
data_indices_per_cluster.insert({ 0, gp_id_vec });
}
}
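	// Illustrative example (hypothetical data): for num_data = 4 and cluster_ids_data = {5, 5, 2, 5},
	// SetUpGPIds yields unique_clusters = [5, 2], num_clusters = 2, num_data_per_cluster = {5: 3, 2: 1},
	// and data_indices_per_cluster = {5: [0, 1, 3], 2: [2]}. If cluster_ids_data == nullptr, all data
	// points are assigned to a single cluster with label 0.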
/*!
* \brief Convert characters in 'const char* re_group_data' to matrix (num_re_group x num_data) with strings of group labels
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
	* \param[out] re_group_levels Matrix of dimension num_re_group x num_data with strings of group labels for levels of grouped random effects
*/
void ConvertCharToStringGroupLevels(data_size_t num_data, data_size_t num_re_group,
const char* re_group_data, std::vector<std::vector<string_t>>& re_group_levels) {
int char_start = 0;
for (int ire = 0; ire < num_re_group; ++ire) {//TODO: catch / report potential error if format of re_group_data is not correct
for (int id = 0; id < num_data; ++id) {
int number_chars = 0;
while (re_group_data[char_start + number_chars] != '\0') {
number_chars++;
}
re_group_levels[ire][id] = std::string(re_group_data + char_start);
char_start += number_chars + 1;
}
}
}
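	// Illustrative example (hypothetical input): for num_data = 3 and num_re_group = 2, the buffer
	//   re_group_data = "A\0B\0A\0g1\0g1\0g2\0"
	// is parsed column-major into re_group_levels = {{"A", "B", "A"}, {"g1", "g1", "g2"}},
	// i.e., first all labels of the first grouped random effect, then those of the second.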
/*!
* \brief Initialize likelihoods
* \param likelihood Likelihood name
*/
void InitializeLikelihoods(const string_t& likelihood) {
for (const auto& cluster_i : unique_clusters_) {
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_chol>>(new Likelihood<T_chol>(likelihood,
num_data_per_cluster_[cluster_i],
cum_num_rand_eff_[cluster_i][num_comps_total_]));
}
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_chol>>(new Likelihood<T_chol>(likelihood,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->GetNumUniqueREs()));
}
else if (only_one_GP_calculations_on_RE_scale_) {
likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_chol>>(new Likelihood<T_chol>(likelihood,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->GetNumUniqueREs()));
}
else {
likelihood_[cluster_i] = std::unique_ptr<Likelihood<T_chol>>(new Likelihood<T_chol>(likelihood,
num_data_per_cluster_[cluster_i],
num_data_per_cluster_[cluster_i]));
}
if (!gauss_likelihood_) {
likelihood_[cluster_i]->InitializeModeAvec();
}
}
}
/*!
* \brief Function that determines
	* (i) the indices (in ind_par_) of the covariance parameters of every random effect component in the vector of all covariance parameters
* (ii) the total number of covariance parameters
*/
void DetermineCovarianceParameterIndicesNumCovPars() {
// Determine num_cov_par_
num_cov_par_ = num_re_group_total_ + (2 * num_gp_total_);
if (gauss_likelihood_) {
num_cov_par_++;//nugget effect
}
// Determine ind_par_
ind_par_ = std::vector<data_size_t>();
		//First re_comp has either index 0 or 1 (the latter if there is a nugget effect for Gaussian data)
if (gauss_likelihood_) {
ind_par_.push_back(1);
}
else {
ind_par_.push_back(0);
}
//Add indices of parameters of individual components in joint parameter vector
for (int j = 0; j < num_re_group_total_; ++j) {
ind_par_.push_back(ind_par_.back() + 1);//end points of parameter indices of components
}
for (int j = 0; j < num_gp_total_; ++j) {
ind_par_.push_back(ind_par_.back() + 2);//end points of parameter indices of components
}
}
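	// Illustrative example (hypothetical model): for a Gaussian likelihood with 2 grouped random
	// effects and 1 GP, num_cov_par_ = 2 + 2*1 + 1 (nugget) = 5 and ind_par_ = [1, 2, 3, 5]:
	// parameter 0 is the nugget effect, the two grouped REs own parameters [1,2) and [2,3),
	// and the GP owns parameters [3,5) (two parameters, e.g., marginal variance and range).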
/*!
* \brief Function that determines whether to use special options for estimation and prediction for certain special cases of random effects models
*/
void DetermineSpecialCasesModelsEstimationPrediction() {
// Decide whether to use the Woodbury identity (i.e. do matrix inversion on the b scale and not the Zb scale) for grouped random effects models only
if (num_re_group_ > 0 && num_gp_total_ == 0) {
			do_symbolic_decomposition_ = true;//Symbolic decomposition is only done if sparse matrices are used
only_grouped_REs_use_woodbury_identity_ = true;//Faster to use Woodbury identity since the dimension of the random effects is typically much smaller than the number of data points
			//Note: the use of the Woodbury identity is currently only implemented for grouped random effects (which is also the only use of it).
// If this should be applied to GPs in the future, adaptions need to be made e.g. in the calculations of the gradient (see y_tilde2_)
}
else {
do_symbolic_decomposition_ = false;
only_grouped_REs_use_woodbury_identity_ = false;
}
// Following are options that depend on the type of likelihood used
//Define options for faster calculations for special cases of RE models
only_one_GP_calculations_on_RE_scale_ = num_gp_total_ == 1 && num_comps_total_ == 1 && !gauss_likelihood_ && !vecchia_approx_;//If there is only one GP, we do calculations on the b-scale instead of Zb-scale (currently only for non-Gaussian data)
only_one_grouped_RE_calculations_on_RE_scale_ = num_re_group_total_ == 1 && num_comps_total_ == 1 && !gauss_likelihood_;//If there is only one grouped RE, we do (all) calculations on the b-scale instead of the Zb-scale (currently only for non-Gaussian data)
only_one_grouped_RE_calculations_on_RE_scale_for_prediction_ = num_re_group_total_ == 1 && num_comps_total_ == 1 && gauss_likelihood_;//If there is only one grouped RE, we do calculations for prediction on the b-scale instead of the Zb-scale (only effective for Gaussian data)
}
/*!
* \brief Initialize required matrices used when only_grouped_REs_use_woodbury_identity_==true
*/
void InitializeMatricesForOnlyGroupedREsUseWoodburyIdentity() {
CHECK(num_comps_total_ == num_re_group_total_);
Zt_ = std::map<gp_id_t, sp_mat_t>();
ZtZ_ = std::map<gp_id_t, sp_mat_t>();
cum_num_rand_eff_ = std::map<gp_id_t, std::vector<data_size_t>>();
Zj_square_sum_ = std::map<gp_id_t, std::vector<double>>();
ZtZj_ = std::map<gp_id_t, std::vector<sp_mat_t>>();
for (const auto& cluster_i : unique_clusters_) {
std::vector<data_size_t> cum_num_rand_eff_cluster_i(num_comps_total_ + 1);
cum_num_rand_eff_cluster_i[0] = 0;
			//Determine number of columns and non-zero entries of Z
int non_zeros = 0;
int ncols = 0;
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
ncols += (int)Z_j->cols();
non_zeros += (int)Z_j->nonZeros();
cum_num_rand_eff_cluster_i[j + 1] = ncols;
}
//Create matrix Z and calculate sum(Z_j^2) = trace(Z_j^T * Z_j)
std::vector<Triplet_t> triplets;
triplets.reserve(non_zeros);
std::vector<double> Zj_square_sum_cluster_i(num_comps_total_);
int ncol_prev = 0;
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
for (int k = 0; k < Z_j->outerSize(); ++k) {
for (sp_mat_t::InnerIterator it(*Z_j, k); it; ++it) {
triplets.emplace_back(it.row(), ncol_prev + it.col(), it.value());
}
}
ncol_prev += (int)Z_j->cols();
Zj_square_sum_cluster_i[j] = Z_j->squaredNorm();
}
sp_mat_t Z_cluster_i(num_data_per_cluster_[cluster_i], ncols);
Z_cluster_i.setFromTriplets(triplets.begin(), triplets.end());
sp_mat_t Zt_cluster_i = Z_cluster_i.transpose();
sp_mat_t ZtZ_cluster_i = Zt_cluster_i * Z_cluster_i;
//Calculate Z^T * Z_j
std::vector<sp_mat_t> ZtZj_cluster_i(num_comps_total_);
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
ZtZj_cluster_i[j] = Zt_cluster_i * (*Z_j);
}
//Save all quantities
Zt_.insert({ cluster_i, Zt_cluster_i });
ZtZ_.insert({ cluster_i, ZtZ_cluster_i });
cum_num_rand_eff_.insert({ cluster_i, cum_num_rand_eff_cluster_i });
Zj_square_sum_.insert({ cluster_i, Zj_square_sum_cluster_i });
ZtZj_.insert({ cluster_i, ZtZj_cluster_i });
}
}
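	// Illustrative structure (hypothetical dimensions): with two grouped REs having m_1 and m_2
	// levels, Z = [Z_1, Z_2] is num_data x (m_1 + m_2) and cum_num_rand_eff_ = [0, m_1, m_1 + m_2],
	// so columns [cum_num_rand_eff_[j], cum_num_rand_eff_[j+1]) of Z belong to component j.
	// Zj_square_sum_[j] = trace(Z_j^T * Z_j) and ZtZj_[j] = Z^T * Z_j are precomputed once here
	// for later reuse when the Woodbury identity is applied.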
/*!
* \brief Initialize identity matrices required for Gaussian data
*/
void InitializeIdentityMatricesForGaussianData() {
if (gauss_likelihood_) {
for (const auto& cluster_i : unique_clusters_) {
				ConstructI<T_mat>(cluster_i);//Identity matrices needed for computing inverses of covariance matrices used in gradient descent for Gaussian data
}
}
}
/*!
* \brief Function that checks the compatibility of the chosen special options for estimation and prediction for certain special cases of random effects models
*/
void CheckCompatibilitySpecialOptions() {
//Some checks
if (only_one_GP_calculations_on_RE_scale_ && only_grouped_REs_use_woodbury_identity_) {
Log::REFatal("Cannot set both 'only_one_GP_calculations_on_RE_scale_' and 'only_grouped_REs_use_woodbury_identity_' to 'true'");
}
if (only_one_GP_calculations_on_RE_scale_ && only_one_grouped_RE_calculations_on_RE_scale_) {
Log::REFatal("Cannot set both 'only_one_GP_calculations_on_RE_scale_' and 'only_one_grouped_RE_calculations_on_RE_scale_' to 'true'");
}
if (vecchia_approx_) {//vecchia_approx_
if (num_re_group_total_ > 0) {
Log::REFatal("Vecchia approximation can currently not be used when there are grouped random effects");
}
}
if (only_one_GP_calculations_on_RE_scale_) {//only_one_GP_calculations_on_RE_scale_
if (gauss_likelihood_) {
Log::REFatal("Option 'only_one_GP_calculations_on_RE_scale_' is currently not implemented for Gaussian data");
}
if (vecchia_approx_) {
Log::REFatal("Option 'only_one_GP_calculations_on_RE_scale_' is currently not implemented for Vecchia approximation data");
}
CHECK(num_gp_total_ == 1);
CHECK(num_comps_total_ == 1);
CHECK(num_re_group_total_ == 0);
}
if (only_one_grouped_RE_calculations_on_RE_scale_) {//only_one_grouped_RE_calculations_on_RE_scale_
if (gauss_likelihood_) {
Log::REFatal("Option 'only_one_grouped_RE_calculations_on_RE_scale_' is currently not implemented for Gaussian data");
}
CHECK(!vecchia_approx_);
CHECK(num_gp_total_ == 0);
CHECK(num_comps_total_ == 1);
CHECK(num_re_group_total_ == 1);
}
if (only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {//only_one_grouped_RE_calculations_on_RE_scale_for_prediction_
CHECK(!vecchia_approx_);
CHECK(num_gp_total_ == 0);
CHECK(num_comps_total_ == 1);
CHECK(num_re_group_total_ == 1);
if (!gauss_likelihood_) {
Log::REFatal("Option 'only_one_grouped_RE_calculations_on_RE_scale_for_prediction_' is currently only effective for Gaussian data");
}
}
if (only_grouped_REs_use_woodbury_identity_) {//only_grouped_REs_use_woodbury_identity_
if (gauss_likelihood_ && only_one_grouped_RE_calculations_on_RE_scale_) {
Log::REFatal("Cannot enable 'only_one_grouped_RE_calculations_on_RE_scale_' if 'only_grouped_REs_use_woodbury_identity_' is enabled for Gaussian data");
}
CHECK(num_gp_total_ == 0);
CHECK(num_comps_total_ == num_re_group_total_);
}
}
/*!
	* \brief Initialize individual component models and collect them in a container
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
	* \param re_group_levels Group levels for every grouped random effect
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
	* \param ind_effect_group_rand_coef Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1.
* \param num_gp Number of Gaussian processes (intercept only, random coefficients not counting)
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param ind_intercept_gp Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs
* \param calculateZZt If true, the matrix Z*Z^T is calculated for grouped random effects and saved (usually not needed if Woodbury identity is used)
* \param[out] re_comps_cluster_i Container that collects the individual component models
*/
void CreateREComponents(data_size_t num_data,
data_size_t num_re_group,
std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
gp_id_t cluster_i,
std::vector<std::vector<string_t>>& re_group_levels,
std::map<gp_id_t, int>& num_data_per_cluster,
data_size_t num_re_group_rand_coef,
const double* re_group_rand_coef_data,
std::vector<int>& ind_effect_group_rand_coef,
data_size_t num_gp,
const double* gp_coords_data,
int dim_gp_coords,
const double* gp_rand_coef_data,
data_size_t num_gp_rand_coef,
const string_t cov_fct,
double cov_fct_shape,
int ind_intercept_gp,
bool calculateZZt,
std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i) {
//Grouped REs
if (num_re_group > 0) {
for (int j = 0; j < num_re_group; ++j) {
std::vector<re_group_t> group_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
group_data.push_back(re_group_levels[j][id]);//group_data_.push_back(std::string(re_group_data[j * num_data_ + id]));
}
re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T_mat>>(new RECompGroup<T_mat>(
group_data,
calculateZZt,
!only_one_grouped_RE_calculations_on_RE_scale_)));
}
//Random slopes
if (num_re_group_rand_coef > 0) {
for (int j = 0; j < num_re_group_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(re_group_rand_coef_data[j * num_data + id]);
}
					std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_cluster_i[ind_effect_group_rand_coef[j] - 1]);//Subtract 1 since ind_effect_group_rand_coef[j] starts counting at 1, not 0
re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T_mat>>(new RECompGroup<T_mat>(
re_comp->random_effects_indices_of_data_.data(),
re_comp->num_data_,
re_comp->map_group_label_index_,
re_comp->num_group_,
rand_coef_data,
calculateZZt)));
}
}
}
//GPs
if (num_gp > 0) {
std::vector<double> gp_coords;
for (int j = 0; j < dim_gp_coords; ++j) {
for (const auto& id : data_indices_per_cluster[cluster_i]) {
gp_coords.push_back(gp_coords_data[j * num_data + id]);
}
}
den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(
gp_coords_mat,
cov_fct,
cov_fct_shape,
true,
only_one_GP_calculations_on_RE_scale_)));
//Random slopes
if (num_gp_rand_coef > 0) {
for (int j = 0; j < num_gp_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
}
std::shared_ptr<RECompGP<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_cluster_i[ind_intercept_gp]);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(
re_comp->dist_,
re_comp->has_Z_,
&re_comp->Z_,
rand_coef_data,
cov_fct,
cov_fct_shape)));
}
}
}
}
/*!
	* \brief Initialize individual component models and collect them in a container when the Vecchia approximation is used
* \param num_data Number of data points
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param[out] re_comps_cluster_i Container that collects the individual component models
* \param[out] nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param[out] dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param[out] dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
	* \param[out] entries_init_B_cluster_i Triplets for initializing the matrices B
	* \param[out] entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param[out] z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param num_neighbors The number of neighbors used in the Vecchia approximation
*/
void CreateREComponentsVecchia(data_size_t num_data,
std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
gp_id_t cluster_i,
std::map<gp_id_t, int>& num_data_per_cluster,
const double* gp_coords_data,
int dim_gp_coords,
const double* gp_rand_coef_data,
data_size_t num_gp_rand_coef,
const string_t cov_fct,
double cov_fct_shape,
std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i,
std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
std::vector<den_mat_t>& dist_obs_neighbors_cluster_i,
std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
std::vector<Triplet_t >& entries_init_B_cluster_i,
std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
string_t vecchia_ordering = "none",
int num_neighbors = 30) {
if (vecchia_ordering == "random") {
unsigned seed = 0;
std::shuffle(data_indices_per_cluster[cluster_i].begin(), data_indices_per_cluster[cluster_i].end(), std::default_random_engine(seed));
}
std::vector<double> gp_coords;
for (int j = 0; j < dim_gp_coords; ++j) {
for (const auto& id : data_indices_per_cluster[cluster_i]) {
gp_coords.push_back(gp_coords_data[j * num_data + id]);
}
}
den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(gp_coords_mat, cov_fct, cov_fct_shape, false, false)));
find_nearest_neighbors_Veccia_fast(gp_coords_mat, num_data_per_cluster[cluster_i], num_neighbors,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
for (int j = 0; j < (int)nearest_neighbors_cluster_i[i].size(); ++j) {
entries_init_B_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
entries_init_B_grad_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
}
entries_init_B_cluster_i.push_back(Triplet_t(i, i, 1.));//Put 1's on the diagonal since B = I - A
}
//Random coefficients
if (num_gp_rand_coef > 0) {
for (int j = 0; j < num_gp_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
}
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T_mat>>(new RECompGP<T_mat>(rand_coef_data, cov_fct, cov_fct_shape)));
				//save random coefficient data in the form of outer product matrices
				#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
if (j == 0) {
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef);
}
int dim_z = (i == 0) ? 1 : ((int)nearest_neighbors_cluster_i[i].size() + 1);
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[i];
if (i > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
}
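	// Illustrative note on the Vecchia approximation set up above: with A collecting the nearest-
	// neighbor regression coefficients, B = I - A is triangular with unit diagonal (hence the 1's
	// put on the diagonal in entries_init_B_cluster_i), D is the diagonal matrix of conditional
	// variances, and the precision matrix is approximated as Psi^(-1) ~= B^T * D^(-1) * B.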
/*!
* \brief Set the covariance parameters of the components
* \param cov_pars Covariance parameters
*/
void SetCovParsComps(const vec_t& cov_pars) {
CHECK(cov_pars.size() == num_cov_par_);
if (gauss_likelihood_) {
sigma2_ = cov_pars[0];
}
for (const auto& cluster_i : unique_clusters_) {
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
re_comps_[cluster_i][j]->SetCovPars(pars);
}
}
}
/*!
	* \brief Transform the covariance parameters to the scale on which the MLE is found
	* \param cov_pars Covariance parameters
	* \param[out] cov_pars_trans Transformed covariance parameters
*/
void TransformCovPars(const vec_t& cov_pars, vec_t& cov_pars_trans) {
CHECK(cov_pars.size() == num_cov_par_);
cov_pars_trans = vec_t(num_cov_par_);
if (gauss_likelihood_) {
cov_pars_trans[0] = cov_pars[0];
}
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
vec_t pars_trans = pars;
if (gauss_likelihood_) {
re_comps_[unique_clusters_[0]][j]->TransformCovPars(cov_pars[0], pars, pars_trans);
}
else {
re_comps_[unique_clusters_[0]][j]->TransformCovPars(1., pars, pars_trans);
}
cov_pars_trans.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]) = pars_trans;
}
}
/*!
* \brief Back-transform the covariance parameters to the original scale
* \param cov_pars Covariance parameters
* \param[out] cov_pars_orig Back-transformed, original covariance parameters
*/
void TransformBackCovPars(const vec_t& cov_pars, vec_t& cov_pars_orig) {
CHECK(cov_pars.size() == num_cov_par_);
cov_pars_orig = vec_t(num_cov_par_);
if (gauss_likelihood_) {
cov_pars_orig[0] = cov_pars[0];
}
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]);
vec_t pars_orig = pars;
if (gauss_likelihood_) {
re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(cov_pars[0], pars, pars_orig);
}
else {
re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(1, pars, pars_orig);
}
cov_pars_orig.segment(ind_par_[j], ind_par_[j + 1] - ind_par_[j]) = pars_orig;
}
}
/*!
* \brief Calculate covariance matrices of the components
*/
void CalcSigmaComps() {
for (const auto& cluster_i : unique_clusters_) {
for (int j = 0; j < num_comps_total_; ++j) {
re_comps_[cluster_i][j]->CalcSigma();
}
}
}
/*!
	* \brief Construct the inverse covariance matrix Sigma^-1 if there are only grouped random effects (this is then a diagonal matrix)
* \param[out] SigmaI Inverse covariance matrix of random effects (a diagonal matrix)
* \param cluster_i Cluster index for which SigmaI is constructed
*/
void CalcSigmaIGroupedREsOnly(sp_mat_t& SigmaI, gp_id_t cluster_i) {
CHECK(!only_one_grouped_RE_calculations_on_RE_scale_);
std::vector<Triplet_t> triplets;
triplets.reserve(cum_num_rand_eff_[cluster_i][num_comps_total_]);
for (int j = 0; j < num_comps_total_; ++j) {
double sigmaI = re_comps_[cluster_i][j]->cov_pars_[0];
sigmaI = 1.0 / sigmaI;
for (int i = cum_num_rand_eff_[cluster_i][j]; i < cum_num_rand_eff_[cluster_i][j + 1]; ++i) {
triplets.emplace_back(i, i, sigmaI);
}
}
SigmaI = sp_mat_t(cum_num_rand_eff_[cluster_i][num_comps_total_], cum_num_rand_eff_[cluster_i][num_comps_total_]);
SigmaI.setFromTriplets(triplets.begin(), triplets.end());
}
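	// Illustrative example (hypothetical values): with two grouped REs having variances sigma_1^2
	// and sigma_2^2 and m_1 resp. m_2 levels, SigmaI is the diagonal matrix
	//   diag(1/sigma_1^2, ..., 1/sigma_1^2, 1/sigma_2^2, ..., 1/sigma_2^2)
	// with the first m_1 entries belonging to the first component and the remaining m_2 to the second.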
/*!
* \brief Factorize the covariance matrix (Gaussian data) or
* calculate the posterior mode of the random effects for use in the Laplace approximation (non-Gaussian data)
* And calculate the negative log-likelihood (Gaussian data) or the negative approx. marginal log-likelihood (non-Gaussian data)
* \param cov_pars Covariance parameters
* \param fixed_effects Fixed effects component of location parameter
*/
void CalcCovFactorOrModeAndNegLL(vec_t& cov_pars, const double* fixed_effects = nullptr) {
SetCovParsComps(cov_pars);
if (gauss_likelihood_) {
CalcCovFactor(vecchia_approx_, true, 1., false);//Create covariance matrix and factorize it (and also calculate derivatives if Vecchia approximation is used)
if (only_grouped_REs_use_woodbury_identity_) {
CalcYtilde<T_mat>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else {
CalcYAux();//y_aux = Psi^-1 * y
}
EvalNegLogLikelihood(nullptr, cov_pars.data(), neg_log_likelihood_, true, true, true);
}//end gauss_likelihood_
else {//not gauss_likelihood_
if (vecchia_approx_) {
CalcCovFactor(true, true, 1., false);
}
else {
CalcSigmaComps();
CalcCovMatrixNonGauss();
}
neg_log_likelihood_ = -CalcModePostRandEff(fixed_effects);//calculate mode and approximate marginal likelihood
}//end not gauss_likelihood_
}//end CalcCovFactorOrModeAndNegLL
/*!
* \brief Update covariance parameters, apply step size safeguard, factorize covariance matrix, and calculate new value of objective function
* \param[out] cov_pars Covariance parameters
* \param nat_grad Gradient for gradient descent or = FI^-1 * gradient for Fisher scoring (="natural" gradient)
	* \param[out] lr_cov Learning rate (can be written to in case it gets decreased)
* \param profile_out_marginal_variance If true, the first parameter (marginal variance, nugget effect) is ignored
* \param use_nesterov_acc If true, Nesterov acceleration is used
* \param it Iteration number
* \param optimizer_cov Optimizer used
* \param[out] cov_pars_after_grad_aux Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param[out] cov_pars_after_grad_aux_lag1 Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param acc_rate_cov Nesterov acceleration speed
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
	* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param fixed_effects Fixed effects component of location parameter
*/
void UpdateCovPars(vec_t& cov_pars, const vec_t& nat_grad, double& lr_cov, bool profile_out_marginal_variance,
bool use_nesterov_acc, int it, const string_t& optimizer_cov, vec_t& cov_pars_after_grad_aux, vec_t& cov_pars_after_grad_aux_lag1,
double acc_rate_cov, int nesterov_schedule_version, int momentum_offset, const double* fixed_effects = nullptr) {
vec_t cov_pars_new(num_cov_par_);
if (profile_out_marginal_variance) {
cov_pars_new[0] = cov_pars[0];
}
double lr = lr_cov;
bool decrease_found = false;
bool halving_done = false;
for (int ih = 0; ih < MAX_NUMBER_HALVING_STEPS_; ++ih) {
if (profile_out_marginal_variance) {
cov_pars_new.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - lr * nat_grad.array()).exp().matrix();//make update on log-scale
}
else {
cov_pars_new = (cov_pars.array().log() - lr * nat_grad.array()).exp().matrix();//make update on log-scale
}
// Apply Nesterov acceleration
if (use_nesterov_acc) {
cov_pars_after_grad_aux = cov_pars_new;
ApplyMomentumStep(it, cov_pars_after_grad_aux, cov_pars_after_grad_aux_lag1, cov_pars_new, acc_rate_cov,
nesterov_schedule_version, profile_out_marginal_variance, momentum_offset, true);
// Note: (i) cov_pars_after_grad_aux and cov_pars_after_grad_aux_lag1 correspond to the parameters obtained after calculating the gradient before applying acceleration
// (ii) cov_pars (below this) are the parameters obtained after applying acceleration (and cov_pars_lag1 is simply the value of the previous iteration)
				// We first apply a gradient step and then an acceleration step (and not the other way around) since this is computationally more efficient
// (otherwise the covariance matrix needs to be factored twice: once for the gradient step (accelerated parameters) and once for calculating the
// log-likelihood (non-accelerated parameters after gradient update) when checking for convergence at the end of an iteration.
// However, performing the acceleration before or after the gradient update gives equivalent algorithms
}
CalcCovFactorOrModeAndNegLL(cov_pars_new, fixed_effects);
			// Safeguard against too large steps by halving the learning rate when the objective increases
if (neg_log_likelihood_ <= neg_log_likelihood_after_lin_coef_update_) {
decrease_found = true;
break;
}
else {
halving_done = true;
lr *= 0.5;
acc_rate_cov *= 0.5;
if (!gauss_likelihood_) {
					// Reset mode to previous value since the parameters are also discarded
for (const auto& cluster_i : unique_clusters_) {
likelihood_[cluster_i]->ResetModeToPreviousValue();
}
}
}
}
if (halving_done) {
if (optimizer_cov == "fisher_scoring") {
Log::REDebug("GPModel covariance parameter estimation: No decrease in the objective function in iteration number %d. The learning rate has been decreased in this iteration.", it + 1);
}
else if (optimizer_cov == "gradient_descent") {
				lr_cov = lr; //permanently decrease learning rate (for Fisher scoring, this is not done, i.e., step halving is done anew in every iteration of Fisher scoring)
Log::REDebug("GPModel covariance parameter estimation: The learning rate has been decreased permanently since with the previous learning rate, there was no decrease in the objective function in iteration number %d. New learning rate = %g", it + 1, lr_cov);
}
}
if (!decrease_found) {
Log::REDebug("GPModel covariance parameter estimation: No decrease in the objective function in iteration number %d after the maximal number of halving steps (%d).", it + 1, MAX_NUMBER_HALVING_STEPS_);
}
if (use_nesterov_acc) {
cov_pars_after_grad_aux_lag1 = cov_pars_after_grad_aux;
}
cov_pars = cov_pars_new;
}//end UpdateCovPars
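	// Illustrative numeric example (hypothetical values) for the log-scale update above: for a
	// current parameter theta = 1.0, gradient component g = 2.0, and lr = 0.1, the update is
	//   theta_new = exp(log(1.0) - 0.1 * 2.0) = exp(-0.2) ~= 0.819,
	// which keeps the parameter strictly positive. If the objective does not decrease, lr is
	// halved (up to MAX_NUMBER_HALVING_STEPS_ times) and the step is recomputed.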
/*!
* \brief Update linear regression coefficients and apply step size safeguard
* \param[out] beta Linear regression coefficients
* \param grad Gradient
	* \param[out] lr_coef Learning rate (can be written to in case it gets decreased)
* \param use_nesterov_acc If true, Nesterov acceleration is used
* \param it Iteration number
* \param[out] beta_after_grad_aux Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param[out] beta_after_grad_aux_lag1 Auxiliary variable used only if use_nesterov_acc == true (see the code below for a description)
* \param acc_rate_coef Nesterov acceleration speed
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
	* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param fixed_effects External fixed effects
	* \param[out] fixed_effects_vec Fixed effects component of location parameter as the sum of the linear predictor and potential additional external fixed effects
*/
void UpdateLinCoef(vec_t& beta, const vec_t& grad, double& lr_coef, const vec_t& cov_pars,
bool use_nesterov_acc, int it, vec_t& beta_after_grad_aux, vec_t& beta_after_grad_aux_lag1,
double acc_rate_coef, int nesterov_schedule_version, int momentum_offset, const double* fixed_effects, vec_t& fixed_effects_vec) {
vec_t beta_new;
double lr = lr_coef;
vec_t resid;
bool decrease_found = false;
bool halving_done = false;
for (int ih = 0; ih < MAX_NUMBER_HALVING_STEPS_; ++ih) {
beta_new = beta - lr * grad;
// Apply Nesterov acceleration
if (use_nesterov_acc) {
beta_after_grad_aux = beta_new;
ApplyMomentumStep(it, beta_after_grad_aux, beta_after_grad_aux_lag1, beta_new, acc_rate_coef,
nesterov_schedule_version, false, momentum_offset, false);
//Note: use same version of Nesterov acceleration as for covariance parameters (see 'UpdateCovPars')
}
if (gauss_likelihood_) {
// Set resid for updating covariance parameters
resid = y_vec_ - (X_ * beta_new);
SetY(resid.data());
// Calculate y_aux = Psi^-1 * y (if not only_grouped_REs_use_woodbury_identity_) or y_tilde and y_tilde2 (if only_grouped_REs_use_woodbury_identity_) for covariance parameter update (only for Gaussian data)
if (only_grouped_REs_use_woodbury_identity_) {
CalcYtilde<T_mat>(true);//y_tilde = L^-1 * Z^T * y and y_tilde2 = Z * L^-T * L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else {
CalcYAux();//y_aux = Psi^-1 * y
}
EvalNegLogLikelihood(nullptr, cov_pars.data(), neg_log_likelihood_after_lin_coef_update_, true, true, true);
}//end if gauss_likelihood_
else {//non-Gaussian data
fixed_effects_vec = X_ * beta_new;
if (fixed_effects != nullptr) {//add external fixed effects to linear predictor
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_; ++i) {
fixed_effects_vec[i] += fixed_effects[i];
}
}
neg_log_likelihood_after_lin_coef_update_ = -CalcModePostRandEff(fixed_effects_vec.data());//calculate mode and approximate marginal likelihood
}
			// Safeguard against too large steps by halving the learning rate when the objective increases
if (neg_log_likelihood_after_lin_coef_update_ <= neg_log_likelihood_lag1_) {
decrease_found = true;
break;
}
else {
				// Safeguard against too large steps by halving the learning rate
halving_done = true;
lr *= 0.5;
acc_rate_coef *= 0.5;
if (!gauss_likelihood_) {
					// Reset mode to previous value since the parameters are also discarded
for (const auto& cluster_i : unique_clusters_) {
likelihood_[cluster_i]->ResetModeToPreviousValue();
}
}
}
}
if (halving_done) {
			lr_coef = lr; //permanently decrease learning rate (for Fisher scoring, this is not done, i.e., step halving is done anew in every iteration of Fisher scoring)
Log::REDebug("GPModel linear regression coefficient estimation: The learning rate has been decreased permanently since with the previous learning rate, there was no decrease in the objective function in iteration number %d. New learning rate = %g", it + 1, lr_coef);
}
if (!decrease_found) {
Log::REDebug("GPModel linear regression coefficient estimation: No decrease in the objective function in iteration number %d after the maximal number of halving steps (%d).", it + 1, MAX_NUMBER_HALVING_STEPS_);
}
if (use_nesterov_acc) {
beta_after_grad_aux_lag1 = beta_after_grad_aux;
}
beta = beta_new;
}//end UpdateLinCoef
/*!
* \brief Calculate the covariance matrix ZSigmaZt of the random effects (sum of all components)
* \param[out] ZSigmaZt Covariance matrix ZSigmaZt
* \param cluster_i Cluster index for which the covariance matrix is calculated
*/
void CalcZSigmaZt(T_mat& ZSigmaZt, gp_id_t cluster_i) {
ZSigmaZt.resize(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
if (gauss_likelihood_) {
ZSigmaZt.setIdentity();
}
else {
ZSigmaZt.setZero();
}
for (int j = 0; j < num_comps_total_; ++j) {
ZSigmaZt += (*(re_comps_[cluster_i][j]->GetZSigmaZt()));
}
}//end CalcZSigmaZt
/*!
	* \brief Calculate the covariance matrix ZSigmaZt if only_grouped_REs_use_woodbury_identity_==false or the inverse covariance matrix Sigma^-1 if there are only grouped REs, i.e., if only_grouped_REs_use_woodbury_identity_==true.
	* This function is only used for non-Gaussian data since in the Gaussian case this need not be saved
*/
void CalcCovMatrixNonGauss() {
if (!only_one_grouped_RE_calculations_on_RE_scale_) {//Nothing to calculate if only_one_grouped_RE_calculations_on_RE_scale_
if (only_grouped_REs_use_woodbury_identity_) {
for (const auto& cluster_i : unique_clusters_) {
CalcSigmaIGroupedREsOnly(SigmaI_[cluster_i], cluster_i);
}
}
else {
for (const auto& cluster_i : unique_clusters_) {
if (num_comps_total_ == 1) {//no need to sum up different components
ZSigmaZt_[cluster_i] = re_comps_[cluster_i][0]->GetZSigmaZt();
}
else {
T_mat ZSigmaZt;
CalcZSigmaZt(ZSigmaZt, cluster_i);
ZSigmaZt_[cluster_i] = std::make_shared<T_mat>(ZSigmaZt);
}
}
}
}
}//end CalcCovMatrixNonGauss
/*!
* \brief Calculate the mode of the posterior of the latent random effects for use in the Laplace approximation. This function is only used for non-Gaussian data
* \param fixed_effects Fixed effects component of location parameter
* \return Approximate marginal log-likelihood evaluated at the mode
*/
double CalcModePostRandEff(const double* fixed_effects = nullptr) {
double mll = 0.;
double mll_cluster_i;
const double* fixed_effects_cluster_i_ptr = nullptr;
vec_t fixed_effects_cluster_i;
for (const auto& cluster_i : unique_clusters_) {
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
fixed_effects_cluster_i_ptr = fixed_effects;
}
else if (fixed_effects != nullptr) {//more than one cluster and order of samples matters
fixed_effects_cluster_i = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
//TODO: this is quite inefficient as the mapping of the fixed_effects to the different clusters is done repeatedly for the same data. Could be saved if performance is an issue here.
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
fixed_effects_cluster_i[j] = fixed_effects[data_indices_per_cluster_[cluster_i][j]];
}
fixed_effects_cluster_i_ptr = fixed_effects_cluster_i.data();
}
if (vecchia_approx_) {
likelihood_[cluster_i]->FindModePostRandEffCalcMLLVecchia(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
B_[cluster_i],
D_inv_[cluster_i],
mll_cluster_i);
}
else {
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->FindModePostRandEffCalcMLLGroupedRE(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
SigmaI_[cluster_i],
Zt_[cluster_i],
mll_cluster_i);
}
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->cov_pars_[0],
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
mll_cluster_i);
}
else if (only_one_GP_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
mll_cluster_i);
						//Note: ZSigmaZt_[cluster_i] contains Sigma=Cov(b) and not Z*Sigma*Zt since has_Z_==false for this random effects component
}
else {
(*likelihood_[cluster_i]).template FindModePostRandEffCalcMLLStable<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i],
mll_cluster_i);
}
}
mll += mll_cluster_i;
}
return(mll);
}//CalcModePostRandEff
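	// Illustrative note: clusters are independent realizations, so the (approximate)
	// marginal log-likelihood decomposes as log p(y) = sum_i log p(y_i) over clusters i,
	// which is why the per-cluster Laplace approximations mll_cluster_i are summed above.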
/*!
* \brief Calculate matrices A and D_inv as well as their derivatives for the Vecchia approximation for one cluster (independent realization of GP)
* \param num_data_cluster_i Number of data points
	* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param re_comps_cluster_i Container that collects the individual component models
* \param nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
	* \param entries_init_B_cluster_i Triplets for initializing the matrices B
	* \param entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param[out] B_cluster_i Matrix A = I - B (= Cholesky factor of inverse covariance) for Vecchia approximation
* \param[out] D_inv_cluster_i Diagonal matrices D^-1 for Vecchia approximation
* \param[out] B_grad_cluster_i Derivatives of matrices A ( = derivative of matrix -B) for Vecchia approximation
* \param[out] D_grad_cluster_i Derivatives of matrices D for Vecchia approximation
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. Default = true
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance
*/
void CalcCovFactorVecchia(int num_data_cluster_i, bool calc_gradient,//TODO: make arguments const
std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i, std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
std::vector<den_mat_t>& dist_obs_neighbors_cluster_i, std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
std::vector<Triplet_t >& entries_init_B_cluster_i, std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
sp_mat_t& B_cluster_i, sp_mat_t& D_inv_cluster_i, std::vector<sp_mat_t>& B_grad_cluster_i, std::vector<sp_mat_t>& D_grad_cluster_i,
bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
int num_par_comp = re_comps_cluster_i[ind_intercept_gp_]->num_cov_par_;
int num_par_gp = num_par_comp * num_gp_total_ + calc_gradient_nugget;
		//Initialize matrices B = I - A and D^-1 as well as their derivatives (so that the loop below can be run in parallel)
B_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//B = I - A
B_cluster_i.setFromTriplets(entries_init_B_cluster_i.begin(), entries_init_B_cluster_i.end());//Note: 1's are put on the diagonal
D_inv_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//D^-1. Note: we first calculate D, and then take the inverse below
		D_inv_cluster_i.setIdentity();//Put 1's on the diagonal for nugget effect (entries are not overridden but added below)
if (!transf_scale) {
D_inv_cluster_i.diagonal().array() *= nugget_var;//nugget effect is not 1 if not on transformed scale
}
if (!gauss_likelihood_) {
			D_inv_cluster_i.diagonal().array() = 0.;//no nugget effect on the diagonal for non-Gaussian data
}
if (calc_gradient) {
			B_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of B = derivative of (-A)
D_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of D
for (int ipar = 0; ipar < num_par_gp; ++ipar) {
B_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
B_grad_cluster_i[ipar].setFromTriplets(entries_init_B_grad_cluster_i.begin(), entries_init_B_grad_cluster_i.end());
D_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
				D_grad_cluster_i[ipar].setIdentity();//allocate diagonal entries (values are set below)
				D_grad_cluster_i[ipar].diagonal().array() = 0.;//initialize diagonal to zero //TODO: maybe change initialization of this matrix by also using triplets -> faster?
}
}//end initialization
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_cluster_i; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//calculate covariance matrices between observations and neighbors and among neighbors as well as their derivatives
den_mat_t cov_mat_obs_neighbors(1, num_nn);
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);
			std::vector<den_mat_t> cov_grad_mats_obs_neighbors(num_par_gp);//derivatives of the covariance matrix wrt every parameter
std::vector<den_mat_t> cov_grad_mats_between_neighbors(num_par_gp);
if (i > 0) {
for (int j = 0; j < num_gp_total_; ++j) {
int ind_first_par = j * num_par_comp;//index of first parameter (variance) of component j in gradient vectors
if (j == 0) {
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);//write on matrices directly for first GP component
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();//cov_mat_obs_neighbors_j.cwiseProduct()
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
if (calc_gradient) {
cov_grad_mats_obs_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_grad_mats_obs_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_grad_mats_between_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_grad_mats_between_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
}
}
}//end loop over components j
			}//end if(i>0)
//Calculate matrices B and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii}) and its derivatives
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_cluster_i[ind_intercept_gp_ + j]->cov_pars_[0];
if (!transf_scale) {
d_comp_j *= nugget_var;
}
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
D_inv_cluster_i.coeffRef(i, i) += d_comp_j;
if (calc_gradient) {
if (transf_scale) {
						D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = d_comp_j;//derivative of the covariance function wrt the variance; the derivative wrt the range is zero on the diagonal
}
else {
if (j == 0) {
							D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = 1.;//1's on the diagonal on the original scale
}
else {
D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
}
}
}
if (calc_gradient && calc_gradient_nugget) {
D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) = 1.;
}
//2. remaining terms
if (i > 0) {
if (gauss_likelihood_) {
if (transf_scale) {
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
}
else {
cov_mat_between_neighbors.diagonal().array() += nugget_var;
}
}
//else {//Seems unnecessary
// cov_mat_between_neighbors.diagonal().array() += 1e-10;//Avoid numerical problems when there is no nugget effect
//}
den_mat_t A_i(1, num_nn);
den_mat_t cov_mat_between_neighbors_inv;
den_mat_t A_i_grad_sigma2;
if (calc_gradient) {
					// Note: it is faster (approx. 1.5-2 times) to first calculate cov_mat_between_neighbors_inv and then multiply this with the matrices below
// instead of always using the Cholesky factor of cov_mat_between_neighbors to calculate cov_mat_between_neighbors_inv * (a matrix)
den_mat_t I(num_nn, num_nn);
I.setIdentity();
cov_mat_between_neighbors_inv = cov_mat_between_neighbors.llt().solve(I);
A_i = cov_mat_obs_neighbors * cov_mat_between_neighbors_inv;
if (calc_gradient_nugget) {
A_i_grad_sigma2 = -A_i * cov_mat_between_neighbors_inv;
}
}
else {
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
}
for (int inn = 0; inn < num_nn; ++inn) {
B_cluster_i.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i(0, inn);
}
D_inv_cluster_i.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
if (calc_gradient) {
den_mat_t A_i_grad(1, num_nn);
for (int j = 0; j < num_gp_total_; ++j) {
int ind_first_par = j * num_par_comp;
for (int ipar = 0; ipar < num_par_comp; ++ipar) {
A_i_grad = (cov_grad_mats_obs_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv) -
(cov_mat_obs_neighbors * cov_mat_between_neighbors_inv *
cov_grad_mats_between_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv);
for (int inn = 0; inn < num_nn; ++inn) {
B_grad_cluster_i[ind_first_par + ipar].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad(0, inn);
}
if (ipar == 0) {
D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) -= ((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//add to derivative of diagonal elements for marginal variance
}
else {
D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) = -((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//don't add to existing values since derivative of diagonal is zero for range
}
}
}
if (calc_gradient_nugget) {
for (int inn = 0; inn < num_nn; ++inn) {
B_grad_cluster_i[num_par_gp - 1].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad_sigma2(0, inn);
}
D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) -= (A_i_grad_sigma2 * cov_mat_obs_neighbors.transpose())(0, 0);
}
}//end calc_gradient
}//end if i > 0
D_inv_cluster_i.coeffRef(i, i) = 1. / D_inv_cluster_i.coeffRef(i, i);
}//end loop over data i
}//end CalcCovFactorVecchia
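	// Illustrative note: 'CalcCovFactorVecchia' produces the approximate factorization
	//   Psi^-1 ~= B^T * D^-1 * B,  B = I - A (lower triangular with nearest-neighbor sparsity),
	// where row i of A holds the regression coefficients A_i = c_i^T * C_i^-1 of observation i
	// on its neighbors (c_i = cov(obs i, neighbors), C_i = cov(neighbors)) and
	// D_ii = var(obs i) - A_i * c_i. This factor is used, e.g., in 'CalcYAux' below, where
	// Psi^-1 * y is computed as y_aux = B^T * (D^-1 * (B * y)).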
/*!
* \brief Create the covariance matrix Psi and factorize it (either calculate a Cholesky factor or the inverse covariance matrix)
* Use only for Gaussian data
	* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. Default = true (only for Vecchia approximation)
	* \param nugget_var Nugget effect variance parameter sigma^2 (used only if vecchia_approx_==true and transf_scale==false to transform back; normally this is equal to one, since the variance parameter is modelled separately and factored out)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance (only for Vecchia approximation)
*/
void CalcCovFactor(bool calc_gradient = false, bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
if (vecchia_approx_) {
for (const auto& cluster_i : unique_clusters_) {
int num_data_cl_i = num_data_per_cluster_[cluster_i];
CalcCovFactorVecchia(num_data_cl_i, calc_gradient, re_comps_[cluster_i], nearest_neighbors_[cluster_i],
dist_obs_neighbors_[cluster_i], dist_between_neighbors_[cluster_i],
entries_init_B_[cluster_i], entries_init_B_grad_[cluster_i], z_outer_z_obs_neighbors_[cluster_i],
B_[cluster_i], D_inv_[cluster_i], B_grad_[cluster_i], D_grad_[cluster_i], transf_scale, nugget_var, calc_gradient_nugget);
}
}
else {
CalcSigmaComps();
for (const auto& cluster_i : unique_clusters_) {
				if (only_grouped_REs_use_woodbury_identity_) {//Use Woodbury matrix inversion formula: used only if there are only grouped REs
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
CalcSigmaIGroupedREsOnly(SigmaI_[cluster_i], cluster_i);
chol_facts_[cluster_i] = (SigmaI_[cluster_i].diagonal().array() + ZtZ_[cluster_i].diagonal().array()).sqrt().matrix().asDiagonal();
}
else {
sp_mat_t SigmaI;
CalcSigmaIGroupedREsOnly(SigmaI, cluster_i);
T_mat SigmaIplusZtZ = SigmaI + ZtZ_[cluster_i];
CalcChol<T_mat>(SigmaIplusZtZ, cluster_i, do_symbolic_decomposition_);
}
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
T_mat psi;
CalcZSigmaZt(psi, cluster_i);
CalcChol<T_mat>(psi, cluster_i, do_symbolic_decomposition_);
}//end not only_grouped_REs_use_woodbury_identity_
}
			do_symbolic_decomposition_ = false;//Symbolic decomposition is done only once (if sparse matrices are used)
}
}
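	// Illustrative note on the two non-Vecchia factorization paths in 'CalcCovFactor':
	//   - only grouped REs (Woodbury): factorize the (typically much smaller) matrix
	//       M = Sigma^-1 + Z^T * Z = L * L^T
	//     instead of the n x n covariance matrix Psi;
	//   - general case: factorize Psi = I + Z * Sigma * Z^T directly via Cholesky.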
/*!
* \brief Calculate Psi^-1*y (and save in y_aux_)
* \param marg_variance The marginal variance. Default = 1.
*/
void CalcYAux(double marg_variance = 1.) {
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (B_.find(cluster_i) == B_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
y_aux_[cluster_i] = B_[cluster_i].transpose() * D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];
}//end vecchia_approx_
else {//not vecchia_approx_
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
if (only_grouped_REs_use_woodbury_identity_) {
vec_t MInvZty;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
MInvZty = (Zty_[cluster_i].array() / (chol_facts_[cluster_i].diagonal().array().square())).matrix();
}
else {
MInvZty = chol_facts_solve_[cluster_i].solve(Zty_[cluster_i]);
}
y_aux_[cluster_i] = y_[cluster_i] - Zt_[cluster_i].transpose() * MInvZty;
}
else {
//Version 1: let Eigen do the computation
y_aux_[cluster_i] = chol_facts_solve_[cluster_i].solve(y_[cluster_i]);
//// Version 2 'do-it-yourself' (for sparse matrices)
//y_aux_[cluster_i] = y_[cluster_i];
//const double* val = chol_facts_[cluster_i].valuePtr();
//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
//sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
//sp_L_t_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
}
}//end non-Vecchia
if (marg_variance != 1.) {
y_aux_[cluster_i] /= marg_variance;
}
}
y_aux_has_been_calculated_ = true;
}
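	// Hypothetical usage sketch for 'CalcYAux' (assuming the model has been set up):
	//   SetY(y_data);        // set the response variable
	//   CalcCovFactor();     // factorize Psi (or calculate B and D^-1 for Vecchia)
	//   CalcYAux();          // y_aux_ = Psi^-1 * y
	//   vec_t y_aux(num_data_);
	//   GetYAux(y_aux);      // retrieve Psi^-1 * y (cf. 'CalcLinCoefGrad' below)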
/*!
* \brief Calculate y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (and save in y_tilde_) if sparse matrices are used
* \param also_calculate_ytilde2 If true y_tilde2 = Z * L^-T * L^-1 * Z^T * y is also calculated
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcYtilde(bool also_calculate_ytilde2 = false) {
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
y_tilde_[cluster_i] = (Zty_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix();
if (also_calculate_ytilde2) {
y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ((y_tilde_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix());
}
}
else {
y_tilde_[cluster_i] = Zty_[cluster_i];
const double* val = chol_facts_[cluster_i].valuePtr();
const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
sp_L_solve(val, row_idx, col_ptr, cum_num_rand_eff_[cluster_i][num_comps_total_], y_tilde_[cluster_i].data());
if (also_calculate_ytilde2) {
vec_t ytilde_aux = y_tilde_[cluster_i];
sp_L_t_solve(val, row_idx, col_ptr, cum_num_rand_eff_[cluster_i][num_comps_total_], ytilde_aux.data());
y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ytilde_aux;
}
}
}
}
/*!
* \brief Calculate y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z) (and save in y_tilde_) if dense matrices are used
* \param also_calculate_ytilde2 If true y_tilde2 = Z * L^-T * L^-1 * Z^T * y is also calculated
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcYtilde(bool also_calculate_ytilde2 = false) {
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
				y_tilde_[cluster_i] = (Zty_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix();
if (also_calculate_ytilde2) {
y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ((y_tilde_[cluster_i].array() / chol_facts_[cluster_i].diagonal().array()).matrix());
}
}
else {
y_tilde_[cluster_i] = Zty_[cluster_i];
L_solve(chol_facts_[cluster_i].data(), cum_num_rand_eff_[cluster_i][num_comps_total_], y_tilde_[cluster_i].data());
if (also_calculate_ytilde2) {
vec_t ytilde_aux = y_tilde_[cluster_i];
L_t_solve(chol_facts_[cluster_i].data(), cum_num_rand_eff_[cluster_i][num_comps_total_], ytilde_aux.data());
y_tilde2_[cluster_i] = Zt_[cluster_i].transpose() * ytilde_aux;
}
}
}
}
/*!
* \brief Calculate y^T*Psi^-1*y if sparse matrices are used
* \param[out] yTPsiInvy y^T*Psi^-1*y
* \param all_clusters If true, then y^T*Psi^-1*y is calculated for all clusters / data and cluster_ind is ignored
* \param cluster_ind Cluster index
* \param CalcYAux_already_done If true, it is assumed that y_aux_=Psi^-1y_ has already been calculated (only relevant for not only_grouped_REs_use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for only_grouped_REs_use_woodbury_identity_)
*/
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcYTPsiIInvY(double& yTPsiInvy, bool all_clusters = true,
gp_id_t cluster_ind = 1, bool CalcYAux_already_done = false, bool CalcYtilde_already_done = false) {
yTPsiInvy = 0;
std::vector<gp_id_t> clusters_iterate;
if (all_clusters) {
clusters_iterate = unique_clusters_;
}
else {
clusters_iterate = std::vector<gp_id_t>(1);
clusters_iterate[0] = cluster_ind;
}
for (const auto& cluster_i : clusters_iterate) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (CalcYAux_already_done) {
yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
}
else {
if (B_.find(cluster_i) == B_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i];
yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0);
}
}//end vecchia_approx_
else {//not vecchia_approx_
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
if (only_grouped_REs_use_woodbury_identity_) {
if (!CalcYtilde_already_done) {
CalcYtilde<T_mat>(false);//y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else if ((int)y_tilde_[cluster_i].size() != cum_num_rand_eff_[cluster_i][num_comps_total_]) {
Log::REFatal("y_tilde = L^-1 * Z^T * y has not the correct number of data points. Call 'CalcYtilde' first.");
}
yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_tilde_[cluster_i].transpose() * y_tilde_[cluster_i])(0, 0);
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
if (CalcYAux_already_done) {
yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
}
else {
vec_t y_aux_sqrt = y_[cluster_i];
const double* val = chol_facts_[cluster_i].valuePtr();
const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_sqrt.data());
yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
}
}
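	// Illustrative note: the Woodbury branch above relies on the identity
	//   Psi^-1 = I - Z * M^-1 * Z^T,  M = Sigma^-1 + Z^T * Z = L * L^T,
	// so that y^T * Psi^-1 * y = y^T * y - y_tilde^T * y_tilde with y_tilde = L^-1 * Z^T * y.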
/*!
* \brief Calculate y^T*Psi^-1*y if dense matrices are used
* \param[out] yTPsiInvy y^T*Psi^-1*y
* \param all_clusters If true, then y^T*Psi^-1*y is calculated for all clusters / data and cluster_ind is ignored
* \param cluster_ind Cluster index
* \param CalcYAux_already_done If true, it is assumed that y_aux_=Psi^-1y_ has already been calculated (only relevant for not only_grouped_REs_use_woodbury_identity_)
* \param CalcYtilde_already_done If true, it is assumed that y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z), has already been calculated (only relevant for only_grouped_REs_use_woodbury_identity_)
*/
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcYTPsiIInvY(double& yTPsiInvy, bool all_clusters = true,
gp_id_t cluster_ind = 1, bool CalcYAux_already_done = false, bool CalcYtilde_already_done = false) {
yTPsiInvy = 0;
std::vector<gp_id_t> clusters_iterate;
if (all_clusters) {
clusters_iterate = unique_clusters_;
}
else {
clusters_iterate = std::vector<gp_id_t>(1);
clusters_iterate[0] = cluster_ind;
}
for (const auto& cluster_i : clusters_iterate) {
if (y_.find(cluster_i) == y_.end()) {
Log::REFatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (CalcYAux_already_done) {
yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
}
else {
if (B_.find(cluster_i) == B_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
vec_t y_aux_sqrt = B_[cluster_i] * y_[cluster_i];
yTPsiInvy += (y_aux_sqrt.transpose() * D_inv_[cluster_i] * y_aux_sqrt)(0, 0);
}
}//end vecchia_approx_
else {//not vecchia_approx_
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::REFatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
if (only_grouped_REs_use_woodbury_identity_) {
if (!CalcYtilde_already_done) {
CalcYtilde<T_mat>(false);//y_tilde = L^-1 * Z^T * y, L = chol(Sigma^-1 + Z^T * Z)
}
else if ((int)y_tilde_[cluster_i].size() != cum_num_rand_eff_[cluster_i][num_comps_total_]) {
Log::REFatal("y_tilde = L^-1 * Z^T * y has not the correct number of data points. Call 'CalcYtilde' first.");
}
yTPsiInvy += (y_[cluster_i].transpose() * y_[cluster_i])(0, 0) - (y_tilde_[cluster_i].transpose() * y_tilde_[cluster_i])(0, 0);
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
if (CalcYAux_already_done) {
yTPsiInvy += (y_[cluster_i].transpose() * y_aux_[cluster_i])(0, 0);
}
else {
vec_t y_aux_sqrt = y_[cluster_i];
L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], y_aux_sqrt.data());
yTPsiInvy += (y_aux_sqrt.transpose() * y_aux_sqrt)(0, 0);
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
}
}
/*!
* \brief Calculate gradient for covariance parameters
* This assumes that the covariance matrix has been factorized (by 'CalcCovFactor') and that y_aux or y_tilde/y_tilde2 (if only_grouped_REs_use_woodbury_identity_) have been calculated (by 'CalcYAux' or 'CalcYtilde')
* \param cov_pars Covariance parameters
	* \param[out] cov_grad Gradient w.r.t. covariance parameters
* \param include_error_var If true, the gradient for the marginal variance parameter (=error, nugget effect) is also calculated, otherwise not (set this to true if the nugget effect is not calculated by using the closed-form solution)
* \param save_psi_inv If true, the inverse covariance matrix Psi^-1 is saved for reuse later (e.g. when calculating the Fisher information in Fisher scoring). This option is ignored if the Vecchia approximation is used.
* \param fixed_effects Fixed effects component of location parameter (used only for non-Gaussian data)
*/
void CalcCovParGrad(vec_t& cov_pars, vec_t& cov_grad, bool include_error_var = false,
bool save_psi_inv = false, const double* fixed_effects = nullptr) {
if (gauss_likelihood_) {//Gaussian data
if (include_error_var) {
cov_grad = vec_t::Zero(num_cov_par_);
}
else {
cov_grad = vec_t::Zero(num_cov_par_ - 1);
}
int first_cov_par = include_error_var ? 1 : 0;
for (const auto& cluster_i : unique_clusters_) {
				if (vecchia_approx_) {//Vecchia approximation
vec_t u(num_data_per_cluster_[cluster_i]);
vec_t uk(num_data_per_cluster_[cluster_i]);
if (include_error_var) {
u = B_[cluster_i] * y_[cluster_i];
cov_grad[0] += -1. * ((double)(u.transpose() * D_inv_[cluster_i] * u)) / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
u = D_inv_[cluster_i] * u;
}
else {
u = D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];//TODO: this is already calculated in CalcYAux -> save it there and re-use here?
}
for (int j = 0; j < num_comps_total_; ++j) {
int num_par_comp = re_comps_[cluster_i][j]->num_cov_par_;
for (int ipar = 0; ipar < num_par_comp; ++ipar) {
uk = B_grad_[cluster_i][num_par_comp * j + ipar] * y_[cluster_i];
cov_grad[first_cov_par + ind_par_[j] - 1 + ipar] += ((uk.dot(u) - 0.5 * u.dot(D_grad_[cluster_i][num_par_comp * j + ipar] * u)) / sigma2_ +
0.5 * (D_inv_[cluster_i].diagonal()).dot(D_grad_[cluster_i][num_par_comp * j + ipar].diagonal()));
}
}
}//end vecchia_approx_
else {//not vecchia_approx_
if (only_grouped_REs_use_woodbury_identity_) {
if (include_error_var) {
double yTPsiInvy;
CalcYTPsiIInvY<T_mat>(yTPsiInvy, false, cluster_i, true, true);
cov_grad[0] += -1. * yTPsiInvy / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
}
std::vector<T_mat> LInvZtZj_cluster_i;
if (save_psi_inv) {
LInvZtZj_[cluster_i].clear();
LInvZtZj_cluster_i = std::vector<T_mat>(num_comps_total_);
}
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
vec_t y_tilde_j = (*Z_j).transpose() * y_[cluster_i];
vec_t y_tilde2_j = (*Z_j).transpose() * y_tilde2_[cluster_i];
double yTPsiIGradPsiPsiIy = y_tilde_j.transpose() * y_tilde_j - 2. * (double)(y_tilde_j.transpose() * y_tilde2_j) + y_tilde2_j.transpose() * y_tilde2_j;
yTPsiIGradPsiPsiIy *= cov_pars[j + 1];
T_mat LInvZtZj;
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
LInvZtZj = ZtZ_[cluster_i];
LInvZtZj.diagonal().array() /= chol_facts_[cluster_i].diagonal().array();
}
else {
CalcPsiInvSqrtH(ZtZj_[cluster_i][j], LInvZtZj, cluster_i, true);
}
							if (save_psi_inv) {//save for later use when e.g. calculating the Fisher information
LInvZtZj_cluster_i[j] = LInvZtZj;
}
double trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - LInvZtZj.squaredNorm();
trace_PsiInvGradPsi *= cov_pars[j + 1];
cov_grad[first_cov_par + j] += -1. * yTPsiIGradPsiPsiIy / sigma2_ / 2. + trace_PsiInvGradPsi / 2.;
}
if (save_psi_inv) {
LInvZtZj_[cluster_i] = LInvZtZj_cluster_i;
}
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
T_mat psi_inv;
CalcPsiInv(psi_inv, cluster_i);
						if (save_psi_inv) {//save for later use when e.g. calculating the Fisher information
psi_inv_[cluster_i] = psi_inv;
}
if (include_error_var) {
cov_grad[0] += -1. * ((double)(y_[cluster_i].transpose() * y_aux_[cluster_i])) / sigma2_ / 2. + num_data_per_cluster_[cluster_i] / 2.;
}
for (int j = 0; j < num_comps_total_; ++j) {
for (int ipar = 0; ipar < re_comps_[cluster_i][j]->num_cov_par_; ++ipar) {
std::shared_ptr<T_mat> gradPsi = re_comps_[cluster_i][j]->GetZSigmaZtGrad(ipar, true, 1.);
cov_grad[first_cov_par + ind_par_[j] - 1 + ipar] += -1. * ((double)(y_aux_[cluster_i].transpose() * (*gradPsi) * y_aux_[cluster_i])) / sigma2_ / 2. +
((double)(((*gradPsi).cwiseProduct(psi_inv)).sum())) / 2.;
}
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
}// end loop over clusters
}//end gauss_likelihood_
else {//not gauss_likelihood_
if (include_error_var) {
Log::REFatal("There is no error variance (nugget effect) for non-Gaussian data");
}
cov_grad = vec_t::Zero(num_cov_par_);
vec_t cov_grad_cluster_i(num_cov_par_);
vec_t empty_unused_vec(0);//placeholder for fixed effects gradient
const double* fixed_effects_cluster_i_ptr = nullptr;
vec_t fixed_effects_cluster_i;
for (const auto& cluster_i : unique_clusters_) {
//map fixed effects to clusters (if needed)
vec_t grad_F_cluster_i(num_data_per_cluster_[cluster_i]);
if (num_clusters_ == 1 && (!vecchia_approx_ || vecchia_ordering_ == "none")) {//only one cluster / independent realization and order of data does not matter
fixed_effects_cluster_i_ptr = fixed_effects;
}
else if (fixed_effects != nullptr) {//more than one cluster and order of samples matters
fixed_effects_cluster_i = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
#pragma omp parallel for schedule(static)
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
fixed_effects_cluster_i[j] = fixed_effects[data_indices_per_cluster_[cluster_i][j]];
}
fixed_effects_cluster_i_ptr = fixed_effects_cluster_i.data();
}
				if (vecchia_approx_) {//Vecchia approximation
likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxVecchia(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
B_[cluster_i],
D_inv_[cluster_i],
B_grad_[cluster_i],
D_grad_[cluster_i],
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}//end vecchia_approx_
else {//not vecchia_approx_
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxGroupedRE<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
SigmaI_[cluster_i],
Zt_[cluster_i],
cum_num_rand_eff_[cluster_i],
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}//end only_grouped_REs_use_woodbury_identity_
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->cov_pars_[0],
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}
else if (only_one_GP_calculations_on_RE_scale_) {
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
re_comps_[cluster_i],
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}
else {//not only_grouped_REs_use_woodbury_identity_ and not only_one_GP_calculations_on_RE_scale_
(*likelihood_[cluster_i]).template CalcGradNegMargLikelihoodLAApproxStable<T_mat>(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i],
re_comps_[cluster_i],
true,
false,
cov_grad_cluster_i.data(),
empty_unused_vec,
false);
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
cov_grad += cov_grad_cluster_i;
}// end loop over clusters
}//end not gauss_likelihood_
}//end CalcCovParGrad
/*!
* \brief Apply a momentum step
* \param it Iteration number
* \param pars Parameters
* \param pars_lag1 Parameters from last iteration
* \param[out] pars_acc Accelerated parameters
* \param nesterov_acc_rate Nesterov acceleration speed
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param exclude_first_log_scale If true, no momentum is applied to the first value and the momentum step is done on the log-scale for the other values. Default = true
	* \param momentum_offset Number of iterations for which no momentum is applied at the beginning
* \param log_scale If true, the momentum step is done on the log-scale
*/
void ApplyMomentumStep(int it, vec_t& pars, vec_t& pars_lag1, vec_t& pars_acc, double nesterov_acc_rate = 0.5,
int nesterov_schedule_version = 0, bool exclude_first_log_scale = true, int momentum_offset = 2, bool log_scale = false) {
double mu = NesterovSchedule(it, nesterov_schedule_version, nesterov_acc_rate, momentum_offset);
int num_par = (int)pars.size();
if (exclude_first_log_scale) {
pars_acc[0] = pars[0];
pars_acc.segment(1, num_par - 1) = ((mu + 1.) * (pars.segment(1, num_par - 1).array().log()) - mu * (pars_lag1.segment(1, num_par - 1).array().log())).exp().matrix();//Momentum is added on the log scale
}
else {
if (log_scale) {
pars_acc = ((mu + 1.) * (pars.array().log()) - mu * (pars_lag1.array().log())).exp().matrix();
}
else {
pars_acc = (mu + 1) * pars - mu * pars_lag1;
}
}
}
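	// Illustrative note: 'ApplyMomentumStep' computes the extrapolation
	//   pars_acc = pars + mu * (pars - pars_lag1) = (mu + 1) * pars - mu * pars_lag1.
	// On the log scale, the same update is applied to log(pars) and then exponentiated,
	// giving a multiplicative step that keeps positive parameters (variances, ranges) positive.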
/*!
* \brief Calculate gradient for linear fixed-effect coefficients
* \param marg_var Marginal variance parameters sigma^2 (only used for Gaussian data)
* \param beta Linear regression coefficients
* \param[out] grad_beta Gradient for linear regression coefficients
* \param fixed_effects Fixed effects component of location parameter for observed data (only used for non-Gaussian data)
*/
	void CalcLinCoefGrad(double marg_var, const vec_t& beta, vec_t& grad_beta, const double* fixed_effects = nullptr) {
if (gauss_likelihood_) {
const vec_t resid = y_vec_ - (X_ * beta);
SetY(resid.data());
CalcYAux();
vec_t y_aux(num_data_);
GetYAux(y_aux);
grad_beta = (-1. / marg_var) * (X_.transpose()) * y_aux;
//beta += lr * (1. / marg_var) * (X.transpose()) * y_aux;
}
else {
vec_t grad_F(num_data_);
CalcGradFLaplace(grad_F.data(), fixed_effects);
grad_beta = (X_.transpose()) * grad_F;
}
}
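	// Illustrative derivation for the Gaussian case: with negative log-likelihood
	//   nll(beta) = (y - X*beta)^T * Psi^-1 * (y - X*beta) / (2 * sigma2) + const,
	// the gradient is d nll / d beta = -(1/sigma2) * X^T * Psi^-1 * (y - X*beta),
	// which is exactly grad_beta = (-1/marg_var) * X^T * y_aux as computed above.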
/*!
* \brief Update linear fixed-effect coefficients using generalized least squares (GLS)
* \param X Covariate data for linear fixed-effect
* \param[out] beta Linear regression coefficients
*/
void UpdateCoefGLS(den_mat_t& X, vec_t& beta) {
vec_t y_aux(num_data_);
GetYAux(y_aux);
den_mat_t XT_psi_inv_X;
CalcXTPsiInvX(X, XT_psi_inv_X);
beta = XT_psi_inv_X.llt().solve(X.transpose() * y_aux);
}
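	// Illustrative note: 'UpdateCoefGLS' evaluates the generalized least squares estimator
	//   beta_hat = (X^T * Psi^-1 * X)^-1 * X^T * Psi^-1 * y
	// via a Cholesky solve; it assumes y_aux_ = Psi^-1 * y has already been calculated
	// (e.g. by a prior call to 'CalcYAux').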
/*!
* \brief Calculate the Fisher information for covariance parameters. Note: you need to call CalcCovFactor first
* \param cov_pars Covariance parameters
* \param[out] FI Fisher information
* \param transf_scale If true, the derivative is taken on the transformed scale otherwise on the original scale. Default = true
* \param include_error_var If true, the marginal variance parameter is also included, otherwise not
* \param use_saved_psi_inv If false, the inverse covariance matrix Psi^-1 is calculated, otherwise a saved version is used
*/
void CalcFisherInformation(const vec_t& cov_pars, den_mat_t& FI, bool transf_scale = true,
bool include_error_var = false, bool use_saved_psi_inv = false) {
if (include_error_var) {
FI = den_mat_t(num_cov_par_, num_cov_par_);
}
else {
FI = den_mat_t(num_cov_par_ - 1, num_cov_par_ - 1);
}
FI.setZero();
int start_cov_pars = include_error_var ? 1 : 0;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
//Note: if transf_scale==false, then all matrices and derivatives have been calculated on the original scale for the Vecchia approximation, that is why there is no adjustment here
//Calculate auxiliary matrices for use below
sp_mat_t Identity(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
Identity.setIdentity();
sp_mat_t B_inv;
eigen_sp_Lower_sp_RHS_solve(B_[cluster_i], Identity, B_inv, true);//No noticeable difference in (n=500, nn=100/30) compared to using eigen_sp_Lower_sp_RHS_cs_solve()
//eigen_sp_Lower_sp_RHS_cs_solve(B_[cluster_i], Identity, B_inv, true);
sp_mat_t D = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
D.setIdentity();
D.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(-1);
sp_mat_t D_inv_2 = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
D_inv_2.setIdentity();
D_inv_2.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(2);
//Calculate derivative(B) * B^-1
std::vector<sp_mat_t> B_grad_B_inv(num_cov_par_ - 1);
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
B_grad_B_inv[par_nb] = B_grad_[cluster_i][par_nb] * B_inv;
}
//Calculate Fisher information
sp_mat_t D_inv_B_grad_B_inv, B_grad_B_inv_D;
if (include_error_var) {
//First calculate terms for nugget effect / noise variance parameter
if (transf_scale) {//Optimization is done on transformed scale (in particular, log-scale)
//The derivative for the nugget variance on the log scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
FI(0, par_nb + 1) += (double)((D_inv_[cluster_i].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum()) / 2.;
}
}
else {//Original scale for asymptotic covariance matrix
int ind_grad_nugget = num_cov_par_ - 1;
D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_[cluster_i][ind_grad_nugget] * B_inv;
B_grad_B_inv_D = B_grad_[cluster_i][ind_grad_nugget] * B_inv * D;
double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array()).sum());
FI(0, 0) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
B_grad_B_inv_D = B_grad_B_inv[par_nb] * D;
diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array()).sum());
FI(0, par_nb + 1) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
}
}
}
//Remaining covariance parameters
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_B_inv[par_nb];
for (int par_nb_cross = par_nb; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
B_grad_B_inv_D = B_grad_B_inv[par_nb_cross] * D;
double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][par_nb].diagonal().array() * D_grad_[cluster_i][par_nb_cross].diagonal().array()).sum());
FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
}
}
}//end vecchia_approx_
else {//not vecchia_approx_
if (only_grouped_REs_use_woodbury_identity_) {
					//Notation used below: M = Sigma^-1 + ZtZ, Sigma = cov(b), b = latent random effects, L = chol(M), i.e. M = LLt, MInv = M^-1 = L^-T * L^-1
if (!use_saved_psi_inv) {
LInvZtZj_[cluster_i] = std::vector<T_mat>(num_comps_total_);
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
LInvZtZj_[cluster_i][0] = ZtZ_[cluster_i];
LInvZtZj_[cluster_i][0].diagonal().array() /= chol_facts_[cluster_i].diagonal().array();
}
else {
for (int j = 0; j < num_comps_total_; ++j) {
CalcPsiInvSqrtH(ZtZj_[cluster_i][j], LInvZtZj_[cluster_i][j], cluster_i, true);
}
}
}
if (include_error_var) {
if (transf_scale) {//Optimization is done on transformed scale (error variance factored out and log-scale)
//The derivative for the nugget variance on the transformed scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
for (int j = 0; j < num_comps_total_; ++j) {
double trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - LInvZtZj_[cluster_i][j].squaredNorm();
FI(0, j + 1) += trace_PsiInvGradPsi * cov_pars[j + 1] / 2.;
}
}//end transf_scale
else {//not transf_scale
T_mat MInv_ZtZ;//=(Sigma_inv + ZtZ)^-1 * ZtZ
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ == ZtZj_ and L_inv are diagonal
MInv_ZtZ = T_mat(ZtZ_[cluster_i].rows(), ZtZ_[cluster_i].cols());
MInv_ZtZ.setIdentity();//initialize
MInv_ZtZ.diagonal().array() = ZtZ_[cluster_i].diagonal().array() / (chol_facts_[cluster_i].diagonal().array().square());
}
else {
T_mat ZtZ = T_mat(ZtZ_[cluster_i]);//TODO: this step is not needed for sparse matrices (i.e. copying is not required)
MInv_ZtZ = chol_facts_solve_[cluster_i].solve(ZtZ);
}
T_mat MInv_ZtZ_t = MInv_ZtZ.transpose();//TODO: possible without saving MInv_ZtZ.transpose()? -> compiler problem in MInv_ZtZ.cwiseProduct(MInv_ZtZ.transpose())
FI(0, 0) += (num_data_per_cluster_[cluster_i] - 2. * MInv_ZtZ.diagonal().sum() + (double)(MInv_ZtZ.cwiseProduct(MInv_ZtZ_t)).sum()) / (cov_pars[0] * cov_pars[0] * 2.);
for (int j = 0; j < num_comps_total_; ++j) {
T_mat ZjZ_MInv_ZtZ_t = MInv_ZtZ_t * ZtZj_[cluster_i][j];
T_mat ZtZj = T_mat(ZtZj_[cluster_i][j]);
double trace_PsiInvGradPsi;
if (num_comps_total_ > 1) {
T_mat MInv_ZtZj = chol_facts_solve_[cluster_i].solve(ZtZj);
trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - 2. * (double)(LInvZtZj_[cluster_i][j].squaredNorm()) +
(double)(ZjZ_MInv_ZtZ_t.cwiseProduct(MInv_ZtZj)).sum();
}
else {
trace_PsiInvGradPsi = Zj_square_sum_[cluster_i][j] - 2. * (double)(LInvZtZj_[cluster_i][j].squaredNorm()) +
(double)(ZjZ_MInv_ZtZ_t.cwiseProduct(MInv_ZtZ)).sum();
}
FI(0, j + 1) += trace_PsiInvGradPsi / (cov_pars[0] * cov_pars[0] * 2.);
}
}//end not transf_scale
}//end include_error_var
//Remaining covariance parameters
for (int j = 0; j < num_comps_total_; ++j) {
sp_mat_t* Z_j = re_comps_[cluster_i][j]->GetZ();
for (int k = j; k < num_comps_total_; ++k) {
sp_mat_t* Z_k = re_comps_[cluster_i][k]->GetZ();
sp_mat_t Zjt_Zk = (*Z_j).transpose() * (*Z_k);
T_mat LInvZtZj_t_LInvZtZk = LInvZtZj_[cluster_i][j].transpose() * LInvZtZj_[cluster_i][k];
double FI_jk = Zjt_Zk.squaredNorm() + LInvZtZj_t_LInvZtZk.squaredNorm() - 2. * (double)(Zjt_Zk.cwiseProduct(LInvZtZj_t_LInvZtZk)).sum();
if (transf_scale) {
FI_jk *= cov_pars[j + 1] * cov_pars[k + 1];
}
else {
FI_jk /= cov_pars[0] * cov_pars[0];
}
FI(j + start_cov_pars, k + start_cov_pars) += FI_jk / 2.;
}
}
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
T_mat psi_inv;
if (use_saved_psi_inv) {
psi_inv = psi_inv_[cluster_i];
}
else {
CalcPsiInv(psi_inv, cluster_i);
}
if (!transf_scale) {
psi_inv /= cov_pars[0];//psi_inv has been calculated with a transformed parametrization, so we need to divide everything by cov_pars[0] to obtain the covariance matrix
}
//Calculate Psi^-1 * derivative(Psi)
std::vector<T_mat> psi_inv_deriv_psi(num_cov_par_ - 1);
int deriv_par_nb = 0;
for (int j = 0; j < num_comps_total_; ++j) {//there is currently no possibility to loop over the parameters directly
for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
psi_inv_deriv_psi[deriv_par_nb] = psi_inv * *(re_comps_[cluster_i][j]->GetZSigmaZtGrad(jpar, transf_scale, cov_pars[0]));
deriv_par_nb++;
}
}
//Calculate Fisher information
if (include_error_var) {
//First calculate terms for nugget effect / noise variance parameter
if (transf_scale) {//Optimization is done on transformed scale (error variance factored out and log-scale)
//The derivative for the nugget variance on the transformed scale is the original covariance matrix Psi, i.e. psi_inv_grad_psi_sigma2 is the identity matrix.
FI(0, 0) += num_data_per_cluster_[cluster_i] / 2.;
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
FI(0, par_nb + 1) += psi_inv_deriv_psi[par_nb].diagonal().sum() / 2.;
}
}
else {//Original scale for asymptotic covariance matrix
//The derivative for the nugget variance is the identity matrix, i.e. psi_inv_grad_psi_sigma2 = psi_inv.
FI(0, 0) += ((double)(psi_inv.cwiseProduct(psi_inv)).sum()) / 2.;
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
FI(0, par_nb + 1) += ((double)(psi_inv.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
}
}
}
//Remaining covariance parameters
for (int par_nb = 0; par_nb < num_cov_par_ - 1; ++par_nb) {
T_mat psi_inv_grad_psi_par_nb_T = psi_inv_deriv_psi[par_nb].transpose();
FI(par_nb + start_cov_pars, par_nb + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb])).sum()) / 2.;
for (int par_nb_cross = par_nb + 1; par_nb_cross < num_cov_par_ - 1; ++par_nb_cross) {
FI(par_nb + start_cov_pars, par_nb_cross + start_cov_pars) += ((double)(psi_inv_grad_psi_par_nb_T.cwiseProduct(psi_inv_deriv_psi[par_nb_cross])).sum()) / 2.;
}
psi_inv_deriv_psi[par_nb].resize(0, 0);//not needed anymore
psi_inv_grad_psi_par_nb_T.resize(0, 0);
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end not vecchia_approx_
}//end loop over clusters
FI.triangularView<Eigen::StrictlyLower>() = FI.triangularView<Eigen::StrictlyUpper>().transpose();
//for (int i = 0; i < std::min((int)FI.rows(),4); ++i) {//For debugging only
// for (int j = i; j < std::min((int)FI.cols(),4); ++j) {
// Log::REInfo("FI(%d,%d) %g", i, j, FI(i, j));
// }
//}
}
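	// Illustrative note: for a Gaussian likelihood, the Fisher information entries are
	//   FI(j, k) = 0.5 * tr(Psi^-1 * dPsi/dtheta_j * Psi^-1 * dPsi/dtheta_k),
	// and the traces above are evaluated as sums of elementwise products using
	//   tr(A * B) = sum( (A^T).cwiseProduct(B) ).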
/*!
	* \brief Calculate the standard deviations for the MLE of the covariance parameters as the diagonal of the inverse Fisher information (on the original scale and not the transformed scale used in the optimization)
* \param cov_pars MLE of covariance parameters
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCovPar(const vec_t& cov_pars, vec_t& std_dev) {
SetCovParsComps(cov_pars);
CalcCovFactor(true, false, cov_pars[0], true);
den_mat_t FI;
CalcFisherInformation(cov_pars, FI, false, true, false);
std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
/*!
* \brief Calculate the standard deviations for the MLE of the regression coefficients as the diagonal of the inverse Fisher information
* \param cov_pars MLE of covariance parameters
* \param X Covariate data for linear fixed-effect
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCoef(vec_t& cov_pars, const den_mat_t& X, vec_t& std_dev) {
if ((int)std_dev.size() >= num_data_) {
Log::REWarning("Sample size too small to calculate standard deviations for coefficients");
for (int i = 0; i < (int)std_dev.size(); ++i) {
std_dev[i] = std::numeric_limits<double>::quiet_NaN();
}
}
else {
SetCovParsComps(cov_pars);
CalcCovFactor(false, true, 1., false);
den_mat_t FI((int)X.cols(), (int)X.cols());
CalcXTPsiInvX(X, FI);
FI /= cov_pars[0];
std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
}
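	// Illustrative note: the standard deviations are the square roots of the diagonal of the
	// inverse Fisher information; for the coefficients this amounts to
	//   std_dev = sqrt(diag( sigma2 * (X^T * Psi^-1 * X)^-1 ))
	// since FI = X^T * Psi^-1 * X / sigma2 above (with Psi normalized to a unit nugget variance).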
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) for one cluster
* \param cluster_i Cluster index for which prediction are made
* \param num_data_pred Total number of prediction locations (over all clusters)
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
	* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to each cluster
* \param re_group_levels_pred Group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
* \param re_group_rand_coef_data_pred Random coefficient data for grouped REs
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
	* \param predict_cov_mat If true, the predictive/conditional covariance matrix is calculated (default=false) (predict_var and predict_cov_mat cannot both be true)
	* \param predict_var If true, the predictive/conditional variances are calculated (default=false) (predict_var and predict_cov_mat cannot both be true)
* \param[out] mean_pred_id Predictive mean
* \param[out] cov_mat_pred_id Predictive covariance matrix
* \param[out] var_pred_id Predictive variances
*/
void CalcPred(gp_id_t cluster_i,
int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred,
std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const std::vector<std::vector<string_t>>& re_group_levels_pred,
const double* re_group_rand_coef_data_pred,
const den_mat_t& gp_coords_mat_pred,
const double* gp_rand_coef_data_pred,
bool predict_cov_mat,
bool predict_var,
vec_t& mean_pred_id,
T_mat& cov_mat_pred_id,
vec_t& var_pred_id) {
int num_REs_obs, num_REs_pred;
if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
num_REs_pred = (int)re_group_levels_pred[0].size();
num_REs_obs = re_comps_[cluster_i][0]->GetNumUniqueREs();
}
else if (only_one_GP_calculations_on_RE_scale_) {
num_REs_pred = (int)gp_coords_mat_pred.rows();
num_REs_obs = re_comps_[cluster_i][0]->GetNumUniqueREs();
}
else {
num_REs_pred = num_data_per_cluster_pred[cluster_i];
num_REs_obs = num_data_per_cluster_[cluster_i];
}
if (predict_var) {
if (gauss_likelihood_) {
var_pred_id = vec_t::Ones(num_REs_pred);//nugget effect
}
else {
var_pred_id = vec_t::Zero(num_REs_pred);
}
}
if (predict_cov_mat) {
cov_mat_pred_id = T_mat(num_REs_pred, num_REs_pred);
if (gauss_likelihood_) {
cov_mat_pred_id.setIdentity();//nugget effect
}
else {
cov_mat_pred_id.setZero();
}
}
T_mat cross_cov(num_REs_pred, num_REs_obs);//Cross covariance between prediction and observation points
//Calculate covariance matrices
int cn = 0;//component number
bool dont_add_but_overwrite = true;
//Grouped random effects
if (num_re_group_ > 0) {
if (only_one_grouped_RE_calculations_on_RE_scale_ || only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][cn]);
re_comp->AddPredCovMatrices(re_group_levels_pred[0],
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
dont_add_but_overwrite,
true,
nullptr);
dont_add_but_overwrite = false;
if (predict_var) {
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, nullptr);
}
}
else {
for (int j = 0; j < num_re_group_; ++j) {
std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][cn]);
std::vector<re_group_t> group_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
group_data.push_back(re_group_levels_pred[j][id]);
}
re_comp->AddPredCovMatrices(group_data,
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
dont_add_but_overwrite,
false,
nullptr);
dont_add_but_overwrite = false;
if (predict_var) {
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, nullptr);
}
cn += 1;
}
if (num_re_group_rand_coef_ > 0) {
//Random coefficient grouped random effects
for (int j = 0; j < num_re_group_rand_coef_; ++j) {
std::shared_ptr<RECompGroup<T_mat>> re_comp = std::dynamic_pointer_cast<RECompGroup<T_mat>>(re_comps_[cluster_i][cn]);
std::vector<re_group_t> group_data;
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
rand_coef_data.push_back(re_group_rand_coef_data_pred[j * num_data_pred + id]);
group_data.push_back(re_group_levels_pred[ind_effect_group_rand_coef_[j] - 1][id]);//subtract 1 since counting starts at one for this index
}
re_comp->AddPredCovMatrices(group_data,
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
false,
false,
rand_coef_data.data());
if (predict_var) {
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, rand_coef_data.data());
}
cn += 1;
}
}
}
}//end grouped random effects
//Gaussian process
if (num_gp_ > 0) {
std::shared_ptr<RECompGP<T_mat>> re_comp_base = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_[cluster_i][cn]);
re_comp_base->AddPredCovMatrices(re_comp_base->coords_,
gp_coords_mat_pred,
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
dont_add_but_overwrite,
nullptr);
dont_add_but_overwrite = false;
if (predict_var) {
re_comp_base->AddPredUncondVar(var_pred_id.data(), num_REs_pred, nullptr);
}
cn += 1;
if (num_gp_rand_coef_ > 0) {
std::shared_ptr<RECompGP<T_mat>> re_comp;
//Random coefficient Gaussian processes
for (int j = 0; j < num_gp_rand_coef_; ++j) {
re_comp = std::dynamic_pointer_cast<RECompGP<T_mat>>(re_comps_[cluster_i][cn]);
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
}
re_comp->AddPredCovMatrices(re_comp_base->coords_,
gp_coords_mat_pred,
cross_cov,
cov_mat_pred_id,
predict_cov_mat,
false,
rand_coef_data.data());
if (predict_var) {
re_comp->AddPredUncondVar(var_pred_id.data(), num_REs_pred, rand_coef_data.data());
}
cn += 1;
}
}
}
//Calculate predictive means and covariances
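//For Gaussian data, the predictive mean is cross_cov * Sigma^-1 * y, where
//y_aux_ = Sigma^-1 * y has already been precomputed on the observed data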
if (gauss_likelihood_) {//Gaussian data
if (only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
vec_t Zt_y_aux = vec_t::Zero(num_REs_obs);
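//Accumulate Z^T * y_aux via thread-private partial sums merged in a critical
//section (a manual reduction, since Eigen vectors are not directly supported in OpenMP reduction clauses)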
#pragma omp parallel
{
vec_t Zt_y_aux_private = vec_t::Zero(num_REs_obs);
#pragma omp for
for (data_size_t i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
Zt_y_aux_private[re_comps_[cluster_i][0]->random_effects_indices_of_data_[i]] += y_aux_[cluster_i][i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_REs_obs; ++i_re) {
Zt_y_aux[i_re] += Zt_y_aux_private[i_re];
}
}//end omp critical
}//end omp parallel
mean_pred_id = cross_cov * Zt_y_aux;
}//end only_one_grouped_RE_calculations_on_RE_scale_for_prediction_
else {
mean_pred_id = cross_cov * y_aux_[cluster_i];
}
if ((predict_cov_mat || predict_var) && only_one_grouped_RE_calculations_on_RE_scale_for_prediction_) {
sp_mat_t* Z = re_comps_[cluster_i][0]->GetZ();
T_mat cross_cov_temp = cross_cov;
cross_cov = cross_cov_temp * (*Z).transpose();
cross_cov_temp.resize(0, 0);
//TODO (low-prio): things could be done more efficiently (using random_effects_indices_of_data_) as ZtZ_ is diagonal
}
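//Predictive (co-)variances: subtract cross_cov * Sigma^-1 * cross_cov^T from the
//prior (co-)variances; the Woodbury identity is used if there are only grouped random effects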
if (predict_cov_mat){
if (only_grouped_REs_use_woodbury_identity_) {
T_mat ZtM_aux = T_mat(Zt_[cluster_i] * cross_cov.transpose());
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
ZtM_aux = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtM_aux;
cov_mat_pred_id -= (cross_cov * T_mat(cross_cov.transpose()) - ZtM_aux.transpose() * ZtM_aux);
}
else {
cov_mat_pred_id -= (cross_cov * T_mat(cross_cov.transpose()) - ZtM_aux.transpose() * chol_facts_solve_[cluster_i].solve(ZtM_aux));
}
}
else {
cov_mat_pred_id -= (cross_cov * (chol_facts_solve_[cluster_i].solve(T_mat(cross_cov.transpose()))));
}
}//end predict_cov_mat
if (predict_var) {
T_mat M_aux2;
if (only_grouped_REs_use_woodbury_identity_) {
T_mat ZtM_aux = T_mat(Zt_[cluster_i] * cross_cov.transpose());
if (num_re_group_total_ == 1 && num_comps_total_ == 1) {//only one random effect -> ZtZ_ is diagonal
M_aux2 = chol_facts_[cluster_i].diagonal().array().inverse().matrix().asDiagonal() * ZtM_aux;
}
else {
CalcLInvH(chol_facts_[cluster_i], ZtM_aux, M_aux2, true);
}
M_aux2 = M_aux2.cwiseProduct(M_aux2);
cross_cov = cross_cov.cwiseProduct(cross_cov);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_REs_pred; ++i) {
var_pred_id[i] -= cross_cov.row(i).sum() - M_aux2.col(i).sum();
}
}//end only_grouped_REs_use_woodbury_identity_
else {//not only_grouped_REs_use_woodbury_identity_
T_mat M_auxT = cross_cov.transpose();
CalcLInvH(chol_facts_[cluster_i], M_auxT, M_aux2, true);
M_aux2 = M_aux2.cwiseProduct(M_aux2);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_REs_pred; ++i) {
var_pred_id[i] -= M_aux2.col(i).sum();
}
}//end not only_grouped_REs_use_woodbury_identity_
}//end predict_var
}//end gauss_likelihood_
if (!gauss_likelihood_) {//not gauss_likelihood_
const double* fixed_effects_cluster_i_ptr = nullptr;
// Note that fixed_effects_cluster_i_ptr is not used since calc_mode == false
// The mode has been calculated already before in the Predict() function above
if (vecchia_approx_) {
likelihood_[cluster_i]->PredictLAApproxVecchia(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
B_[cluster_i],
D_inv_[cluster_i],
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
else {
if (only_grouped_REs_use_woodbury_identity_ && !only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->PredictLAApproxGroupedRE(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
SigmaI_[cluster_i],
Zt_[cluster_i],
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
else if (only_one_grouped_RE_calculations_on_RE_scale_) {
likelihood_[cluster_i]->PredictLAApproxOnlyOneGroupedRECalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
re_comps_[cluster_i][0]->cov_pars_[0],
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
else if (only_one_GP_calculations_on_RE_scale_) {
likelihood_[cluster_i]->PredictLAApproxOnlyOneGPCalculationsOnREScale(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i], //Note: ZSigmaZt_ contains only Sigma if only_one_GP_calculations_on_RE_scale_==true
re_comps_[cluster_i][0]->random_effects_indices_of_data_.data(),
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
else {
likelihood_[cluster_i]->PredictLAApproxStable(y_[cluster_i].data(),
y_int_[cluster_i].data(),
fixed_effects_cluster_i_ptr,
num_data_per_cluster_[cluster_i],
ZSigmaZt_[cluster_i],
cross_cov,
mean_pred_id,
cov_mat_pred_id,
var_pred_id,
predict_cov_mat,
predict_var,
false);
}
}
}//end not gauss_likelihood_
}//end CalcPred
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which predictions are made
* \param num_data_pred Total number of prediction locations (over all clusters)
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices of the data points that belong to each cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T_mat& cov_mat_pred_id) {
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
//Find nearest neighbors
den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_pred_cli);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_pred_cli);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_pred_cli);
if (CondObsOnly) {
find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, num_data_cli - 1);
}
else {//find neighbors among both the observed and prediction locations
find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, -1);
}
//Random coefficients
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_pred_cli);
if (num_gp_rand_coef_ > 0) {
for (int j = 0; j < num_gp_rand_coef_; ++j) {
std::vector<double> rand_coef_data = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_;//First entries are the observed data, then the predicted data
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {//TODO: maybe do the following in parallel? (see CalcPredVecchiaPredictedFirstOrder)
rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
}
#pragma omp for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
if (j == 0) {
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
}
int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[num_data_cli + i];
if ((num_data_cli + i) > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
// Determine Triplet for initializing Bpo and Bp
std::vector<Triplet_t> entries_init_Bpo, entries_init_Bp;
for (int i = 0; i < num_data_pred_cli; ++i) {
entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
entries_init_Bpo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
else {//nearest neighbor belongs to predicted data
entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli, 0.));
}
}
}
sp_mat_t Bpo(num_data_pred_cli, num_data_cli);
sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
Bpo.setFromTriplets(entries_init_Bpo.begin(), entries_init_Bpo.end());//initialize matrices (in order that the code below can be run in parallel)
Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
sp_mat_t Dp(num_data_pred_cli, num_data_pred_cli);
Dp.setIdentity();//Put 1 on the diagonal (for nugget effect)
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
for (int j = 0; j < num_gp_total_; ++j) {
if (j == 0) {
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
}
}//end loop over components j
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
Dp.coeffRef(i, i) += d_comp_j;
}
//2. remaining terms
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
Bpo.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli) -= A_i(0, inn);
}
}
Dp.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}//end loop over data i
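//With the joint Vecchia factorization (observed data first), the conditional mean is
//mu_p = -Bp^-1 * Bpo * y and the conditional covariance is Bp^-1 * Dp * Bp^-T
//(Bp is the identity if CondObsOnly)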
mean_pred_id = -Bpo * y_[cluster_i];
if (!CondObsOnly) {
sp_L_solve(Bp.valuePtr(), Bp.innerIndexPtr(), Bp.outerIndexPtr(), num_data_pred_cli, mean_pred_id.data());
}
if (predict_cov_mat) {
if (CondObsOnly) {
cov_mat_pred_id = Dp;
}
else {
sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
Identity.setIdentity();
sp_mat_t Bp_inv;
eigen_sp_Lower_sp_RHS_cs_solve(Bp, Identity, Bp_inv, true);
cov_mat_pred_id = T_mat(Bp_inv * Dp * Bp_inv.transpose());
}
}
}//end CalcPredVecchiaObservedFirstOrder
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when prediction locations appear first in the ordering
* \param cluster_i Cluster index for which predictions are made
* \param num_data_pred Total number of prediction locations (over all clusters)
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices of the data points that belong to each cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaPredictedFirstOrder(gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T_mat& cov_mat_pred_id) {
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
int num_data_tot = num_data_cli + num_data_pred_cli;
//Find nearest neighbors
den_mat_t coords_all(num_data_tot, dim_gp_coords_);
coords_all << gp_coords_mat_pred, gp_coords_mat_obs;
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_tot);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_tot);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_tot);
find_nearest_neighbors_Veccia_fast(coords_all, num_data_tot, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
//Prepare data for random coefficients
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_tot);
if (num_gp_rand_coef_ > 0) {
for (int j = 0; j < num_gp_rand_coef_; ++j) {
std::vector<double> rand_coef_data(num_data_tot);//First entries are the predicted data, then the observed data
#pragma omp for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
rand_coef_data[i] = gp_rand_coef_data_pred[j * num_data_pred + data_indices_per_cluster_pred[cluster_i][i]];
}
#pragma omp for schedule(static)
for (int i = 0; i < num_data_cli; ++i) {
rand_coef_data[num_data_pred_cli + i] = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_[i];
}
#pragma omp for schedule(static)
for (int i = 0; i < num_data_tot; ++i) {
if (j == 0) {
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
}
int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[i];
if (i > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
// Determine Triplet for initializing Bo, Bop, and Bp
std::vector<Triplet_t> entries_init_Bo, entries_init_Bop, entries_init_Bp;
for (int i = 0; i < num_data_pred_cli; ++i) {
entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
}
for (int i = 0; i < num_data_cli; ++i) {
entries_init_Bo.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i + num_data_pred_cli].size(); ++inn) {
if (nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
entries_init_Bop.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn], 0.));
}
else {//nearest neighbor belongs to observed data
entries_init_Bo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] - num_data_pred_cli, 0.));
}
}
}
sp_mat_t Bo(num_data_cli, num_data_cli);
sp_mat_t Bop(num_data_cli, num_data_pred_cli);
sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
Bo.setFromTriplets(entries_init_Bo.begin(), entries_init_Bo.end());//initialize matrices (in order that the code below can be run in parallel)
Bop.setFromTriplets(entries_init_Bop.begin(), entries_init_Bop.end());
Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
sp_mat_t Do_inv(num_data_cli, num_data_cli);
sp_mat_t Dp_inv(num_data_pred_cli, num_data_pred_cli);
Do_inv.setIdentity();//Put 1 on the diagonal (for nugget effect)
Dp_inv.setIdentity();
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_tot; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
if (i > 0) {
for (int j = 0; j < num_gp_total_; ++j) {
if (j == 0) {
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
}
}//end loop over components j
}
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) += d_comp_j;
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) += d_comp_j;
}
}
//2. remaining terms
if (i > 0) {
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
if (i < num_data_pred_cli) {
Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
if (nearest_neighbors_cluster_i[i][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
Bop.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
Bo.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn] - num_data_pred_cli) -= A_i(0, inn);
}
}
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) = 1 / Dp_inv.coeffRef(i, i);
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) = 1 / Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli);
}
}//end loop over data i
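//Vecchia approximation with prediction locations first: the conditional precision
//matrix is cond_prec = Bp^T * Dp^-1 * Bp + Bop^T * Do^-1 * Bop and the conditional
//mean solves cond_prec * mean = -Bop^T * Do^-1 * Bo * y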
sp_mat_t cond_prec = Bp.transpose() * Dp_inv * Bp + Bop.transpose() * Do_inv * Bop;
chol_sp_mat_t CholFact;
CholFact.compute(cond_prec);
if (predict_cov_mat) {
sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
Identity.setIdentity();
sp_mat_t cond_prec_chol = CholFact.matrixL();
sp_mat_t cond_prec_chol_inv;
eigen_sp_Lower_sp_RHS_cs_solve(cond_prec_chol, Identity, cond_prec_chol_inv, true);
cov_mat_pred_id = T_mat(cond_prec_chol_inv.transpose() * cond_prec_chol_inv);
mean_pred_id = -cov_mat_pred_id * Bop.transpose() * Do_inv * Bo * y_[cluster_i];
}
else {
mean_pred_id = -CholFact.solve(Bop.transpose() * Do_inv * Bo * y_[cluster_i]);
}
}//end CalcPredVecchiaPredictedFirstOrder
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the latent process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which predictions are made
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaLatentObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i,
std::map<gp_id_t, int>& num_data_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T_mat& cov_mat_pred_id) {
if (num_gp_rand_coef_ > 0) {
Log::REFatal("The Vecchia approximation for latent process(es) is currently not implemented when having random coefficients");
}
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
int num_data_tot = num_data_cli + num_data_pred_cli;
//Find nearest neighbors
den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
//Determine number of unique observation locations
std::vector<int> uniques;//unique points
std::vector<int> unique_idx;//used for constructing incidence matrix Z_ if there are duplicates
DetermineUniqueDuplicateCoords(gp_coords_mat_obs, num_data_cli, uniques, unique_idx);
int num_coord_unique_obs = (int)uniques.size();
//Determine unique locations (observed and predicted)
DetermineUniqueDuplicateCoords(coords_all, num_data_tot, uniques, unique_idx);
int num_coord_unique = (int)uniques.size();
den_mat_t coords_all_unique;
if ((int)uniques.size() == num_data_tot) {//no multiple observations at the same locations -> no incidence matrix needed
coords_all_unique = coords_all;
}
else {
coords_all_unique = coords_all(uniques, Eigen::all);
}
//Determine incidence matrices
sp_mat_t Z_o = sp_mat_t(num_data_cli, uniques.size());
sp_mat_t Z_p = sp_mat_t(num_data_pred_cli, uniques.size());
for (int i = 0; i < num_data_tot; ++i) {
if (i < num_data_cli) {
Z_o.insert(i, unique_idx[i]) = 1.;
}
else {
Z_p.insert(i - num_data_cli, unique_idx[i]) = 1.;
}
}
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_coord_unique);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_coord_unique);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_coord_unique);
if (CondObsOnly) {//find neighbors among the observed locations only
find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, num_coord_unique_obs - 1);
}
else {//find neighbors among both the observed and prediction locations
find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
}
// Determine Triplets for initializing B
std::vector<Triplet_t> entries_init_B;
for (int i = 0; i < num_coord_unique; ++i) {
entries_init_B.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
entries_init_B.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
}
sp_mat_t B(num_coord_unique, num_coord_unique);
B.setFromTriplets(entries_init_B.begin(), entries_init_B.end());//initialize matrices (in order that the code below can be run in parallel)
sp_mat_t D(num_coord_unique, num_coord_unique);
D.setIdentity();
D.diagonal().array() = 0.;
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_coord_unique; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just as mock argument for functions below
if (i > 0) {
re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
D.coeffRef(i, i) = re_comps_[cluster_i][ind_intercept_gp_]->cov_pars_[0];
//2. remaining terms
if (i > 0) {
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
B.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
D.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
}//end loop over data i
//Calculate D_inv and B_inv in order to calculate Sigma and Sigma^-1
sp_mat_t D_inv(num_coord_unique, num_coord_unique);
D_inv.setIdentity();
D_inv.diagonal().array() = D.diagonal().array().pow(-1);
sp_mat_t Identity_all(num_coord_unique, num_coord_unique);
Identity_all.setIdentity();
sp_mat_t B_inv;
eigen_sp_Lower_sp_RHS_cs_solve(B, Identity_all, B_inv, true);
//Calculate inverse of covariance matrix for observed data using the Woodbury identity
sp_mat_t Z_o_T = Z_o.transpose();
sp_mat_t M_aux_Woodbury = B.transpose() * D_inv * B + Z_o_T * Z_o;
chol_sp_mat_t CholFac_M_aux_Woodbury;
CholFac_M_aux_Woodbury.compute(M_aux_Woodbury);
if (predict_cov_mat) {
//Using Eigen's solver
sp_mat_t M_aux_Woodbury2 = CholFac_M_aux_Woodbury.solve(Z_o_T);
sp_mat_t Identity_obs(num_data_cli, num_data_cli);
Identity_obs.setIdentity();
sp_mat_t ZoSigmaZoT_plusI_Inv = -Z_o * M_aux_Woodbury2 + Identity_obs;
sp_mat_t ZpSigmaZoT = Z_p * B_inv * D * B_inv.transpose() * Z_o_T;
sp_mat_t M_aux = ZpSigmaZoT * ZoSigmaZoT_plusI_Inv;
mean_pred_id = M_aux * y_[cluster_i];
sp_mat_t Identity_pred(num_data_pred_cli, num_data_pred_cli);
Identity_pred.setIdentity();
cov_mat_pred_id = T_mat(Z_p * B_inv * D * B_inv.transpose() * Z_p.transpose() + Identity_pred - M_aux * ZpSigmaZoT.transpose());
}
else {
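//Woodbury identity: (Z_o * Sigma * Z_o^T + I)^-1 * y = y - Z_o * M_aux_Woodbury^-1 * Z_o^T * y,
//where M_aux_Woodbury = B^T * D^-1 * B + Z_o^T * Z_o and Sigma = B^-1 * D * B^-T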
vec_t resp_aux = Z_o_T * y_[cluster_i];
vec_t resp_aux2 = CholFac_M_aux_Woodbury.solve(resp_aux);
resp_aux = y_[cluster_i] - Z_o * resp_aux2;
mean_pred_id = Z_p * B_inv * D * B_inv.transpose() * Z_o_T * resp_aux;
}
}//end CalcPredVecchiaLatentObservedFirstOrder
friend class REModel;
};
} // namespace GPBoost
#endif // GPB_RE_MODEL_TEMPLATE_H_
|
OMPIRBuilder.h | //===- IR/OpenMPIRBuilder.h - OpenMP encoding builder for LLVM IR - C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the OpenMPIRBuilder class and helpers used as a convenient
// way to create LLVM instructions for OpenMP directives.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#define LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/Support/Allocator.h"
#include <forward_list>
namespace llvm {
class CanonicalLoopInfo;
/// An interface to create LLVM-IR for OpenMP directives.
///
/// Each OpenMP directive has a corresponding public generator method.
class OpenMPIRBuilder {
public:
/// Create a new OpenMPIRBuilder operating on the given module \p M. This will
/// not have an effect on \p M (see initialize).
OpenMPIRBuilder(Module &M) : M(M), Builder(M.getContext()) {}
~OpenMPIRBuilder();
/// Initialize the internal state; this will put structure types and
/// potentially other helpers into the underlying module. Must be called
/// before any other method and only once!
void initialize();
/// Finalize the underlying module, e.g., by outlining regions.
/// \param Fn The function to be finalized. If not used,
/// all functions are finalized.
/// \param AllowExtractorSinking Flag to include sinking instructions,
/// emitted by CodeExtractor, in the
/// outlined region. Default is false.
void finalize(Function *Fn = nullptr, bool AllowExtractorSinking = false);
/// Add attributes known for \p FnID to \p Fn.
void addAttributes(omp::RuntimeFunction FnID, Function &Fn);
/// Type used throughout for insertion points.
using InsertPointTy = IRBuilder<>::InsertPoint;
/// Callback type for variable finalization (think destructors).
///
/// \param CodeGenIP is the insertion point at which the finalization code
/// should be placed.
///
/// A finalize callback knows about all objects that need finalization, e.g.
/// destruction, when the scope of the currently generated construct is left
/// at the time, and location, the callback is invoked.
using FinalizeCallbackTy = std::function<void(InsertPointTy CodeGenIP)>;
struct FinalizationInfo {
/// The finalization callback provided by the last in-flight invocation of
/// createXXXX for the directive of kind DK.
FinalizeCallbackTy FiniCB;
/// The directive kind of the innermost directive that has an associated
/// region which might require finalization when it is left.
omp::Directive DK;
/// Flag to indicate if the directive is cancellable.
bool IsCancellable;
};
/// Push a finalization callback on the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void pushFinalizationCB(const FinalizationInfo &FI) {
FinalizationStack.push_back(FI);
}
/// Pop the last finalization callback from the finalization stack.
///
/// NOTE: Temporary solution until Clang CG is gone.
void popFinalizationCB() { FinalizationStack.pop_back(); }
/// Callback type for body (=inner region) code generation
///
/// The callback takes code locations as arguments, each describing a
/// location at which code might need to be generated or a location that is
/// the target of control transfer.
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the body code should be
/// placed.
/// \param ContinuationBB is the basic block target to leave the body.
///
/// Note that all blocks pointed to by the arguments have terminators.
using BodyGenCallbackTy =
function_ref<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
// This is created primarily for the sections construct, as llvm::function_ref
// (BodyGenCallbackTy) is not storable (as described in the comments of the
// function_ref class): function_ref contains a non-owning reference to the
// callable.
using StorableBodyGenCallbackTy =
std::function<void(InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
BasicBlock &ContinuationBB)>;
/// Callback type for loop body code generation.
///
/// \param CodeGenIP is the insertion point where the loop's body code must be
/// placed. This will be a dedicated BasicBlock with a
/// conditional branch from the loop condition check and
/// terminated with an unconditional branch to the loop
/// latch.
/// \param IndVar is the induction variable usable at the insertion point.
using LoopBodyGenCallbackTy =
function_ref<void(InsertPointTy CodeGenIP, Value *IndVar)>;
/// Callback type for variable privatization (think copy & default
/// constructor).
///
/// \param AllocaIP is the insertion point at which new alloca instructions
/// should be placed.
/// \param CodeGenIP is the insertion point at which the privatization code
/// should be placed.
/// \param Original The value being copied/created, should not be used in the
/// generated IR.
/// \param Inner The equivalent of \p Original that should be used in the
/// generated IR; this is equal to \p Original if the value is
/// a pointer and can thus be passed directly, otherwise it is
/// an equivalent but different value.
/// \param ReplVal The replacement value, thus a copy or new created version
/// of \p Inner.
///
/// \returns The new insertion point where code generation continues and
/// \p ReplVal the replacement value.
using PrivatizeCallbackTy = function_ref<InsertPointTy(
InsertPointTy AllocaIP, InsertPointTy CodeGenIP, Value &Original,
Value &Inner, Value *&ReplVal)>;
/// Description of a LLVM-IR insertion point (IP) and a debug/source location
/// (filename, line, column, ...).
struct LocationDescription {
template <typename T, typename U>
LocationDescription(const IRBuilder<T, U> &IRB)
: IP(IRB.saveIP()), DL(IRB.getCurrentDebugLocation()) {}
LocationDescription(const InsertPointTy &IP) : IP(IP) {}
LocationDescription(const InsertPointTy &IP, const DebugLoc &DL)
: IP(IP), DL(DL) {}
InsertPointTy IP;
DebugLoc DL;
};
/// Emitter methods for OpenMP directives.
///
///{
/// Generator for '#omp barrier'
///
/// \param Loc The location where the barrier directive was encountered.
/// \param DK The kind of directive that caused the barrier.
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy createBarrier(const LocationDescription &Loc, omp::Directive DK,
bool ForceSimpleCall = false,
bool CheckCancelFlag = true);
/// Generator for '#omp cancel'
///
/// \param Loc The location where the directive was encountered.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param CanceledDirective The kind of directive that is canceled.
///
/// \returns The insertion point after the barrier.
InsertPointTy createCancel(const LocationDescription &Loc, Value *IfCondition,
omp::Directive CanceledDirective);
/// Generator for '#omp parallel'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param BodyGenCB Callback that will generate the region code.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IfCondition The evaluated 'if' clause expression, if any.
/// \param NumThreads The evaluated 'num_threads' clause expression, if any.
/// \param ProcBind The value of the 'proc_bind' clause (see ProcBindKind).
/// \param IsCancellable Flag to indicate a cancellable parallel region.
///
/// \returns The insertion position *after* the parallel.
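///
/// A rough usage sketch (illustrative only; all names are hypothetical and
/// privatization/finalization logic is omitted):
/// \code
///   auto BodyGenCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
///                        BasicBlock &ContinuationBB) {
///     Builder.restoreIP(CodeGenIP);
///     // Emit the parallel region body here.
///   };
///   auto PrivCB = [&](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
///                     Value &Orig, Value &Inner, Value *&ReplVal) {
///     ReplVal = &Inner; // No privatization in this sketch.
///     return CodeGenIP;
///   };
///   auto FiniCB = [&](InsertPointTy CodeGenIP) { /* finalize copies */ };
///   InsertPointTy AfterIP = OMPBuilder.createParallel(
///       Builder, AllocaIP, BodyGenCB, PrivCB, FiniCB,
///       /*IfCondition=*/nullptr, /*NumThreads=*/nullptr,
///       omp::ProcBindKind::OMP_PROC_BIND_default, /*IsCancellable=*/false);
/// \endcode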
IRBuilder<>::InsertPoint
createParallel(const LocationDescription &Loc, InsertPointTy AllocaIP,
BodyGenCallbackTy BodyGenCB, PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, Value *IfCondition,
Value *NumThreads, omp::ProcBindKind ProcBind,
bool IsCancellable);
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// This generator operates on the logical iteration space of the loop, i.e.
/// the caller only has to provide the trip count of the loop as defined by
/// base language semantics. The trip count is interpreted as an unsigned
/// integer. The induction variable passed to \p BodyGenCB will be of the same
/// type and run from 0 to \p TripCount - 1. It is up to the callback to
/// convert the logical iteration variable to the loop counter variable in the
/// loop body.
///
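/// A minimal usage sketch (illustrative only; OMPBuilder and Builder are
/// assumed to be an initialized OpenMPIRBuilder and an IRBuilder<> positioned
/// where the loop should be emitted):
/// \code
///   Value *TripCount = Builder.getInt32(128);
///   auto BodyGenCB = [&](InsertPointTy CodeGenIP, Value *IV) {
///     Builder.restoreIP(CodeGenIP);
///     // Emit the loop body here; IV runs from 0 to TripCount - 1.
///   };
///   CanonicalLoopInfo *CLI =
///       OMPBuilder.createCanonicalLoop(Builder, BodyGenCB, TripCount);
/// \endcode
///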
/// \param Loc The insert and source location description. The insert
/// location can be between two instructions or the end of a
/// degenerate block (e.g. a BB under construction).
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param TripCount Number of iterations the loop body is executed.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *TripCount,
const Twine &Name = "loop");
/// Generator for the control flow structure of an OpenMP canonical loop.
///
/// Instead of a logical iteration space, this allows specifying user-defined
/// loop counter values using increment, upper- and lower bounds. To
/// disambiguate the terminology when counting downwards, instead of lower
/// bounds we use \p Start for the loop counter value in the first body
/// iteration.
///
/// Consider the following limitations:
///
/// * A loop counter space over all integer values of its bit-width cannot be
///   represented. E.g., using uint8_t, a loop trip count of 256 cannot be
///   stored into an 8-bit integer:
///
/// DO I = 0, 255, 1
///
/// * Unsigned wrapping is only supported when wrapping only "once"; E.g.
/// effectively counting downwards:
///
/// for (uint8_t i = 100u; i > 0; i += 127u)
///
///
/// TODO: May need to add additional parameters to represent:
///
/// * Allow representing downcounting with unsigned integers.
///
/// * Sign of the step and the comparison operator might disagree:
///
/// for (int i = 0; i < 42; i -= 1u)
///
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the loop body code.
/// \param Start Value of the loop counter for the first iteration.
/// \param Stop Loop counter values past this will stop the loop.
/// \param Step Loop counter increment after each iteration; negative
/// means counting down.
/// \param IsSigned Whether Start, Stop and Step are signed integers.
/// \param InclusiveStop Whether \p Stop itself is a valid value for the loop
/// counter.
/// \param ComputeIP Insertion point for instructions computing the trip
/// count. Can be used to ensure the trip count is available
/// at the outermost loop of a loop nest. If not set,
/// defaults to the preheader of the generated loop.
/// \param Name Base name used to derive BB and instruction names.
///
/// \returns An object representing the created control flow structure which
/// can be used for loop-associated directives.
CanonicalLoopInfo *createCanonicalLoop(const LocationDescription &Loc,
LoopBodyGenCallbackTy BodyGenCB,
Value *Start, Value *Stop, Value *Step,
bool IsSigned, bool InclusiveStop,
InsertPointTy ComputeIP = {},
const Twine &Name = "loop");
/// Collapse a loop nest into a single loop.
///
/// Merges loops of a loop nest into a single CanonicalLoopNest representation
/// that has the same number of innermost loop iterations as the origin loop
/// nest. The induction variables of the input loops are derived from the
/// collapsed loop's induction variable. This is intended to be used to
/// implement OpenMP's collapse clause. Before applying a directive,
/// collapseLoops normalizes a loop nest to contain only a single loop, so the
/// directive's implementation does not need to handle multiple loops itself.
/// This does not remove the need for directives to handle loop-nest-specific
/// features themselves, such as the ordered(<n>) clause or the simd
/// schedule-clause modifier of the worksharing-loop directive.
///
/// Example:
/// \code
/// for (int i = 0; i < 7; ++i) // Canonical loop "i"
/// for (int j = 0; j < 9; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After collapsing with Loops={i,j}, the loop is changed to
/// \code
/// for (int ij = 0; ij < 63; ++ij) {
/// int i = ij / 9;
/// int j = ij % 9;
/// body(i, j);
/// }
/// \endcode
///
/// In the current implementation, the following limitations apply:
///
/// * All input loops have an induction variable of the same type.
///
/// * The collapsed loop will have the same trip count integer type as the
/// input loops. Therefore it is possible that the collapsed loop cannot
/// represent all iterations of the input loops. For instance, assuming a
/// 32 bit integer type, and two input loops both iterating 2^16 times, the
///   theoretical trip count of the collapsed loop would be 2^32 iterations,
///   which cannot be represented in a 32-bit integer. Behavior is undefined
/// in this case.
///
/// * The trip counts of every input loop must be available at \p ComputeIP.
/// Non-rectangular loops are not yet supported.
///
/// * At each nest level, code between a surrounding loop and its nested loop
/// is hoisted into the loop body, and such code will be executed more
/// often than before collapsing (or not at all if any inner loop iteration
/// has a trip count of 0). This is permitted by the OpenMP specification.
///
/// \param DL Debug location for instructions added for collapsing,
/// such as instructions to compute/derive the input loop's
/// induction variables.
/// \param Loops Loops in the loop nest to collapse. Loops are specified
/// from outermost-to-innermost and every control flow of a
/// loop's body must pass through its directly nested loop.
/// \param ComputeIP Where additional instructions that compute the collapsed
///                  trip count are inserted. If not set, defaults to before
///                  the generated loop.
///
/// \returns The CanonicalLoopInfo object representing the collapsed loop.
CanonicalLoopInfo *collapseLoops(DebugLoc DL,
ArrayRef<CanonicalLoopInfo *> Loops,
InsertPointTy ComputeIP);
/// Modifies the canonical loop to be a statically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop, and calls an OpenMP runtime finalization function after the loop.
///
/// TODO: Workshare loops with static scheduling may contain up to two loops
/// that fulfill the requirements of an OpenMP canonical loop. One for
/// iterating over all iterations of a chunk and another one for iterating
/// over all chunks that are executed on the same thread. Returning
/// CanonicalLoopInfo objects representing them may eventually be useful for
/// the apply clause planned in OpenMP 6.0, but currently whether these are
/// canonical loops is irrelevant.
///
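/// A sketch of typical use together with createCanonicalLoop (illustrative
/// only; all names are hypothetical):
/// \code
///   CanonicalLoopInfo *CLI =
///       OMPBuilder.createCanonicalLoop(Loc, BodyGenCB, TripCount);
///   InsertPointTy AfterIP = OMPBuilder.applyStaticWorkshareLoop(
///       DL, CLI, AllocaIP, /*NeedsBarrier=*/true);
/// \endcode
///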
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyStaticWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a dynamically-scheduled workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls to an OpenMP
/// runtime function in the preheader to obtain, and then in each iteration
/// to update the loop counter.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param SchedType Type of scheduling to be passed to the init function.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
/// \param Chunk The size of loop chunk considered as a unit when
/// scheduling. If \p nullptr, defaults to 1.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyDynamicWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP,
omp::OMPScheduleType SchedType,
bool NeedsBarrier,
Value *Chunk = nullptr);
/// Modifies the canonical loop to be a workshare loop.
///
/// This takes a \p LoopInfo representing a canonical loop, such as the one
/// created by \p createCanonicalLoop and emits additional instructions to
/// turn it into a workshare loop. In particular, it calls an OpenMP
/// runtime function in the preheader to obtain the loop bounds to be used in
/// the current thread, updates the relevant instructions in the canonical
/// loop, and calls an OpenMP runtime finalization function after the loop.
///
/// \param DL Debug location for instructions added for the
/// workshare-loop construct itself.
/// \param CLI A descriptor of the canonical loop to workshare.
/// \param AllocaIP An insertion point for Alloca instructions usable in the
/// preheader of the loop.
/// \param NeedsBarrier Indicates whether a barrier must be inserted after
/// the loop.
///
/// \returns Point where to insert code after the workshare construct.
InsertPointTy applyWorkshareLoop(DebugLoc DL, CanonicalLoopInfo *CLI,
InsertPointTy AllocaIP, bool NeedsBarrier);
/// Tile a loop nest.
///
/// Tiles the loops of \p Loops by the tile sizes in \p TileSizes. Loops in
/// \p Loops must be perfectly nested, from outermost to innermost loop
/// (i.e. Loops.front() is the outermost loop). The trip count llvm::Value
/// of every loop and every tile size must be usable in the outermost
/// loop's preheader. This implies that the loop nest is rectangular.
///
/// Example:
/// \code
/// for (int i = 0; i < 15; ++i) // Canonical loop "i"
/// for (int j = 0; j < 14; ++j) // Canonical loop "j"
/// body(i, j);
/// \endcode
///
/// After tiling with Loops={i,j} and TileSizes={5,7}, the loop is changed to
/// \code
/// for (int i1 = 0; i1 < 3; ++i1)
/// for (int j1 = 0; j1 < 2; ++j1)
/// for (int i2 = 0; i2 < 5; ++i2)
/// for (int j2 = 0; j2 < 7; ++j2)
///           body(i1*5+i2, j1*7+j2);
/// \endcode
///
/// The returned vector contains the loops {i1,j1,i2,j2}. The loops i1 and j1
/// are referred to as the floor loops, and the loops i2 and j2 are the tile
/// loops. Tiling also handles non-constant trip counts, non-constant tile
/// sizes, and trip counts that are not multiples of the tile size. In the
/// latter case, the tile loop of the last floor-loop iteration will have
/// fewer iterations than specified as its tile size.
///
///
/// @param DL Debug location for instructions added by tiling, for
/// instance the floor- and tile trip count computation.
/// @param Loops Loops to tile. The CanonicalLoopInfo objects are
///                  invalidated by this method, i.e. should not be used after
/// tiling.
/// @param TileSizes For each loop in \p Loops, the tile size for that
///                  dimension.
///
/// \returns A list of generated loops. Contains twice as many loops as the
/// input loop nest; the first half are the floor loops and the
/// second half are the tile loops.
std::vector<CanonicalLoopInfo *>
tileLoops(DebugLoc DL, ArrayRef<CanonicalLoopInfo *> Loops,
ArrayRef<Value *> TileSizes);
/// Fully unroll a loop.
///
/// Instead of unrolling the loop immediately (and duplicating its body
/// instructions), it is deferred to LLVM's LoopUnrollPass by adding loop
/// metadata.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopFull(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Fully or partially unroll a loop. How the loop is unrolled is determined
/// using LLVM's LoopUnrollPass.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
void unrollLoopHeuristic(DebugLoc DL, CanonicalLoopInfo *Loop);
/// Partially unroll a loop.
///
/// The CanonicalLoopInfo of the unrolled loop for use with chained
/// loop-associated directive can be requested using \p UnrolledCLI. Not
/// needing the CanonicalLoopInfo allows more efficient code generation by
/// deferring the actual unrolling to the LoopUnrollPass using loop metadata.
/// A loop-associated directive applied to the unrolled loop needs to know the
/// new trip count which means that if using a heuristically determined unroll
/// factor (\p Factor == 0), that factor must be computed immediately. We are
/// using the same logic as the LoopUnrollPass to derive the unroll factor,
/// which assumes that some canonicalization has taken place (e.g.
/// Mem2Reg, LICM, GVN, Inlining, etc.). That is, the heuristic will perform
/// better when the unrolled loop's CanonicalLoopInfo is not needed.
///
/// \param DL Debug location for instructions added by unrolling.
/// \param Loop The loop to unroll. The loop will be invalidated.
/// \param Factor The factor to unroll the loop by. A factor of 0
/// indicates that a heuristic should be used to determine
/// the unroll-factor.
/// \param UnrolledCLI If non-null, receives the CanonicalLoopInfo of the
/// partially unrolled loop. Otherwise, uses loop metadata
/// to defer unrolling to the LoopUnrollPass.
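///
/// For illustration, deferring a partial unroll by a factor of 4 to the
/// LoopUnrollPass could look as follows (a sketch, not the only usage):
/// \code
///   OMPBuilder.unrollLoopPartial(DL, Loop, /*Factor=*/4,
///                                /*UnrolledCLI=*/nullptr);
/// \endcode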
void unrollLoopPartial(DebugLoc DL, CanonicalLoopInfo *Loop, int32_t Factor,
CanonicalLoopInfo **UnrolledCLI);
/// Generator for '#omp flush'
///
/// \param Loc The location where the flush directive was encountered
void createFlush(const LocationDescription &Loc);
/// Generator for '#omp taskwait'
///
/// \param Loc The location where the taskwait directive was encountered.
void createTaskwait(const LocationDescription &Loc);
/// Generator for '#omp taskyield'
///
/// \param Loc The location where the taskyield directive was encountered.
void createTaskyield(const LocationDescription &Loc);
/// Functions used to generate reductions. Such functions take two Values
/// representing LHS and RHS of the reduction, respectively, and a reference
/// to the value that is updated to refer to the reduction result.
using ReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *, Value *&)>;
/// Functions used to generate atomic reductions. Such functions take two
/// Values representing pointers to LHS and RHS of the reduction. They are
/// expected to atomically update the LHS to the reduced value.
using AtomicReductionGenTy =
function_ref<InsertPointTy(InsertPointTy, Value *, Value *)>;
/// Information about an OpenMP reduction.
struct ReductionInfo {
ReductionInfo(Value *Variable, Value *PrivateVariable,
ReductionGenTy ReductionGen,
AtomicReductionGenTy AtomicReductionGen)
: Variable(Variable), PrivateVariable(PrivateVariable),
ReductionGen(ReductionGen), AtomicReductionGen(AtomicReductionGen) {}
/// Returns the type of the element being reduced.
Type *getElementType() const {
return Variable->getType()->getPointerElementType();
}
/// Reduction variable of pointer type.
Value *Variable;
/// Thread-private partial reduction variable.
Value *PrivateVariable;
/// Callback for generating the reduction body. The IR produced by this will
/// be used to combine two values in a thread-safe context, e.g., under
/// lock or within the same thread, and therefore need not be atomic.
ReductionGenTy ReductionGen;
/// Callback for generating the atomic reduction body, may be null. The IR
/// produced by this will be used to atomically combine two values during
/// reduction. If null, the implementation will use the non-atomic version
/// along with the appropriate synchronization mechanisms.
AtomicReductionGenTy AtomicReductionGen;
};
// TODO: provide atomic and non-atomic reduction generators for reduction
// operators defined by the OpenMP specification.
/// Generator for '#omp reduction'.
///
/// Emits the IR instructing the runtime to perform the specific kind of
/// reductions. Expects reduction variables to have been privatized and
/// initialized to reduction-neutral values separately. Emits the calls to
/// runtime functions as well as the reduction function and the basic blocks
/// performing the reduction atomically and non-atomically.
///
/// The code emitted for the following:
///
/// \code
/// type var_1;
/// type var_2;
/// #pragma omp <directive> reduction(reduction-op:var_1,var_2)
/// /* body */;
/// \endcode
///
/// corresponds to the following sketch.
///
/// \code
/// void _outlined_par() {
/// // N is the number of different reductions.
/// void *red_array[] = {privatized_var_1, privatized_var_2, ...};
/// switch(__kmpc_reduce(..., N, /*size of data in red array*/, red_array,
/// _omp_reduction_func,
/// _gomp_critical_user.reduction.var)) {
/// case 1: {
/// var_1 = var_1 <reduction-op> privatized_var_1;
/// var_2 = var_2 <reduction-op> privatized_var_2;
/// // ...
/// __kmpc_end_reduce(...);
/// break;
/// }
/// case 2: {
/// _Atomic<ReductionOp>(var_1, privatized_var_1);
/// _Atomic<ReductionOp>(var_2, privatized_var_2);
/// // ...
/// break;
/// }
/// default: break;
/// }
/// }
///
/// void _omp_reduction_func(void **lhs, void **rhs) {
/// *(type *)lhs[0] = *(type *)lhs[0] <reduction-op> *(type *)rhs[0];
/// *(type *)lhs[1] = *(type *)lhs[1] <reduction-op> *(type *)rhs[1];
/// // ...
/// }
/// \endcode
///
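/// For illustration, a non-atomic ReductionGenTy for an integer sum might be
/// provided as follows (a sketch; Var and PrivVar are assumed to point to the
/// original and the privatized reduction variable):
/// \code
///   auto SumGen = [](InsertPointTy IP, Value *LHS, Value *RHS,
///                    Value *&Result) -> InsertPointTy {
///     IRBuilder<> B(IP.getBlock(), IP.getPoint());
///     Result = B.CreateAdd(LHS, RHS, "red.add");
///     return B.saveIP();
///   };
///   ReductionInfo RI(Var, PrivVar, SumGen, /*AtomicReductionGen=*/nullptr);
/// \endcode
///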
/// \param Loc The location where the reduction was
/// encountered. Must be within the associated
/// directive and after the last local access to the
/// reduction variables.
/// \param AllocaIP An insertion point suitable for allocas usable
/// in reductions.
/// \param ReductionInfos A list of info on each reduction variable.
/// \param IsNoWait A flag set if the reduction is marked as nowait.
InsertPointTy createReductions(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<ReductionInfo> ReductionInfos,
bool IsNoWait = false);
///}
/// Return the insertion point used by the underlying IRBuilder.
InsertPointTy getInsertionPoint() { return Builder.saveIP(); }
/// Update the internal location to \p Loc.
bool updateToLocation(const LocationDescription &Loc) {
Builder.restoreIP(Loc.IP);
Builder.SetCurrentDebugLocation(Loc.DL);
return Loc.IP.getBlock() != nullptr;
}
/// Return the function declaration for the runtime function with \p FnID.
FunctionCallee getOrCreateRuntimeFunction(Module &M,
omp::RuntimeFunction FnID);
Function *getOrCreateRuntimeFunctionPtr(omp::RuntimeFunction FnID);
/// Return the (LLVM-IR) string describing the source location \p LocStr.
Constant *getOrCreateSrcLocStr(StringRef LocStr);
/// Return the (LLVM-IR) string describing the default source location.
Constant *getOrCreateDefaultSrcLocStr();
/// Return the (LLVM-IR) string describing the source location identified by
/// the arguments.
Constant *getOrCreateSrcLocStr(StringRef FunctionName, StringRef FileName,
unsigned Line, unsigned Column);
/// Return the (LLVM-IR) string describing the DebugLoc \p DL. Use \p F as
/// fallback if \p DL does not specify the function name.
Constant *getOrCreateSrcLocStr(DebugLoc DL, Function *F = nullptr);
/// Return the (LLVM-IR) string describing the source location \p Loc.
Constant *getOrCreateSrcLocStr(const LocationDescription &Loc);
/// Return an ident_t* encoding the source location \p SrcLocStr and \p Flags.
/// TODO: Create an enum class for the Reserve2Flags
Value *getOrCreateIdent(Constant *SrcLocStr,
omp::IdentFlag Flags = omp::IdentFlag(0),
unsigned Reserve2Flags = 0);
/// Create a global flag \p Name in the module with initial value \p Value.
GlobalValue *createGlobalFlag(unsigned Value, StringRef Name);
/// Generate control flow and cleanup for cancellation.
///
/// \param CancelFlag Flag indicating if the cancellation is performed.
/// \param CanceledDirective The kind of directive that is cancelled.
/// \param ExitCB Extra code to be generated in the exit block.
void emitCancelationCheckImpl(Value *CancelFlag,
omp::Directive CanceledDirective,
FinalizeCallbackTy ExitCB = {});
/// Generate a barrier runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
/// \param DK The directive which caused the barrier
/// \param ForceSimpleCall Flag to force a simple (=non-cancellation) barrier.
/// \param CheckCancelFlag Flag to indicate a cancel barrier return value
/// should be checked and acted upon.
///
/// \returns The insertion point after the barrier.
InsertPointTy emitBarrierImpl(const LocationDescription &Loc,
omp::Directive DK, bool ForceSimpleCall,
bool CheckCancelFlag);
/// Generate a flush runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitFlush(const LocationDescription &Loc);
/// The finalization stack made up of finalize callbacks currently in-flight,
/// wrapped into FinalizationInfo objects that also reference the finalization
/// target block and the kind of cancellable directive.
SmallVector<FinalizationInfo, 8> FinalizationStack;
/// Return true if the last entry in the finalization stack is of kind \p DK
/// and cancellable.
bool isLastFinalizationInfoCancellable(omp::Directive DK) {
return !FinalizationStack.empty() &&
FinalizationStack.back().IsCancellable &&
FinalizationStack.back().DK == DK;
}
/// Generate a taskwait runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskwaitImpl(const LocationDescription &Loc);
/// Generate a taskyield runtime call.
///
/// \param Loc The location at which the request originated and is fulfilled.
void emitTaskyieldImpl(const LocationDescription &Loc);
/// Return the current thread ID.
///
/// \param Ident The ident (ident_t*) describing the query origin.
Value *getOrCreateThreadID(Value *Ident);
/// The underlying LLVM-IR module
Module &M;
/// The LLVM-IR Builder used to create IR.
IRBuilder<> Builder;
/// Map to remember source location strings
StringMap<Constant *> SrcLocStrMap;
/// Map to remember existing ident_t*.
DenseMap<std::pair<Constant *, uint64_t>, Value *> IdentMap;
/// Helper that contains information about regions we need to outline
/// during finalization.
struct OutlineInfo {
using PostOutlineCBTy = std::function<void(Function &)>;
PostOutlineCBTy PostOutlineCB;
BasicBlock *EntryBB, *ExitBB;
/// Collect all blocks in between EntryBB and ExitBB in both the given
/// vector and set.
void collectBlocks(SmallPtrSetImpl<BasicBlock *> &BlockSet,
SmallVectorImpl<BasicBlock *> &BlockVector);
/// Return the function that contains the region to be outlined.
Function *getFunction() const { return EntryBB->getParent(); }
};
/// Collection of regions that need to be outlined during finalization.
SmallVector<OutlineInfo, 16> OutlineInfos;
/// Collection of owned canonical loop objects that eventually need to be
/// free'd.
std::forward_list<CanonicalLoopInfo> LoopInfos;
/// Add a new region that will be outlined later.
void addOutlineInfo(OutlineInfo &&OI) { OutlineInfos.emplace_back(OI); }
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for the cache of threadprivate
/// variables.
StringMap<AssertingVH<Constant>, BumpPtrAllocator> InternalVars;
/// Create the global variable holding the offload mappings information.
GlobalVariable *createOffloadMaptypes(SmallVectorImpl<uint64_t> &Mappings,
std::string VarName);
/// Create the global variable holding the offload names information.
GlobalVariable *
createOffloadMapnames(SmallVectorImpl<llvm::Constant *> &Names,
std::string VarName);
struct MapperAllocas {
AllocaInst *ArgsBase = nullptr;
AllocaInst *Args = nullptr;
AllocaInst *ArgSizes = nullptr;
};
/// Create the alloca instructions used in calls to mapper functions.
void createMapperAllocas(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumOperands,
struct MapperAllocas &MapperAllocas);
/// Create the call for the target mapper function.
/// \param Loc The source location description.
/// \param MapperFunc Function to be called.
/// \param SrcLocInfo Source location information global.
/// \param MaptypesArg The argument types.
/// \param MapnamesArg The argument names.
/// \param MapperAllocas The AllocaInst used for the call.
/// \param DeviceID Device ID for the call.
/// \param NumOperands Number of operands in the call.
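///
/// A typical lowering first creates the allocas and then emits the call (a
/// hedged sketch; Loc, AllocaIP, MapperFn, SrcLocInfo, MaptypesArg and
/// MapnamesArg are assumed to be provided by the caller):
///
/// \code
/// OpenMPIRBuilder::MapperAllocas Allocas;
/// OMPBuilder.createMapperAllocas(Loc, AllocaIP, /*NumOperands=*/2, Allocas);
/// OMPBuilder.emitMapperCall(Loc, MapperFn, SrcLocInfo, MaptypesArg,
///                           MapnamesArg, Allocas, /*DeviceID=*/-1,
///                           /*NumOperands=*/2);
/// \endcode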
void emitMapperCall(const LocationDescription &Loc, Function *MapperFunc,
Value *SrcLocInfo, Value *MaptypesArg, Value *MapnamesArg,
struct MapperAllocas &MapperAllocas, int64_t DeviceID,
unsigned NumOperands);
public:
/// Generator for __kmpc_copyprivate
///
/// \param Loc The source location description.
/// \param BufSize Number of elements in the buffer.
/// \param CpyBuf List of pointers to data to be copied.
/// \param CpyFn function to call for copying data.
/// \param DidIt flag variable; 1 for 'single' thread, 0 otherwise.
///
/// \return The insertion position *after* the CopyPrivate call.
InsertPointTy createCopyPrivate(const LocationDescription &Loc,
llvm::Value *BufSize, llvm::Value *CpyBuf,
llvm::Value *CpyFn, llvm::Value *DidIt);
/// Generator for '#omp single'
///
/// \param Loc The source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param DidIt Local variable used as a flag to indicate 'single' thread
///
/// \returns The insertion position *after* the single call.
InsertPointTy createSingle(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, llvm::Value *DidIt);
/// Generator for '#omp master'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the master.
InsertPointTy createMaster(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generator for '#omp masked'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
///
/// \returns The insertion position *after* the masked.
InsertPointTy createMasked(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, Value *Filter);
/// Generator for '#omp critical'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \param CriticalName name of the lock used by the critical directive
/// \param HintInst Hint Instruction for hint clause associated with critical
///
/// \returns The insertion position *after* the critical.
InsertPointTy createCritical(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
StringRef CriticalName, Value *HintInst);
/// Generator for '#omp ordered depend (source | sink)'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion point to be used for alloca instructions.
/// \param NumLoops The number of loops in depend clause.
/// \param StoreValues The values to be stored at the vector addresses.
/// \param Name The name of alloca instruction.
/// \param IsDependSource If true, depend source; otherwise, depend sink.
///
/// \return The insertion position *after* the ordered.
InsertPointTy createOrderedDepend(const LocationDescription &Loc,
InsertPointTy AllocaIP, unsigned NumLoops,
ArrayRef<llvm::Value *> StoreValues,
const Twine &Name, bool IsDependSource);
/// Generator for '#omp ordered [threads | simd]'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region code.
/// \param FiniCB Callback to finalize variable copies.
/// \param IsThreads If true, the 'threads' clause was specified (or no
/// clause was given); otherwise, the 'simd' clause was specified.
///
/// \returns The insertion position *after* the ordered.
InsertPointTy createOrderedThreadsSimd(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB,
bool IsThreads);
/// Generator for '#omp sections'
///
/// \param Loc The insert and source location description.
/// \param AllocaIP The insertion points to be used for alloca instructions.
/// \param SectionCBs Callbacks that will generate body of each section.
/// \param PrivCB Callback to copy a given variable (think copy constructor).
/// \param FiniCB Callback to finalize variable copies.
/// \param IsCancellable Flag to indicate a cancellable parallel region.
/// \param IsNowait If true, no barrier is generated to ensure that all
/// sections finish before execution moves on.
/// \returns The insertion position *after* the sections.
InsertPointTy createSections(const LocationDescription &Loc,
InsertPointTy AllocaIP,
ArrayRef<StorableBodyGenCallbackTy> SectionCBs,
PrivatizeCallbackTy PrivCB,
FinalizeCallbackTy FiniCB, bool IsCancellable,
bool IsNowait);
/// Generator for '#omp section'
///
/// \param Loc The insert and source location description.
/// \param BodyGenCB Callback that will generate the region body code.
/// \param FiniCB Callback to finalize variable copies.
/// \returns The insertion position *after* the section.
InsertPointTy createSection(const LocationDescription &Loc,
BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB);
/// Generate conditional branch and relevant BasicBlocks through which private
/// threads copy the 'copyin' variables from the master copy to their
/// threadprivate copies.
///
/// \param IP insertion block for the copyin conditional
/// \param MasterAddr a pointer to the master variable
/// \param PrivateAddr a pointer to the threadprivate variable
/// \param IntPtrTy Pointer size type
/// \param BranchtoEnd Create a branch between the copyin.not.master blocks
///                    and the copy.in.end block
///
/// \returns The insertion point where the copying operation is to be emitted.
InsertPointTy createCopyinClauseBlocks(InsertPointTy IP, Value *MasterAddr,
Value *PrivateAddr,
llvm::IntegerType *IntPtrTy,
bool BranchtoEnd = true);
/// Create a runtime call for kmpc_Alloc
///
/// \param Loc The insert and source location description.
/// \param Size Size of allocated memory space
/// \param Allocator Allocator information instruction
/// \param Name Name of the call instruction emitted for OMP_alloc
///
/// \returns CallInst to the OMP_Alloc call
CallInst *createOMPAlloc(const LocationDescription &Loc, Value *Size,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_free
///
/// \param Loc The insert and source location description.
/// \param Addr Address of memory space to be freed
/// \param Allocator Allocator information instruction
/// \param Name Name of the call instruction emitted for OMP_Free
///
/// \returns CallInst to the OMP_Free call
CallInst *createOMPFree(const LocationDescription &Loc, Value *Addr,
Value *Allocator, std::string Name = "");
/// Create a runtime call for kmpc_threadprivate_cached
///
/// \param Loc The insert and source location description.
/// \param Pointer pointer to data to be cached
/// \param Size size of data to be cached
/// \param Name Name of the emitted call instruction
///
/// \returns CallInst to the thread private cache call.
CallInst *createCachedThreadPrivate(const LocationDescription &Loc,
llvm::Value *Pointer,
llvm::ConstantInt *Size,
const llvm::Twine &Name = Twine(""));
/// The `omp target` interface
///
/// For more information about the usage of this interface,
/// \see openmp/libomptarget/deviceRTLs/common/include/target.h
///
///{
/// Create a runtime call for kmpc_target_init
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
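///
/// A device kernel skeleton typically pairs this call with createTargetDeinit
/// (a sketch; Loc and the kernel body emission are assumed to come from the
/// caller):
///
/// \code
/// InsertPointTy IP = OMPBuilder.createTargetInit(
///     Loc, /*IsSPMD=*/true, /*RequiresFullRuntime=*/false);
/// Builder.restoreIP(IP);
/// // ... emit the kernel body ...
/// OMPBuilder.createTargetDeinit(Loc, /*IsSPMD=*/true,
///                               /*RequiresFullRuntime=*/false);
/// \endcode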
InsertPointTy createTargetInit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
/// Create a runtime call for kmpc_target_deinit
///
/// \param Loc The insert and source location description.
/// \param IsSPMD Flag to indicate if the kernel is an SPMD kernel or not.
/// \param RequiresFullRuntime Indicate if a full device runtime is necessary.
void createTargetDeinit(const LocationDescription &Loc, bool IsSPMD,
bool RequiresFullRuntime);
///}
/// Declarations for LLVM-IR types (simple, array, function and structure) are
/// generated below. Their names are defined and used in OpenMPKinds.def. Here
/// we provide the declarations; the initializeTypes function will provide the
/// values.
///
///{
#define OMP_TYPE(VarName, InitValue) Type *VarName = nullptr;
#define OMP_ARRAY_TYPE(VarName, ElemTy, ArraySize) \
ArrayType *VarName##Ty = nullptr; \
PointerType *VarName##PtrTy = nullptr;
#define OMP_FUNCTION_TYPE(VarName, IsVarArg, ReturnType, ...) \
FunctionType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#define OMP_STRUCT_TYPE(VarName, StrName, ...) \
StructType *VarName = nullptr; \
PointerType *VarName##Ptr = nullptr;
#include "llvm/Frontend/OpenMP/OMPKinds.def"
///}
private:
/// Create all simple and struct types exposed by the runtime and remember
/// the llvm::PointerTypes of them for easy access later.
void initializeTypes(Module &M);
/// Common interface for generating entry calls for OMP Directives.
/// If the directive has a region/body, it will set the insertion
/// point to the body.
///
/// \param OMPD Directive to generate entry blocks for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitBB block where the region ends.
/// \param Conditional indicates whether the entry call result will be used
/// to decide whether a thread will execute the
/// body code or not.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveEntry(omp::Directive OMPD, Value *EntryCall,
BasicBlock *ExitBB,
bool Conditional = false);
/// Common interface to finalize the region
///
/// \param OMPD Directive to generate exiting code for
/// \param FinIP Insertion point for emitting Finalization code and exit call
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param HasFinalize indicates whether the directive will require
/// finalization and has a finalization callback in the
/// stack that should be called.
///
/// \return The insertion position in exit block
InsertPointTy emitCommonDirectiveExit(omp::Directive OMPD,
InsertPointTy FinIP,
Instruction *ExitCall,
bool HasFinalize = true);
/// Common Interface to generate OMP inlined regions
///
/// \param OMPD Directive to generate inlined region for
/// \param EntryCall Call to the entry OMP Runtime Function
/// \param ExitCall Call to the ending OMP Runtime Function
/// \param BodyGenCB Body code generation callback.
/// \param FiniCB Finalization Callback. Will be called when finalizing region
/// \param Conditional indicates whether the entry call result will be used
/// to decide whether a thread will execute the
/// body code or not.
/// \param HasFinalize indicates whether the directive will require
/// finalization and has a finalization callback in the
/// stack that should be called.
/// \param IsCancellable if HasFinalize is set to true, indicates whether
/// the directive should be cancellable.
/// \return The insertion point after the region
InsertPointTy
EmitOMPInlinedRegion(omp::Directive OMPD, Instruction *EntryCall,
Instruction *ExitCall, BodyGenCallbackTy BodyGenCB,
FinalizeCallbackTy FiniCB, bool Conditional = false,
bool HasFinalize = true, bool IsCancellable = false);
/// Get the platform-specific name separator.
/// \param Parts different parts of the final name that need separation
/// \param FirstSeparator First separator used between the initial two
/// parts of the name.
/// \param Separator separator used between all of the remaining
/// consecutive parts of the name
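///
/// For example, Parts {"a", "b", "c"} with FirstSeparator "$" and
/// Separator "." would yield "a$b.c" (illustrative values only).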
static std::string getNameWithSeparators(ArrayRef<StringRef> Parts,
StringRef FirstSeparator,
StringRef Separator);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// CommonLinkage by default and is initialized with a null value.
/// \param Ty Type of the global variable. If it exists already, the type
/// must match.
/// \param Name Name of the variable.
Constant *getOrCreateOMPInternalVariable(Type *Ty, const Twine &Name,
unsigned AddressSpace = 0);
/// Returns the corresponding lock object for the specified critical region
/// name. If the lock object does not exist, it is created; otherwise a
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
Value *getOMPCriticalRegionLock(StringRef CriticalName);
/// Callback type for Atomic Expression update
/// ex:
/// \code{.cpp}
/// unsigned x = 0;
/// #pragma omp atomic update
/// x = Expr(x_old); //Expr() is any legal operation
/// \endcode
///
/// \param XOld the value of the atomic memory address to use for update
/// \param IRB reference to the IRBuilder to use
///
/// \returns Value to update X to.
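///
/// For the 'x = x + 1' update above, the callback could look like this
/// sketch (illustrative only):
///
/// \code
/// auto UpdateOp = [](Value *XOld, IRBuilder<> &IRB) -> Value * {
///   return IRB.CreateAdd(XOld, ConstantInt::get(XOld->getType(), 1));
/// };
/// \endcode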
using AtomicUpdateCallbackTy =
const function_ref<Value *(Value *XOld, IRBuilder<> &IRB)>;
private:
enum AtomicKind { Read, Write, Update, Capture };
/// Determine whether to emit flush or not
///
/// \param Loc The insert and source location description.
/// \param AO The required atomic ordering
/// \param AK The OpenMP atomic operation kind used.
///
/// \returns whether a flush was emitted or not
bool checkAndEmitFlushAfterAtomic(const LocationDescription &Loc,
AtomicOrdering AO, AtomicKind AK);
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicRMW,
/// or belongs to {FADD, FSUB, BAD_BINOP},
/// then a `cmpExch`-based atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param VolatileX true if \a X is volatile.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \returns A pair of the old value of X before the update, and the value
/// used for the update.
std::pair<Value *, Value *> emitAtomicUpdate(Instruction *AllocIP, Value *X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool VolatileX,
bool IsXLHSInRHSPart);
/// Emit the binary op described by \p RMWOp, using \p Src1 and \p Src2.
///
/// \return The emitted instruction.
Value *emitRMWOpAsInstruction(Value *Src1, Value *Src2,
AtomicRMWInst::BinOp RMWOp);
public:
/// A struct to pack relevant information while generating atomic ops.
struct AtomicOpValue {
Value *Var = nullptr;
bool IsSigned = false;
bool IsVolatile = false;
};
/// Emit atomic Read for: V = X --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically read
/// \param V Memory address where to store atomically read
/// value
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic read IR.
InsertPointTy createAtomicRead(const LocationDescription &Loc,
AtomicOpValue &X, AtomicOpValue &V,
AtomicOrdering AO);
/// Emit atomic write for: X = Expr --- Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param X The target pointer to be atomically written to
/// \param Expr The value to store.
/// \param AO Atomic ordering of the generated atomic
/// instructions.
///
/// \return Insertion point after generated atomic Write IR.
InsertPointTy createAtomicWrite(const LocationDescription &Loc,
AtomicOpValue &X, Value *Expr,
AtomicOrdering AO);
/// Emit atomic update for constructs: X = X BinOp Expr, or X = Expr BinOp X
/// For complex Operations: X = UpdateOp(X) => CmpExch X, old_X, UpdateOp(X)
/// Only Scalar data types.
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions.
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicRMW, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a `cmpExch`-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param IsXLHSInRHSPart true if \a X is Left H.S. in Right H.S. part of
/// the update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
///
/// \return Insertion point after generated atomic update IR.
InsertPointTy createAtomicUpdate(const LocationDescription &Loc,
Instruction *AllocIP, AtomicOpValue &X,
Value *Expr, AtomicOrdering AO,
AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp,
bool IsXLHSInRHSPart);
/// Emit atomic update for constructs: --- Only Scalar data types
/// V = X; X = X BinOp Expr ,
/// X = X BinOp Expr; V = X,
/// V = X; X = Expr BinOp X,
/// X = Expr BinOp X; V = X,
/// V = X; X = UpdateOp(X),
/// X = UpdateOp(X); V = X,
///
/// \param Loc The insert and source location description.
/// \param AllocIP Instruction to create AllocaInst before.
/// \param X The target atomic pointer to be updated
/// \param V Memory address where to store captured value
/// \param Expr The value to update X with.
/// \param AO Atomic ordering of the generated atomic instructions
/// \param RMWOp The binary operation used for the update. If the
/// operation is not supported by atomicRMW, or belongs to
/// {FADD, FSUB, BAD_BINOP}, then a cmpExch-based
/// atomic will be generated.
/// \param UpdateOp Code generator for complex expressions that cannot be
/// expressed through atomicrmw instruction.
/// \param UpdateExpr true if X is an in-place update of the form
/// X = X BinOp Expr or X = Expr BinOp X
/// \param IsXLHSInRHSPart true if X is Left H.S. in Right H.S. part of the
/// update expression, false otherwise.
/// (e.g. true for X = X BinOp Expr)
/// \param IsPostfixUpdate true if original value of 'x' must be stored in
/// 'v', not an updated one.
///
/// \return Insertion point after generated atomic capture IR.
InsertPointTy
createAtomicCapture(const LocationDescription &Loc, Instruction *AllocIP,
AtomicOpValue &X, AtomicOpValue &V, Value *Expr,
AtomicOrdering AO, AtomicRMWInst::BinOp RMWOp,
AtomicUpdateCallbackTy &UpdateOp, bool UpdateExpr,
bool IsPostfixUpdate, bool IsXLHSInRHSPart);
/// Create the control flow structure of a canonical OpenMP loop.
///
/// The emitted loop will be disconnected, i.e. no edge to the loop's
/// preheader and no terminator in the AfterBB. The OpenMPIRBuilder's
/// IRBuilder location is not preserved.
///
/// \param DL DebugLoc used for the instructions in the skeleton.
/// \param TripCount Value to be used for the trip count.
/// \param F Function in which to insert the BasicBlocks.
/// \param PreInsertBefore Where to insert BBs that execute before the body,
/// typically the body itself.
/// \param PostInsertBefore Where to insert BBs that execute after the body.
/// \param Name Base name used to derive BB
/// and instruction names.
///
/// \returns The CanonicalLoopInfo that represents the emitted loop.
CanonicalLoopInfo *createLoopSkeleton(DebugLoc DL, Value *TripCount,
Function *F,
BasicBlock *PreInsertBefore,
BasicBlock *PostInsertBefore,
const Twine &Name = {});
};
/// Class representing the control-flow structure of an OpenMP canonical loop.
///
/// The control-flow structure is standardized for easy consumption by
/// directives associated with loops. For instance, the worksharing-loop
/// construct may change this control flow such that each loop iteration is
/// executed on only one thread. The constraints of a canonical loop in brief
/// are:
///
/// * The number of loop iterations must have been computed before entering the
/// loop.
///
/// * Has an (unsigned) logical induction variable that starts at zero and
/// increments by one.
///
/// * The loop's CFG itself has no side-effects. The OpenMP specification
/// itself allows side-effects, but the order in which they happen, including
/// how often or whether at all, is unspecified. We expect that the frontend
/// will emit those side-effect instructions somewhere (e.g. before the loop)
/// such that the CanonicalLoopInfo itself can be side-effect free.
///
/// Keep in mind that CanonicalLoopInfo is meant to only describe a repeated
/// execution of a loop body that satisfies these constraints. It does NOT
/// represent arbitrary SESE regions that happen to contain a loop. Do not use
/// CanonicalLoopInfo for such purposes.
///
/// The control flow can be described as follows:
///
/// Preheader
/// |
/// /-> Header
/// | |
/// | Cond---\
/// | | |
/// | Body |
/// | | | |
/// | <...> |
/// | | | |
/// \--Latch |
/// |
/// Exit
/// |
/// After
///
/// The loop is thought to start at PreheaderIP (at the Preheader's terminator,
/// inclusive) and end at AfterIP (at the After block's first instruction,
/// exclusive).
/// That is, instructions in the Preheader and After blocks (except the
/// Preheader's terminator) are out of CanonicalLoopInfo's control and may have
/// side-effects. Typically, the Preheader is used to compute the loop's trip
/// count. The instructions from BodyIP (at the Body block's first instruction,
/// inclusive) until the Latch are also considered outside CanonicalLoopInfo's
/// control and thus can have side-effects. The body block is the single entry
/// point into the loop body, which may contain arbitrary control flow as long
/// as all control paths eventually branch to the Latch block.
///
/// TODO: Consider adding another standardized BasicBlock between Body CFG and
/// Latch to guarantee that there is only a single edge to the latch. It would
/// make loop transformations easier by not needing to consider multiple
/// predecessors of the latch (See redirectAllPredecessorsTo) and would give us
/// an equivalent to PreheaderIP, AfterIP and BodyIP for inserting code that
/// executes after each body iteration.
///
/// There must be no loop-carried dependencies through llvm::Values. This is
/// equivalent to saying that the Latch has no PHINode and the Header's only
/// PHINode is
/// for the induction variable.
///
/// All code in Header, Cond, Latch and Exit (plus the terminator of the
/// Preheader) are CanonicalLoopInfo's responsibility and their build-up checked
/// by assertOK(). They are expected not to be modified unless explicitly
/// modifying the CanonicalLoopInfo through a method that applies an OpenMP
/// loop-associated construct such as applyWorkshareLoop, tileLoops, unrollLoop,
/// etc. These methods usually invalidate the CanonicalLoopInfo and re-use its
/// basic blocks. After invalidation, the CanonicalLoopInfo must not be used
/// anymore as its underlying control flow may not exist anymore.
/// Loop-transformation methods such as tileLoops, collapseLoops and unrollLoop
/// may also return a new CanonicalLoopInfo that can be passed to other
/// loop-associated construct implementing methods. These loop-transforming
/// methods may either create a new CanonicalLoopInfo, usually using
/// createLoopSkeleton, and invalidate the input CanonicalLoopInfo, or reuse and
/// modify one of the input CanonicalLoopInfos and return it as representing the
/// modified loop. Which of the two happens is an implementation detail of the
/// transformation-implementing method, and callers should always assume that
/// the CanonicalLoopInfo passed in is invalidated and a new object is returned.
/// Returned CanonicalLoopInfos have the same structure and guarantees as those
/// created by createCanonicalLoop, such that transforming methods do not have
/// to special-case where the CanonicalLoopInfo originated from.
///
/// Generally, methods consuming CanonicalLoopInfo do not need an
/// OpenMPIRBuilder::InsertPointTy as argument, but use the locations of the
/// CanonicalLoopInfo to insert new or modify existing instructions. Unless
/// documented otherwise, methods consuming CanonicalLoopInfo do not invalidate
/// any InsertPoint that is outside CanonicalLoopInfo's control. Specifically,
/// any InsertPoint in the Preheader, Body, or After blocks can still be used
/// calling such a method.
///
/// TODO: Provide mechanisms for exception handling and cancellation points.
///
/// Defined outside OpenMPIRBuilder because nested classes cannot be
/// forward-declared, e.g. to avoid having to include the entire OMPIRBuilder.h.
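///
/// For instance, a consumer that wants to emit the loop body typically does
/// so at getBodyIP() (a sketch; CLI is assumed to be a valid
/// CanonicalLoopInfo produced elsewhere, and Builder the IRBuilder in use):
///
/// \code
/// Builder.restoreIP(CLI->getBodyIP());
/// Instruction *IV = CLI->getIndVar();
/// // ... emit body code indexed by IV ...
/// \endcode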
class CanonicalLoopInfo {
friend class OpenMPIRBuilder;
private:
BasicBlock *Preheader = nullptr;
BasicBlock *Header = nullptr;
BasicBlock *Cond = nullptr;
BasicBlock *Body = nullptr;
BasicBlock *Latch = nullptr;
BasicBlock *Exit = nullptr;
BasicBlock *After = nullptr;
/// Add the control blocks of this loop to \p BBs.
///
/// This does not include any block from the body, including the one returned
/// by getBody().
///
/// FIXME: This currently includes the Preheader and After blocks even though
/// their content is (mostly) not under CanonicalLoopInfo's control.
/// Re-evaluate whether this makes sense.
void collectControlBlocks(SmallVectorImpl<BasicBlock *> &BBs);
public:
/// Returns whether this object currently represents the IR of a loop. If
/// returning false, it may have been consumed by a loop transformation or not
/// been initialized. Do not use it in this case.
bool isValid() const { return Header; }
/// The preheader ensures that there is only a single edge entering the loop.
/// Code that must be executed before any loop iteration can be emitted here,
/// such as computing the loop trip count and begin lifetime markers. Code in
/// the preheader is not considered part of the canonical loop.
BasicBlock *getPreheader() const {
assert(isValid() && "Requires a valid canonical loop");
return Preheader;
}
/// The header is the entry for each iteration. In the canonical control flow,
/// it only contains the PHINode for the induction variable.
BasicBlock *getHeader() const {
assert(isValid() && "Requires a valid canonical loop");
return Header;
}
/// The condition block computes whether there is another loop iteration. If
/// yes, branches to the body; otherwise to the exit block.
BasicBlock *getCond() const {
assert(isValid() && "Requires a valid canonical loop");
return Cond;
}
/// The body block is the single entry for a loop iteration and not controlled
/// by CanonicalLoopInfo. It can contain arbitrary control flow but must
/// eventually branch to the \p Latch block.
BasicBlock *getBody() const {
assert(isValid() && "Requires a valid canonical loop");
return Body;
}
/// Reaching the latch indicates the end of the loop body code. In the
/// canonical control flow, it only contains the increment of the induction
/// variable.
BasicBlock *getLatch() const {
assert(isValid() && "Requires a valid canonical loop");
return Latch;
}
/// Reaching the exit indicates no more iterations are being executed.
BasicBlock *getExit() const {
assert(isValid() && "Requires a valid canonical loop");
return Exit;
}
/// The after block is intended for clean-up code such as lifetime end
/// markers. It is separate from the exit block to ensure that, analogous to
/// the preheader, it has just a single entry edge and is free from PHI nodes
/// should there be multiple loop exits (such as from break
/// statements/cancellations).
BasicBlock *getAfter() const {
assert(isValid() && "Requires a valid canonical loop");
return After;
}
/// Returns the llvm::Value containing the number of loop iterations. It must
/// be valid in the preheader and always interpreted as an unsigned integer of
/// any bit-width.
Value *getTripCount() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *CmpI = &Cond->front();
assert(isa<CmpInst>(CmpI) && "First inst must compare IV with TripCount");
return CmpI->getOperand(1);
}
/// Returns the instruction representing the current logical induction
/// variable. Always unsigned, always starting at 0 with an increment of one.
Instruction *getIndVar() const {
assert(isValid() && "Requires a valid canonical loop");
Instruction *IndVarPHI = &Header->front();
assert(isa<PHINode>(IndVarPHI) && "First inst must be the IV PHI");
return IndVarPHI;
}
/// Return the type of the induction variable (and the trip count).
Type *getIndVarType() const {
assert(isValid() && "Requires a valid canonical loop");
return getIndVar()->getType();
}
/// Return the insertion point for user code before the loop.
OpenMPIRBuilder::InsertPointTy getPreheaderIP() const {
assert(isValid() && "Requires a valid canonical loop");
return {Preheader, std::prev(Preheader->end())};
}
/// Return the insertion point for user code in the body.
OpenMPIRBuilder::InsertPointTy getBodyIP() const {
assert(isValid() && "Requires a valid canonical loop");
return {Body, Body->begin()};
}
/// Return the insertion point for user code after the loop.
OpenMPIRBuilder::InsertPointTy getAfterIP() const {
assert(isValid() && "Requires a valid canonical loop");
return {After, After->begin()};
}
Function *getFunction() const {
assert(isValid() && "Requires a valid canonical loop");
return Header->getParent();
}
/// Consistency self-check.
void assertOK() const;
/// Invalidate this loop. That is, the underlying IR does not fulfill the
/// requirements of an OpenMP canonical loop anymore.
void invalidate();
};
} // end namespace llvm
#endif // LLVM_FRONTEND_OPENMP_OMPIRBUILDER_H
|
scene.h | //
// Created by xiaoc on 2018/10/8.
//
#ifndef PATH_TRACER_SCENE_H
#define PATH_TRACER_SCENE_H
#include <vector>
#include <exception>
#include <iostream>
#include "renderobject.h"
#include "util.h"
#include "lib/rand48/erand48.h"
#include "fmt/format.h"
#include "lib/lodepng/lodepng.h"
#include "kdtree.h"
class PathTracerWindow;
class Scene {
std::vector<RenderObject *> objects;
std::vector<RenderObject*> lightSources;
KDTree * kdTree;
unsigned int w, h;
std::vector<Vector3> screen;
Vector3 camPos, camDir;
double sx, sy;
double sampleCount;
bool done;
KDTree* constructKDTree(const AABB& box,const std::vector<RenderObject*>&,unsigned int depth = 0);
public:
friend class PathTracerWindow;
std::vector<unsigned char> pixelBuffer;
Scene(int _w, int _h) : w(_w), h(_h) {
screen.resize(w * h);
camDir = Vector3{0, 0, 1}.norm();
sx = sy = 0;
sampleCount = 0;
done = false;
kdTree = nullptr;
}
void setCamPos(const Vector3 &v) {
camPos = v;
}
void setCamDir(const Vector3 &v) {
camDir = v;
}
void addObject(RenderObject *object) {
objects.emplace_back(object);
}
Hit find0(const Ray &ray){
return kdTree->search(ray);
}
Hit find(const Ray &ray) {
RenderObject *object = nullptr;
double minDist = 99999;
Vector3 norm;
for (auto i:objects) {
auto result = i->intersect(ray);
double d = result.distance;
if (d > eps && minDist > d) {
minDist = d;
if(result.object){
object = result.object;
}else
object = i;
norm = result.normal;
}
}
return {object, minDist, norm};
}
RenderObject *findALightSource();
Vector3 randomVectorInHemisphere(const Vector3 &norm, unsigned short *Xi) {
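// What follows is uniform hemisphere sampling (a sketch of the intent): pick
// a direction uniformly on the unit sphere (theta0 = azimuth, theta1 = polar
// angle via the inverse CDF acos(1 - 2u)), fold it into the upper hemisphere
// with abs(), then rotate it into the local frame (x, norm, z) built around
// the surface normal.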
auto theta0 = erand48(Xi) * 2 * M_PI;
auto theta1 = acos(1 - 2 * erand48(Xi));
auto v = Vector3{
sin(theta0) * sin(theta1), abs(sin(theta1)), -sin(theta0) * cos(theta1)
};
//auto v = Vector3(erand48(Xi) * 2-1,erand48(Xi),erand48(Xi) * 2-1).norm();
auto z = Vector3::cross(norm, abs(norm.y) > 0.1 ? Vector3{1, 0, 0} : Vector3(0, 1, 0)).norm();
auto x = Vector3::cross(norm, z).norm();
v = v.rotate(x, norm, z);
if (v * norm < 0) {
throw std::runtime_error("sampled direction fell below the hemisphere");
}
return v;
}
void trace(int i,int j, double spp,unsigned short * Xi);
void save();
#if 0
void render(int spp = 4) {
const double xmax = 1, ymax = 1;
double sppSqrt = (double) sqrt(spp);
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < w; i++) {
fmt::print("\r rendering ({0}x{0} spp), {1}%", sppSqrt, 100 * (double) i / w);
for (int j = 0; j < h; j++) {
Vector3 color;
for (int sx = 0; sx < sppSqrt; sx++) {
for (int sy = 0; sy < sppSqrt; sy++) {
double x = (double) (i) / w * xmax * 2 - xmax;
double y = (double) (j) / h * ymax * 2 - ymax;
x = x + xmax / w / 2 * ((double) sx / sppSqrt * 2 - 1);
y = y + ymax / h / 2 * ((double) sy / sppSqrt * 2 - 1);
auto dir = Vector3{x, y, 1};//.rotate(camDir);
color += trace(Ray(camPos + dir, dir), 0);
}
}
color *= (1.0 / spp);
screen[(h - j - 1) * w + i] = (color);
}
}
std::vector<unsigned char> pixelBuffer;
for (auto &i: screen) {
pixelBuffer.emplace_back(toInt(i.x));
pixelBuffer.emplace_back(toInt(i.y));
pixelBuffer.emplace_back(toInt(i.z));
pixelBuffer.emplace_back(255);
}
auto err = lodepng::encode("out.png", pixelBuffer, w, h);
if (err) std::cout << "encoder error " << err << ": " << lodepng_error_text(err) << std::endl;
}
#endif
Vector3 trace(const Ray &ray, int depth, unsigned short *Xi);
void renderSamples(int spp = 4, int samples = 1);
void render(int spp, int s = 4) {
// round spp down to the nearest perfect square so the per-axis
// subpixel sample counts divide evenly
spp = (int) sqrt((double) spp);
spp *= spp;
while (!done) {
renderSamples(spp, s);
}
}
void prepare();
void addLightSource(RenderObject*o){
lightSources.emplace_back(o);
}
};
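// A minimal usage sketch (hypothetical driver code; the concrete
// RenderObject subclasses and their constructors live elsewhere and are
// assumptions here):
//
//   Scene scene(800, 600);
//   scene.setCamPos(Vector3{0, 0, -3});
//   // scene.addObject(...); scene.addLightSource(...);
//   scene.prepare();   // presumably builds the KD-tree over added objects
//   scene.render(16);  // accumulates samples until 'done' is set
//   scene.save();      // writes the accumulated pixel buffer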
#endif //PATH_TRACER_SCENE_H
|
GB_binop__fmod_fp32.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_08__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_02__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_04__fmod_fp32)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__fmod_fp32)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__fmod_fp32)
// C+=b function (dense accum): GB (_Cdense_accumb__fmod_fp32)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__fmod_fp32)
// C=scalar+B GB (_bind1st__fmod_fp32)
// C=scalar+B' GB (_bind1st_tran__fmod_fp32)
// C=A+scalar GB (_bind2nd__fmod_fp32)
// C=A'+scalar GB (_bind2nd_tran__fmod_fp32)
// C type: float
// A type: float
// A pattern? 0
// B type: float
// B pattern? 0
// BinaryOp: cij = fmodf (aij, bij)
#define GB_ATYPE \
float
#define GB_BTYPE \
float
#define GB_CTYPE \
float
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
float aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
float bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
float t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = fmodf (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_FMOD || GxB_NO_FP32 || GxB_NO_FMOD_FP32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__fmod_fp32)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type float
float bwork = (*((float *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *restrict Cx = (float *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__fmod_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
float alpha_scalar ;
float beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((float *) alpha_scalar_in)) ;
beta_scalar = (*((float *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__fmod_fp32)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__fmod_fp32)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__fmod_fp32)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float *Cx = (float *) Cx_output ;
float x = (*((float *) x_input)) ;
float *Bx = (float *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
float bij = GBX (Bx, p, false) ;
Cx [p] = fmodf (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__fmod_fp32)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
float *Cx = (float *) Cx_output ;
float *Ax = (float *) Ax_input ;
float y = (*((float *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
float aij = GBX (Ax, p, false) ;
Cx [p] = fmodf (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmodf (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__fmod_fp32)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
float
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float x = (*((const float *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
float
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
float aij = GBX (Ax, pA, false) ; \
Cx [pC] = fmodf (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__fmod_fp32)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
float y = (*((const float *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
word2vec.h | // Word2Vec C++ 11
// Copyright (C) 2013 Jack Deng <jackdeng@gmail.com>
// MIT License
//
// based on Google Word2Vec and gensim from Radim Rehurek (see http://radimrehurek.com/2013/09/deep-learning-with-word2vec-and-gensim/)
// see main.cc for building instructions
// P.S. compiler vector extensions and AVX intrinsics (even with FMA) don't seem to help compared to compilation with -Ofast -march=native on a Haswell laptop
#ifndef XU_WORD2VEC
#define XU_WORD2VEC
#include <vector>
#include <list>
#include <string>
#include <unordered_map>
#include <tuple>
#include <algorithm>
#include <numeric>
#include <random>
#include <memory>
#include <fstream>
#include <sstream>
#include <chrono>
#include <stdio.h>
#include "v.h"
#include "model_generated.h"
template <typename T> struct Cvt;
template <> struct Cvt<std::string> {
static const std::string& to_utf8(const std::string& s) { return s; }
static const std::string& from_utf8(const std::string& s) { return s; }
};
#if defined(_LIBCPP_BEGIN_NAMESPACE_STD)
#include <codecvt>
template <> struct Cvt<std::u16string> {
static std::string to_utf8(const std::u16string& in) {
std::wstring_convert<std::codecvt_utf8<char16_t>, char16_t> cv;
return cv.to_bytes(in.data());
}
static std::u16string from_utf8(const std::string& in) {
std::wstring_convert<std::codecvt_utf8_utf16<char16_t>, char16_t> cv;
return cv.from_bytes(in.data());
}
};
#else // gcc has no <codecvt>
#include "utf8.h"
template <> struct Cvt<std::u16string> {
static std::string to_utf8(const std::u16string& in) {
std::string out;
utf8::utf16to8(in.begin(), in.end(), std::back_inserter(out));
return out;
}
static std::u16string from_utf8(const std::string& in) {
std::u16string out;
utf8::utf8to16(in.begin(), in.end(), std::back_inserter(out));
return out;
}
};
#endif
template <class String = std::string>
struct Word2Vec
{
enum Tag { S = 0, B, M, E };
static const char *tag_string(Tag t) {
switch(t) {
case S: return "S";
case B: return "B";
case M: return "M";
case E: return "E";
}
return "?"; // unreachable for valid tags; avoids falling off the end of a non-void function
}
struct Word
{
int32_t index_;
String text_;
uint32_t count_;
Word *left_, *right_;
std::vector<uint8_t> codes_;
std::vector<uint32_t> points_;
Word(int32_t index, String text, uint32_t count, Word *left = 0, Word *right = 0) : index_(index), text_(text), count_(count), left_(left), right_(right) {}
Word(const Word&) = delete;
const Word& operator = (const Word&) = delete;
};
typedef std::shared_ptr<Word> WordP;
struct Sentence
{
std::vector<Word *> words_;
std::vector<String> tokens_;
std::vector<Tag> tags_;
};
typedef std::shared_ptr<Sentence> SentenceP;
std::vector<Vector> syn0_, syn1_;
std::vector<Vector> syn0norm_;
//negative sampling
std::vector<Vector> syn1neg_;
std::vector<int> unigram_;
std::unordered_map<String, WordP> vocab_;
std::vector<Word *> words_;
int layer1_size_;
int window_;
//subsampling
float sample_;
int min_count_;
int negative_;
float alpha_, min_alpha_;
bool phrase_;
float phrase_threshold_;
Word2Vec(int size = 100, int window = 5, float sample = 0.001, int min_count = 5, int negative = 0, float alpha = 0.025, float min_alpha = 0.0001)
:layer1_size_(size), window_(window), sample_(sample), min_count_(min_count), negative_(negative)
, alpha_(alpha), min_alpha_(min_alpha)
, phrase_(false), phrase_threshold_(100)
{}
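// Minimal usage sketch (hypothetical driver; see main.cc for the real one):
//   Word2Vec<std::string> w2v(/*size=*/100, /*window=*/5);
//   w2v.build_vocab(sentences);              // sentences: std::vector<SentenceP>
//   w2v.train(sentences, /*n_workers=*/4);
//   auto sims = w2v.most_similar({"king"}, {}, /*topn=*/10);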
bool has(const String& w) const { return vocab_.find(w) != vocab_.end(); }
int build_vocab(std::vector<SentenceP>& sentences) {
size_t count = 0;
std::unordered_map<String, int> vocab;
auto progress = [&count](const char *type, const std::unordered_map<String, int>& vocab) {
printf("collecting [%s] %lu sentences, %lu distinct %ss, %d %ss\n", type, count, vocab.size(), type,
std::accumulate(vocab.begin(), vocab.end(), 0, [](int x, const std::pair<String, int>& v) { return x + v.second; }), type);
};
for (auto& sentence: sentences) {
++count;
if (count % 10000 == 0) progress("word", vocab);
String last_token;
for (auto& token: sentence->tokens_) {
vocab[token] += 1;
// add bigram phrases
if (phrase_) {
if(!last_token.empty()) vocab[last_token + Cvt<String>::from_utf8("_") + token] += 1;
last_token = token;
}
}
}
progress("word", vocab);
if (phrase_) {
count = 0;
int total_words = std::accumulate(vocab.begin(), vocab.end(), 0, [](int x, const std::pair<String, int>& v) { return x + v.second; });
std::unordered_map<String, int> phrase_vocab;
for (auto& sentence: sentences) {
++count;
if (count % 10000 == 0) progress("phrase", phrase_vocab);
std::vector<String> phrase_tokens;
String last_token;
uint32_t pa = 0, pb = 0, pab = 0;
for (auto& token: sentence->tokens_) {
pb = vocab[token];
if (! last_token.empty()) {
String phrase = last_token + Cvt<String>::from_utf8("_") + token;
pab = vocab[phrase];
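// Bigram phrase score as in word2vec's word2phrase:
//   score = (count(ab) - min_count) / (count(a) * count(b)) * total_words
// e.g. count(a)=1000, count(b)=500, count(ab)=100, min_count=5, total_words=1e6
// gives score = 95 / (1000*500) * 1e6 = 190, which passes a threshold of 100.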
float score = 0;
if (pa >= min_count_ && pb >= min_count_ && pab >= min_count_)
score = (pab - min_count_ ) / (float(pa) * pb) * total_words;
if (score > phrase_threshold_) {
phrase_tokens.push_back(phrase);
token.clear();
phrase_vocab[phrase] += 1;
}
else {
phrase_tokens.push_back(last_token);
phrase_vocab[last_token] += 1;
}
}
last_token = token;
pa = pb;
}
if (!last_token.empty()) {
phrase_tokens.push_back(last_token);
phrase_vocab[last_token] += 1;
}
sentence->tokens_.swap(phrase_tokens);
}
progress("phrase", phrase_vocab);
printf("using phrases\n");
vocab.swap(phrase_vocab);
}
int n_words = vocab.size();
if (n_words <= 1) return -1;
words_.reserve(n_words);
auto comp = [](Word *w1, Word *w2) { return w1->count_ > w2->count_; };
for (auto& p: vocab) {
uint32_t count = p.second;
if (count <= min_count_) continue;
auto r = vocab_.emplace(p.first, WordP(new Word{0, p.first, count}));
words_.push_back((r.first->second.get()));
}
std::sort(words_.begin(), words_.end(), comp);
int index = 0;
for (auto& w: words_) w->index_ = index++;
printf("collected %lu distinct words with min_count=%d\n", vocab_.size(), min_count_);
n_words = words_.size();
std::vector<Word *> heap = words_;
std::make_heap(heap.begin(), heap.end(), comp);
std::vector<WordP> tmp;
for (int i=0; i<n_words-1; ++i) {
std::pop_heap(heap.begin(), heap.end(), comp);
auto min1 = heap.back(); heap.pop_back();
std::pop_heap(heap.begin(), heap.end(), comp);
auto min2 = heap.back(); heap.pop_back();
tmp.emplace_back(WordP(new Word{i + n_words, Cvt<String>::from_utf8(""), min1->count_ + min2->count_, min1, min2}));
heap.push_back(tmp.back().get());
std::push_heap(heap.begin(), heap.end(), comp);
}
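// heap[0] is now the Huffman tree root; inner node i was assigned index
// i + n_words above, so leaves (index_ < n_words) and inner nodes can be
// distinguished during the code-assignment traversal below.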
int max_depth = 0;
std::list<std::tuple<Word *, std::vector<uint32_t>, std::vector<uint8_t>>> stack;
stack.push_back(std::make_tuple(heap[0], std::vector<uint32_t>(), std::vector<uint8_t>()));
count = 0;
while (!stack.empty()) {
auto t = stack.back();
stack.pop_back();
Word *word = std::get<0>(t);
if (word->index_ < n_words) {
word->points_ = std::get<1>(t);
word->codes_ = std::get<2>(t);
max_depth = std::max((int)word->codes_.size(), max_depth);
}
else {
auto points = std::get<1>(t);
points.emplace_back(word->index_ - n_words);
auto codes1 = std::get<2>(t);
auto codes2 = codes1;
codes1.push_back(0); codes2.push_back(1);
stack.emplace_back(std::make_tuple(word->left_, points, codes1));
stack.emplace_back(std::make_tuple(word->right_, points, codes2));
}
}
printf("built huffman tree with maximum node depth %d\n", max_depth);
syn0_.resize(n_words);
syn1_.resize(n_words);
std::default_random_engine eng(::time(NULL));
std::uniform_real_distribution<float> rng(0.0, 1.0);
for (auto& s: syn0_) {
s.resize(layer1_size_);
for (auto& x: s) x = (rng(eng) - 0.5) / layer1_size_;
}
for (auto& s: syn1_) s.resize(layer1_size_);
#if 0
//TODO: verify
if (negative_ > 0) {
syn1neg_.resize(n_words);
for (auto& s: syn1neg_) s.resize(layer1_size_);
unigram_.resize(1e8);
const float power = 0.75;
float sum = std::accumulate(words_.begin(), words_.end(), 0.0, [&power](float x, Word *word) { return x + ::pow(word->count_, power); });
float d1 = ::pow(words_[0]->count_, power) / sum;
int i = 0;
for (int a=0; a<unigram_.size(); ++a) {
unigram_[a] = i;
if (float(a) / unigram_.size() > d1) {
++i; d1 += ::pow(words_[i]->count_, power) / sum;
}
if (i >= words_.size()) i = words_.size() - 1;
}
}
#endif
return 0;
}
int train(std::vector<SentenceP>& sentences, int n_workers) {
int total_words = std::accumulate(vocab_.begin(), vocab_.end(), 0,
[](int x, const std::pair<String, WordP>& p) { return (int)(x + p.second->count_); });
int current_words = 0;
float alpha0 = alpha_, min_alpha = min_alpha_;
std::default_random_engine eng(::time(NULL));
std::uniform_real_distribution<float> rng(0.0, 1.0);
int n_sentences = sentences.size();
size_t last_words = 0;
auto cstart = std::chrono::high_resolution_clock::now();
printf("training %i sentences\n", n_sentences);
#pragma omp parallel for
for (size_t i=0; i < (size_t) n_sentences; ++i) {
auto sentence = sentences[i].get();
if (sentence->tokens_.empty())
continue;
size_t len = sentence->tokens_.size();
for (size_t t=0; t<len; ++t) { // 't' avoids shadowing the sentence index 'i' above
auto it = vocab_.find(sentence->tokens_[t]);
if (it == vocab_.end()) continue;
Word *word = it->second.get();
// subsampling
if (sample_ > 0) {
float keep_prob = (sqrt(word->count_ / (sample_ * total_words)) + 1) * (sample_ * total_words) / word->count_;
if (keep_prob < rng(eng)) continue;
}
sentence->words_.emplace_back(it->second.get());
}
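// linear learning-rate decay from alpha0 toward min_alpha as training progresses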
float alpha = std::max(min_alpha, float(alpha0 * (1.0 - 1.0 * current_words / total_words)));
Vector work(layer1_size_);
size_t words = train_sentence(*sentence, alpha, work);
#pragma omp atomic
current_words += words;
if (current_words - last_words > 1024 * 100 || i == n_sentences - 1) {
auto cend = std::chrono::high_resolution_clock::now();
auto duration = std::chrono::duration_cast<std::chrono::microseconds>(cend - cstart).count();
printf("training alpha: %.4f progress: %.2f%% words per sec: %.3fK\n", alpha, current_words * 100.0/total_words, (current_words - last_words) * 1000.0 / duration);
last_words = current_words;
cstart = cend;
}
}
syn0norm_ = syn0_;
for (auto& v: syn0norm_) v::unit(v);
return 0;
}
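// save() serializes the model as a FlatBuffers Dict (schema in
// model_generated.h): one (name, embedding) Word per vocabulary entry,
// sorted by descending frequency.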
int save(const std::string& file) const {
flatbuffers::FlatBufferBuilder fbb;
std::vector<Word *> words = words_;
std::sort(words.begin(), words.end(), [](Word *w1, Word *w2) { return w1->count_ > w2->count_; });
std::vector<flatbuffers::Offset<word2vec::Word>> ws;
for (auto w: words) {
auto name = fbb.CreateString(Cvt<String>::to_utf8(w->text_));
ws.push_back(word2vec::CreateWord(fbb, name, fbb.CreateVector(syn0_[w->index_])));
}
auto dict = word2vec::CreateDict(fbb, fbb.CreateVector(ws.data(), ws.size()));
fbb.Finish(dict);
std::ofstream out(file, std::ofstream::out | std::ofstream::binary);
out.write((const char *)fbb.GetBufferPointer(), fbb.GetSize());
return 0;
}
int save_text(const std::string& file) const {
std::ofstream out(file, std::ofstream::out);
out << syn0_.size() << " " << syn0_[0].size() << std::endl;
std::vector<Word *> words = words_;
std::sort(words.begin(), words.end(), [](Word *w1, Word *w2) { return w1->count_ > w2->count_; });
for (auto w: words) {
out << Cvt<String>::to_utf8(w->text_);
for (auto i: syn0_[w->index_]) out << " " << i;
out << std::endl;
}
return 0;
}
int load(const std::string& file) {
std::ifstream in(file, std::ifstream::binary);
std::stringstream ss;
ss << in.rdbuf();
std::string s = ss.str();
const word2vec::Dict *dict = word2vec::GetDict(s.data());
int n_words = dict->words()->Length();
syn0_.clear(); vocab_.clear(); words_.clear();
syn0_.resize(n_words);
for (int i=0; i<n_words; ++i) {
const auto *word = dict->words()->Get(i);
auto name = Cvt<String>::from_utf8(word->name()->c_str());
auto p = vocab_.emplace(name, std::make_shared<Word>(i, name, 0));
words_.push_back(p.first->second.get());
syn0_[i] = std::vector<float>{word->feature()->begin(), word->feature()->end()};
}
layer1_size_ = syn0_[0].size();
printf("%i words loaded\n", n_words);
syn0norm_ = syn0_;
for (auto& v: syn0norm_) v::unit(v);
return 0;
}
int load_text(const std::string& file) {
std::ifstream in(file);
std::string line;
if (! std::getline(in, line)) return -1;
int n_words = 0, layer1_size = 0;
std::istringstream iss(line);
iss >> n_words >> layer1_size;
syn0_.clear(); vocab_.clear(); words_.clear();
syn0_.resize(n_words);
for (int i=0; i<n_words; ++i) {
if (! std::getline(in, line)) return -1;
std::istringstream iss(line);
std::string text;
iss >> text;
auto p = vocab_.emplace(Cvt<String>::from_utf8(text), WordP(new Word{i, Cvt<String>::from_utf8(text), 0}));
words_.push_back(p.first->second.get());
syn0_[i].resize(layer1_size);
for(int j=0; j<layer1_size; ++j) {
iss >> syn0_[i][j];
}
}
layer1_size_ = layer1_size;
printf("%d words loaded\n", n_words);
syn0norm_ = syn0_;
for (auto& v: syn0norm_) v::unit(v);
return 0;
}
const Vector& word_vector(const String& w) const {
static Vector nil;
auto it = vocab_.find(w);
if (it == vocab_.end()) return nil;
return syn0_[it->second->index_];
}
size_t word_vector_size() const { return layer1_size_; }
std::vector<std::pair<String,float>> most_similar(std::vector<String> positive, std::vector<String> negative, int topn) {
if ((positive.empty() && negative.empty()) || syn0norm_.empty()) return std::vector<std::pair<String,float>>{};
Vector mean(layer1_size_);
std::vector<int> all_words;
auto add_word = [&mean, &all_words, this](const String& w, float weight) {
auto it = vocab_.find(w);
if (it == vocab_.end()) return;
Word& word = *it->second;
v::saxpy(mean, weight, syn0norm_[word.index_]);
all_words.push_back(word.index_);
};
for (auto& w: positive) add_word(w, 1.0);
for (auto& w: negative) add_word(w, -1.0);
v::unit(mean);
Vector dists;
std::vector<int> indexes;
int i=0;
dists.reserve(syn0norm_.size());
indexes.reserve(syn0norm_.size());
for (auto &x: syn0norm_) {
dists.push_back(v::dot(x, mean));
indexes.push_back(i++);
}
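// heap-based top-k selection over the cosine scores: cheaper than the full
// std::sort (left commented out below) when topn is much smaller than the vocabulary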
auto comp = [&dists](int i, int j) { return dists[i] > dists[j]; };
// std::sort(indexes.begin(), indexes.end(), comp);
int k = std::min(int(topn+all_words.size()), int(indexes.size())-1);
auto first = indexes.begin(), last = indexes.begin() + k, end = indexes.end();
std::make_heap(first, last + 1, comp);
std::pop_heap(first, last + 1, comp);
for (auto it = last + 1; it != end; ++it) {
if (! comp(*it, *first)) continue;
*last = *it;
std::pop_heap(first, last + 1, comp);
}
std::sort_heap(first, last, comp);
std::vector<std::pair<String,float>> results;
for(int i=0, j=0; i<k; ++i) {
if (std::find(all_words.begin(), all_words.end(), indexes[i]) != all_words.end())
continue;
results.push_back(std::make_pair(words_[indexes[i]]->text_, dists[indexes[i]]));
if (++j >= topn) break;
}
return results;
}
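// NB: similarity() below uses the raw vectors in syn0_, not syn0norm_, so it
// returns a dot product; it equals cosine similarity only for unit-length vectors.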
float similarity(const String& w1, const String& w2) const {
auto it1 = vocab_.find(w1), it2 = vocab_.find(w2);
if (it1 != vocab_.end() && it2 != vocab_.end())
return v::dot(syn0_[it1->second->index_], syn0_[it2->second->index_]);
return 0;
}
private:
int train_sentence(Sentence& sentence, float alpha, Vector& work) {
const int max_size = 1000;
const float max_exp = 6.0;
const static std::vector<float> table = [&](){
std::vector<float> x(max_size);
for (size_t i=0; i<max_size; ++i) { float f = exp( (i / float(max_size) * 2 -1) * max_exp); x[i] = f / (f + 1); }
return x;
}();
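// precomputed logistic table: table[i] = sigmoid(x_i) for x_i evenly spaced
// in [-max_exp, max_exp], since exp(x)/(exp(x)+1) == 1/(1+exp(-x))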
int count = 0;
int len = sentence.words_.size();
int reduced_window = rand() % window_;
for (int i=0; i<len; ++i) {
const Word& current = *sentence.words_[i];
size_t codelen = current.codes_.size();
int j = std::max(0, i - window_ + reduced_window);
int k = std::min(len, i + window_ + 1 - reduced_window);
for (; j < k; ++j) {
const Word *word = sentence.words_[j];
if (j == i || word->codes_.empty())
continue;
int word_index = word->index_;
auto& l1 = syn0_[word_index];
std::fill(work.begin(), work.end(), 0);
for (size_t b=0; b<codelen; ++b) {
int idx = current.points_[b];
auto& l2 = syn1_[idx];
float f = v::dot(l1, l2);
if (f <= -max_exp || f >= max_exp)
continue;
int fi = int((f + max_exp) * (max_size / max_exp / 2.0));
f = table[fi];
// f = sigmoid(f);
float g = (1 - current.codes_[b] - f) * alpha;
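// hierarchical softmax gradient: the target label is (1 - code), f is the
// predicted probability, so g = (label - f) * alpha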
v::saxpy(work, g, l2);
v::saxpy(l2, g, l1);
// work += syn1_[idx] * g;
// syn1_[idx] += syn0_[word_index] * g;
}
//negative sampling
#if 0
if (negative_ > 0) {
for (int d = 0; d < negative_ + 1; ++d) {
int label = (d == 0? 1: 0);
int target = 0;
if (d == 0) target = i;
else {
target = unigram_[rand() % unigram_.size()];
if (target == 0) target = rand() % (vocab_.size() - 1) + 1;
if (target == i) continue;
}
auto& l2 = syn1neg_[target];
float f = v::dot(l1, l2), g = 0;
if (f > max_exp) g = (label - 1) * alpha;
else if (f < -max_exp) g = (label - 0) * alpha;
else {
int fi = int((f + max_exp) * (max_size / max_exp / 2.0));
g = (label - table[fi]) * alpha;
}
v::saxpy(work, g, l2);
v::saxpy(l2, g, l1);
}
}
#endif
// syn0_[word_index] += work;
v::saxpy(l1, 1.0, work);
}
++count;
}
return count;
}
};
#endif
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to the image with an arithmetic, relational,
% or logical operator. Use these operations to lighten or darken an image,
% to increase or decrease contrast in an image, or to produce the "negative"
% of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o op: A channel op.
%
% o value: the value to apply to the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _PixelChannels
{
double
channel[CompositePixelChannel];
} PixelChannels;
static PixelChannels **DestroyPixelThreadSet(PixelChannels **pixels)
{
register ssize_t
i;
assert(pixels != (PixelChannels **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (PixelChannels *) NULL)
pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]);
pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
return(pixels);
}
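/*
AcquirePixelThreadSet() below allocates one row-sized PixelChannels buffer per
worker thread (the ThreadResource limit) so each OpenMP thread can accumulate
channel values without locking.
*/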
static PixelChannels **AcquirePixelThreadSet(const Image *image)
{
PixelChannels
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(PixelChannels **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
register ssize_t
j;
pixels[i]=(PixelChannels *) AcquireQuantumMemory(image->columns,
sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(pixels));
for (j=0; j < (ssize_t) image->columns; j++)
{
register ssize_t
k;
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
}
return(pixels);
}
static inline double EvaluateMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
const PixelChannels
*color_1,
*color_2;
double
distance;
register ssize_t
i;
color_1=(const PixelChannels *) x;
color_2=(const PixelChannels *) y;
distance=0.0;
for (i=0; i < MaxPixelChannels; i++)
distance+=color_1->channel[i]-(double) color_2->channel[i];
return(distance < 0 ? -1 : distance > 0 ? 1 : 0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
const MagickEvaluateOperator op,const double value)
{
double
result;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(double) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
This returns a 'floored modulus' of the addition which is a positive
result. It differs from % or fmod() that returns a 'truncated modulus'
result, where floor() is replaced by trunc() and could return a
negative result (which is clipped).
*/
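/*
For example, with QuantumRange = 65535 and pixel+value = -10.0:
-10 - 65536*floor(-10.0/65536.0) = -10 + 65536 = 65526,
whereas fmod() would return -10.
*/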
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
case AndEvaluateOperator:
{
result=(double) ((size_t) pixel & (size_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
GaussianNoise,value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(double) ((size_t) pixel << (size_t) (value+0.5));
break;
}
case LogEvaluateOperator:
{
if ((QuantumScale*pixel) >= MagickEpsilon)
result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(double) EvaluateMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(double) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(double) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(double) ((size_t) pixel | (size_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
value);
break;
}
case PowEvaluateOperator:
{
result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),(double)
value));
break;
}
case RightShiftEvaluateOperator:
{
result=(double) ((size_t) pixel >> (size_t) (value+0.5));
break;
}
case RootMeanSquareEvaluateOperator:
{
result=(double) (pixel*pixel+value);
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(double) (pixel-value);
break;
}
case SumEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case ThresholdEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(double) (((double) pixel > value) ? QuantumRange : pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
value);
break;
}
case XorEvaluateOperator:
{
result=(double) ((size_t) pixel ^ (size_t) (value+0.5));
break;
}
}
return(result);
}
static Image *AcquireImageCanvas(const Image *images,ExceptionInfo *exception)
{
const Image
*p,
*q;
size_t
columns,
rows;
q=images;
columns=images->columns;
rows=images->rows;
for (p=images; p != (Image *) NULL; p=p->next)
{
if (p->number_channels > q->number_channels)
q=p;
if (p->columns > columns)
columns=p->columns;
if (p->rows > rows)
rows=p->rows;
}
return(CloneImage(q,columns,rows,MagickTrue,exception));
}
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelChannels
**magick_restrict evaluate_pixels;
RandomInfo
**magick_restrict random_info;
size_t
number_images;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
evaluate_pixels=AcquirePixelThreadSet(images);
if (evaluate_pixels == (PixelChannels **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register PixelChannels
*evaluate_pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j,
k;
for (j=0; j < (ssize_t) number_images; j++)
for (k=0; k < MaxPixelChannels; k++)
evaluate_pixel[j].channel[k]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
register const Quantum
*p;
register ssize_t
i;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel);
PixelTrait traits = GetPixelChannelTraits(next,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(image,channel,p),op,
evaluate_pixel[j].channel[i]);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
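/* j equals number_images after the accumulation loop above (unless a pixel
fetch failed), so evaluate_pixel[j/2] is the median by intensity */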
for (k=0; k < (ssize_t) GetPixelChannels(image); k++)
q[k]=ClampToQuantum(evaluate_pixel[j/2].channel[k]);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register ssize_t
i,
x;
register PixelChannels
*evaluate_pixel;
register Quantum
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_pixel=evaluate_pixels[id];
for (j=0; j < (ssize_t) image->columns; j++)
for (i=0; i < MaxPixelChannels; i++)
evaluate_pixel[j].channel[i]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
register const Quantum
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(next,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(next);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(image,channel,p),j == 0 ?
AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
}
p+=GetPixelChannels(next);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
switch (op)
{
case MeanEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]/=(double) number_images;
break;
}
case MultiplyEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
evaluate_pixel[x].channel[i]*=QuantumScale;
}
break;
}
case RootMeanSquareEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
number_images);
break;
}
default:
break;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
MagickExport MagickBooleanType EvaluateImage(Image *image,
const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
result;
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,q) <= (QuantumRange/2)))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
q[i]=ClampToQuantum(result);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImage)
#endif
proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a channel function (polynomial, sinusoid, arcsin, or
% arctan) with the given parameters to each pixel channel of an image. Use
% these operations to lighten or darken an image, to increase or decrease
% contrast in an image, or to produce the "negative" of an image.
%
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
double
result;
register ssize_t
i;
(void) exception;
result=0.0;
switch (function)
{
case PolynomialFunction:
{
/*
Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
c1*x^2+c2*x+c3).
*/
result=0.0;
for (i=0; i < (ssize_t) number_parameters; i++)
result=result*QuantumScale*pixel+parameters[i];
result*=QuantumRange;
break;
}
case SinusoidFunction:
{
double
amplitude,
bias,
frequency,
phase;
/*
Sinusoid: frequency, phase, amplitude, bias.
*/
frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
phase=(number_parameters >= 2) ? parameters[1] : 0.0;
amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
break;
}
case ArcsinFunction:
{
double
bias,
center,
range,
width;
/*
Arcsin (pegged at range limits for invalid results): width, center,
range, and bias.
*/
width=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=2.0/width*(QuantumScale*pixel-center);
if ( result <= -1.0 )
result=bias-range/2.0;
else
if (result >= 1.0)
result=bias+range/2.0;
else
result=(double) (range/MagickPI*asin((double) result)+bias);
result*=QuantumRange;
break;
}
case ArctanFunction:
{
double
center,
bias,
range,
slope;
/*
Arctan: slope, center, range, and bias.
*/
slope=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
result=(double) (QuantumRange*(range/MagickPI*atan((double)
result)+bias));
break;
}
case UndefinedFunction:
break;
}
return(ClampToQuantum(result));
}
MagickExport MagickBooleanType FunctionImage(Image *image,
const MagickFunction function,const size_t number_parameters,
const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateFunctionImage(image,function,number_parameters,parameters,
exception) != MagickFalse)
return(MagickTrue);
#endif
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
exception);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FunctionImage)
#endif
proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
double *entropy,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*entropy=channel_statistics[CompositePixelChannel].entropy;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
double
max,
min;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageRange(image,&min,&max,exception);
*minima=(size_t) ceil(min-0.5);
*maxima=(size_t) floor(max+0.5);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
double *kurtosis,double *skewness,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*kurtosis=channel_statistics[CompositePixelChannel].kurtosis;
*skewness=channel_statistics[CompositePixelChannel].skewness;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
double *standard_deviation,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*mean=channel_statistics[CompositePixelChannel].mean;
*standard_deviation=
channel_statistics[CompositePixelChannel].standard_deviation;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
channels++;
}
return((size_t) (channels == 0 ? 1 : channels));
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
CacheView
*image_view;
ChannelMoments
*channel_moments;
double
M00[MaxPixelChannels+1],
M01[MaxPixelChannels+1],
M02[MaxPixelChannels+1],
M03[MaxPixelChannels+1],
M10[MaxPixelChannels+1],
M11[MaxPixelChannels+1],
M12[MaxPixelChannels+1],
M20[MaxPixelChannels+1],
M21[MaxPixelChannels+1],
M22[MaxPixelChannels+1],
M30[MaxPixelChannels+1];
PointInfo
centroid[MaxPixelChannels+1];
ssize_t
channel,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) ResetMagickMemory(channel_moments,0,(MaxPixelChannels+1)*
sizeof(*channel_moments));
(void) ResetMagickMemory(centroid,0,sizeof(centroid));
(void) ResetMagickMemory(M00,0,sizeof(M00));
(void) ResetMagickMemory(M01,0,sizeof(M01));
(void) ResetMagickMemory(M02,0,sizeof(M02));
(void) ResetMagickMemory(M03,0,sizeof(M03));
(void) ResetMagickMemory(M10,0,sizeof(M10));
(void) ResetMagickMemory(M11,0,sizeof(M11));
(void) ResetMagickMemory(M12,0,sizeof(M12));
(void) ResetMagickMemory(M20,0,sizeof(M20));
(void) ResetMagickMemory(M21,0,sizeof(M21));
(void) ResetMagickMemory(M22,0,sizeof(M22));
(void) ResetMagickMemory(M30,0,sizeof(M30));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M00[channel]+=QuantumScale*p[i];
M00[MaxPixelChannels]+=QuantumScale*p[i];
M10[channel]+=x*QuantumScale*p[i];
M10[MaxPixelChannels]+=x*QuantumScale*p[i];
M01[channel]+=y*QuantumScale*p[i];
M01[MaxPixelChannels]+=y*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute center of mass (centroid).
*/
if (M00[channel] < MagickEpsilon)
{
M00[channel]+=MagickEpsilon;
centroid[channel].x=(double) image->columns/2.0;
centroid[channel].y=(double) image->rows/2.0;
continue;
}
M00[channel]+=MagickEpsilon;
centroid[channel].x=M10[channel]/M00[channel];
centroid[channel].y=M01[channel]/M00[channel];
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute the image moments.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
M00[MaxPixelChannels]/=GetImageChannels(image);
M01[MaxPixelChannels]/=GetImageChannels(image);
M02[MaxPixelChannels]/=GetImageChannels(image);
M03[MaxPixelChannels]/=GetImageChannels(image);
M10[MaxPixelChannels]/=GetImageChannels(image);
M11[MaxPixelChannels]/=GetImageChannels(image);
M12[MaxPixelChannels]/=GetImageChannels(image);
M20[MaxPixelChannels]/=GetImageChannels(image);
M21[MaxPixelChannels]/=GetImageChannels(image);
M22[MaxPixelChannels]/=GetImageChannels(image);
M30[MaxPixelChannels]/=GetImageChannels(image);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[channel].centroid=centroid[channel];
channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
if (fabs(M11[channel]) < MagickEpsilon)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
else
if (M11[channel] < 0.0)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=180.0;
}
else
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
channel_moments[channel].ellipse_axis.y/
(channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
channel_moments[channel].ellipse_intensity=M00[channel]/
(MagickPI*channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.y+MagickEpsilon);
}
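/*
The loop below computes the normalized central moments
eta_pq = mu_pq / mu_00^(1+(p+q)/2), which make the Hu invariants that
follow invariant to scale as well as translation.
*/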
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Normalize image moments.
*/
M10[channel]=0.0;
M01[channel]=0.0;
M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
M00[channel]=1.0;
}
image_view=DestroyCacheView(image_view);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
(M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
(M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
(3.0*M21[channel]-M03[channel]);
channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]);
channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
(M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
(M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
(M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%   G e t I m a g e P e r c e p t u a l H a s h                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
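/*
Each perceptual-hash component stored below is -log10(|Hu invariant|);
MagickLog10() clamps tiny magnitudes so log10() never receives zero.
*/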
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
ExceptionInfo *exception)
{
ChannelPerceptualHash
*perceptual_hash;
char
*colorspaces,
*q;
const char
*artifact;
MagickBooleanType
status;
register char
*p;
register ssize_t
i;
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
MaxPixelChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
artifact=GetImageArtifact(image,"phash:colorspaces");
if (artifact != NULL)
colorspaces=AcquireString(artifact);
else
colorspaces=AcquireString("sRGB,HCLp");
perceptual_hash[0].number_colorspaces=0;
perceptual_hash[0].number_channels=0;
q=colorspaces;
for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
{
ChannelMoments
*moments;
Image
*hash_image;
size_t
j;
ssize_t
channel,
colorspace;
if (i >= MaximumNumberOfPerceptualColorspaces)
break;
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
if (colorspace < 0)
break;
perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
break;
hash_image->depth=8;
status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
exception);
if (status == MagickFalse)
break;
moments=GetImageMoments(hash_image,exception);
perceptual_hash[0].number_colorspaces++;
perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
break;
for (channel=0; channel <= MaxPixelChannels; channel++)
for (j=0; j < MaximumNumberOfImageMoments; j++)
perceptual_hash[channel].phash[i][j]=
(-MagickLog10(moments[channel].invariant[j]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
}
colorspaces=DestroyString(colorspaces);
return(perceptual_hash);
}
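/*
  Illustrative usage sketch (not part of the original module): a minimal
  call sequence for GetImagePerceptualHash(), assuming a valid `image' and
  `exception' are in scope.  The hash buffer is allocated with
  AcquireQuantumMemory(), so it is released with RelinquishMagickMemory().
*/
#if 0
{
  ChannelPerceptualHash
    *phash;

  phash=GetImagePerceptualHash(image,exception);
  if (phash != (ChannelPerceptualHash *) NULL)
    {
      /* first Hu moment of the red channel in the first hashed colorspace */
      (void) FormatLocaleFile(stdout,"phash[R][0][0]: %g\n",
        phash[RedPixelChannel].phash[0][0]);
      phash=(ChannelPerceptualHash *) RelinquishMagickMemory(phash);
    }
}
#endif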
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
double *maxima,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
initialize,
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
initialize=MagickTrue;
*maxima=0.0;
*minima=0.0;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status,initialize) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
row_maxima = 0.0,
row_minima = 0.0;
MagickBooleanType
row_initialize;
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
row_initialize=MagickTrue;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (row_initialize != MagickFalse)
{
row_minima=(double) p[i];
row_maxima=(double) p[i];
row_initialize=MagickFalse;
}
else
{
if ((double) p[i] < row_minima)
row_minima=(double) p[i];
if ((double) p[i] > row_maxima)
row_maxima=(double) p[i];
}
}
p+=GetPixelChannels(image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageRange)
#endif
{
if (initialize != MagickFalse)
{
*minima=row_minima;
*maxima=row_maxima;
initialize=MagickFalse;
}
else
{
if (row_minima < *minima)
*minima=row_minima;
if (row_maxima > *maxima)
*maxima=row_maxima;
}
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
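/*
  Illustrative usage sketch (not part of the original module), assuming a
  valid `image' and `exception': on success, minima/maxima span all
  updatable channels of the image.
*/
#if 0
{
  double
    maxima,
    minima;

  if (GetImageRange(image,&minima,&maxima,exception) != MagickFalse)
    (void) FormatLocaleFile(stdout,"range: [%g, %g]\n",minima,maxima);
}
#endif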
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
*histogram,
standard_deviation;
MagickStatusType
status;
QuantumAny
range;
register ssize_t
i;
size_t
depth;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
sizeof(*histogram));
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
MaxPixelChannels+1,sizeof(*channel_statistics));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (double *) NULL))
{
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) ResetMagickMemory(channel_statistics,0,(MaxPixelChannels+1)*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[channel].depth;
range=GetQuantumRange(depth);
status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[channel].depth++;
i--;
continue;
}
}
if ((double) p[i] < channel_statistics[channel].minima)
channel_statistics[channel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[channel].maxima)
channel_statistics[channel].maxima=(double) p[i];
channel_statistics[channel].sum+=p[i];
channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
p[i];
channel_statistics[channel].area++;
if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
channel_statistics[CompositePixelChannel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
channel_statistics[CompositePixelChannel].maxima=(double) p[i];
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum((double) p[i]))+i]++;
channel_statistics[CompositePixelChannel].sum+=(double) p[i];
channel_statistics[CompositePixelChannel].sum_squared+=(double)
p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_cubed+=(double)
p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
p[i]*p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].area++;
}
p+=GetPixelChannels(image);
}
}
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal(channel_statistics[i].area);
channel_statistics[i].sum*=area;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=channel_statistics[i].sum;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
number_bins;
register ssize_t
j;
/*
Compute pixel entropy.
*/
PixelChannel channel = GetPixelChannelChannel(image,i);
number_bins=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
if (histogram[GetPixelChannels(image)*j+i] > 0.0)
number_bins++;
area=PerceptibleReciprocal(channel_statistics[channel].area);
for (j=0; j <= (ssize_t) MaxMap; j++)
{
double
count;
count=area*histogram[GetPixelChannels(image)*j+i];
if (number_bins > MagickEpsilon)
{
channel_statistics[channel].entropy+=-count*MagickLog10(count)/
MagickLog10(number_bins);
channel_statistics[CompositePixelChannel].entropy+=-count*
MagickLog10(count)/MagickLog10(number_bins)/
GetPixelChannels(image);
}
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
      channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
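/*
  Illustrative usage sketch (not part of the original module), assuming a
  valid `image' and `exception'.  The documentation above mentions
  MagickRelinquishMemory(); within MagickCore the buffer, acquired with
  AcquireQuantumMemory(), can equally be freed with RelinquishMagickMemory().
*/
#if 0
{
  ChannelStatistics
    *channel_statistics;

  channel_statistics=GetImageStatistics(image,exception);
  if (channel_statistics != (ChannelStatistics *) NULL)
    {
      (void) FormatLocaleFile(stdout,"red: mean %g, stddev %g\n",
        channel_statistics[RedPixelChannel].mean,
        channel_statistics[RedPixelChannel].standard_deviation);
      channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
        channel_statistics);
    }
}
#endif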
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
CacheView
*polynomial_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelChannels
**magick_restrict polynomial_pixels;
size_t
number_images;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=AcquireImageCanvas(images,exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
polynomial_pixels=AcquirePixelThreadSet(images);
if (polynomial_pixels == (PixelChannels **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Polynomial image pixels.
*/
status=MagickTrue;
progress=0;
polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register ssize_t
i,
x;
register PixelChannels
*polynomial_pixel;
register Quantum
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
polynomial_pixel=polynomial_pixels[id];
for (j=0; j < (ssize_t) image->columns; j++)
for (i=0; i < MaxPixelChannels; i++)
polynomial_pixel[j].channel[i]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
register const Quantum
*p;
if (j >= (ssize_t) number_terms)
continue;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(next,p) <= (QuantumRange/2))
{
p+=GetPixelChannels(next);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
{
MagickRealType
coefficient,
degree;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(next,channel);
PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(polynomial_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
        coefficient=(MagickRealType) terms[2*j];
        degree=(MagickRealType) terms[2*j+1];
polynomial_pixel[x].channel[i]+=coefficient*
pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
}
p+=GetPixelChannels(next);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) <= (QuantumRange/2))
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_PolynomialImages)
#endif
proceed=SetImageProgress(images,PolynomialImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
polynomial_view=DestroyCacheView(polynomial_view);
polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
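/*
  Illustrative usage sketch (not part of the original module), assuming a
  two-image list `images' and a valid `exception'.  The terms below average
  the two images, 0.5*I1^1 + 0.5*I2^1; a trailing constant of 0.0 is
  supplied per the documented list length of 2 x number_terms + 1.
*/
#if 0
{
  const double
    terms[] = { 0.5, 1.0, 0.5, 1.0, 0.0 };

  Image
    *average;

  average=PolynomialImage(images,2,terms,exception);
  if (average != (Image *) NULL)
    average=DestroyImage(average);
}
#endif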
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
typedef struct _SkipNode
{
size_t
next[9],
count,
signature;
} SkipNode;
typedef struct _SkipList
{
ssize_t
level;
SkipNode
*nodes;
} SkipList;
typedef struct _PixelList
{
size_t
length,
seed;
SkipList
skip_list;
size_t
signature;
} PixelList;
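/*
  The skip-list above implements a sparse, sorted histogram over the 65536
  possible 16-bit color values: nodes[] holds one bucket per value plus a
  sentinel root at index 65536, and each node carries up to 9 forward links
  so that values are kept in sorted order with roughly logarithmic expected
  insertion cost.  Order statistics such as the median are then found by
  walking level 0 and accumulating per-bucket counts.
*/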
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
if (pixel_list == (PixelList *) NULL)
return((PixelList *) NULL);
if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory(
pixel_list->skip_list.nodes);
pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
register ssize_t
i;
assert(pixel_list != (PixelList **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixel_list[i] != (PixelList *) NULL)
pixel_list[i]=DestroyPixelList(pixel_list[i]);
pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) ResetMagickMemory(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
register SkipList
*p;
register ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
p=(&pixel_list->skip_list);
p->nodes[color].signature=pixel_list->signature;
p->nodes[color].count=1;
/*
Determine where it belongs in the list.
*/
search=65536UL;
for (level=p->level; level >= 0; level--)
{
while (p->nodes[search].next[level] < color)
search=p->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node.
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
if (level > 8)
level=8;
if (level > (p->level+2))
level=p->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > p->level)
{
p->level++;
update[p->level]=65536UL;
}
/*
Link the node into the skip-list.
*/
do
{
p->nodes[color].next[level]=p->nodes[update[level]].next[level];
p->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color,
maximum;
ssize_t
count;
/*
    Find the maximum value for each of the colors.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
maximum=p->nodes[color].next[0];
do
{
color=p->nodes[color].next[0];
if (color > maximum)
maximum=color;
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
*pixel=ScaleShortToQuantum((unsigned short) maximum);
}
static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel)
{
double
sum;
register SkipList
*p;
size_t
color;
ssize_t
count;
/*
    Find the mean value for each of the colors.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
sum=0.0;
do
{
color=p->nodes[color].next[0];
sum+=(double) p->nodes[color].count*color;
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
*pixel=ScaleShortToQuantum((unsigned short) sum);
}
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color;
ssize_t
count;
/*
    Find the median value for each of the colors.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
do
{
color=p->nodes[color].next[0];
count+=p->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
*pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color,
minimum;
ssize_t
count;
/*
    Find the minimum value for each of the colors.
*/
p=(&pixel_list->skip_list);
count=0;
color=65536UL;
minimum=p->nodes[color].next[0];
do
{
color=p->nodes[color].next[0];
if (color < minimum)
minimum=color;
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
*pixel=ScaleShortToQuantum((unsigned short) minimum);
}
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color,
max_count,
mode;
ssize_t
count;
/*
Make each pixel the 'predominant color' of the specified neighborhood.
*/
p=(&pixel_list->skip_list);
color=65536L;
mode=color;
max_count=p->nodes[mode].count;
count=0;
do
{
color=p->nodes[color].next[0];
if (p->nodes[color].count > max_count)
{
mode=color;
max_count=p->nodes[mode].count;
}
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
*pixel=ScaleShortToQuantum((unsigned short) mode);
}
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color,
next,
previous;
ssize_t
count;
/*
    Find the non-peak value for each of the colors.
*/
p=(&pixel_list->skip_list);
color=65536L;
next=p->nodes[color].next[0];
count=0;
do
{
previous=color;
color=next;
next=p->nodes[color].next[0];
count+=p->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
*pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetRootMeanSquarePixelList(PixelList *pixel_list,
Quantum *pixel)
{
double
sum;
register SkipList
*p;
size_t
color;
ssize_t
count;
/*
    Find the root mean square value for each of the colors.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
sum=0.0;
do
{
color=p->nodes[color].next[0];
sum+=(double) (p->nodes[color].count*color*color);
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
*pixel=ScaleShortToQuantum((unsigned short) sqrt(sum));
}
static inline void GetStandardDeviationPixelList(PixelList *pixel_list,
Quantum *pixel)
{
double
sum,
sum_squared;
register SkipList
*p;
size_t
color;
ssize_t
count;
/*
    Find the standard-deviation value for each of the colors.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
sum=0.0;
sum_squared=0.0;
  do
  {
    color=p->nodes[color].next[0];
    sum+=(double) p->nodes[color].count*color;
    /* accumulate count occurrences of color^2 in a single step */
    sum_squared+=(double) p->nodes[color].count*color*color;
    count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
sum_squared/=pixel_list->length;
*pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum)));
}
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
index=ScaleQuantumToShort(pixel);
signature=pixel_list->skip_list.nodes[index].signature;
if (signature == pixel_list->signature)
{
pixel_list->skip_list.nodes[index].count++;
return;
}
AddNodePixelList(pixel_list,index);
}
static void ResetPixelList(PixelList *pixel_list)
{
int
level;
register SkipNode
*root;
register SkipList
*p;
/*
Reset the skip-list.
*/
p=(&pixel_list->skip_list);
root=p->nodes+65536UL;
p->level=0;
for (level=0; level < 9; level++)
root->next[level]=65536UL;
pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**magick_restrict pixel_list;
ssize_t
center,
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(statistic_image,DirectClass,exception);
if (status == MagickFalse)
{
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
(MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
(ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
MagickMax(height,1),exception);
    q=QueueCacheViewAuthenticPixels(statistic_view,0,y,
      statistic_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
Quantum
pixel;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
PixelChannel channel = GetPixelChannelChannel(image,i);
PixelTrait traits = GetPixelChannelTraits(image,channel);
PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(statistic_traits == UndefinedPixelTrait))
continue;
if (((statistic_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) <= (QuantumRange/2)))
{
SetPixelChannel(statistic_image,channel,p[center+i],q);
continue;
}
if ((statistic_traits & UpdatePixelTrait) == 0)
continue;
pixels=p;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) MagickMax(height,1); v++)
{
for (u=0; u < (ssize_t) MagickMax(width,1); u++)
{
InsertPixelList(pixels[i],pixel_list[id]);
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
switch (type)
{
case GradientStatistic:
{
double
maximum,
minimum;
GetMinimumPixelList(pixel_list[id],&pixel);
minimum=(double) pixel;
GetMaximumPixelList(pixel_list[id],&pixel);
maximum=(double) pixel;
pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
break;
}
case MaximumStatistic:
{
GetMaximumPixelList(pixel_list[id],&pixel);
break;
}
case MeanStatistic:
{
GetMeanPixelList(pixel_list[id],&pixel);
break;
}
case MedianStatistic:
default:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
GetMinimumPixelList(pixel_list[id],&pixel);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case RootMeanSquareStatistic:
{
GetRootMeanSquarePixelList(pixel_list[id],&pixel);
break;
}
case StandardDeviationStatistic:
{
GetStandardDeviationPixelList(pixel_list[id],&pixel);
break;
}
}
SetPixelChannel(statistic_image,channel,pixel,q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(statistic_image);
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_StatisticImage)
#endif
proceed=SetImageProgress(image,StatisticImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
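/*
  Illustrative usage sketch (not part of the original module), assuming a
  valid `image' and `exception': a 3x3 MedianStatistic neighborhood yields
  the classic median filter.
*/
#if 0
{
  Image
    *median_image;

  median_image=StatisticImage(image,MedianStatistic,3,3,exception);
  if (median_image != (Image *) NULL)
    median_image=DestroyImage(median_image);
}
#endif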
|
morphology.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M M OOO RRRR PPPP H H OOO L OOO GGGG Y Y %
% MM MM O O R R P P H H O O L O O G Y Y %
% M M M O O RRRR PPPP HHHHH O O L O O G GGG Y %
% M M O O R R P H H O O L O O G G Y %
% M M OOO R R P H H OOO LLLLL OOO GGG Y %
% %
% %
% MagickCore Morphology Methods %
% %
% Software Design %
% Anthony Thyssen %
% January 2010 %
% %
% %
% Copyright 1999-2019 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Morphology is the application of various kernels, of any size or shape, to an
% image in various ways (typically binary, but not always).
%
% Convolution (weighted sum or average) is just one specific type of
% morphology, one that is very common for image blurring and sharpening
% effects, covering not only 2D Gaussian blurring but also 2-pass 1D blurring.
%
% This module provides not only a general morphology function and the ability
% to apply more advanced or iterative morphologies, but also functions for the
% generation of many different types of kernel arrays from user-supplied
% arguments.  Perhaps even the generation of a kernel from a small image.
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/artifact.h"
#include "magick/cache-view.h"
#include "magick/color-private.h"
#include "magick/channel.h"
#include "magick/enhance.h"
#include "magick/exception.h"
#include "magick/exception-private.h"
#include "magick/gem.h"
#include "magick/hashmap.h"
#include "magick/image.h"
#include "magick/image-private.h"
#include "magick/list.h"
#include "magick/magick.h"
#include "magick/memory_.h"
#include "magick/memory-private.h"
#include "magick/monitor-private.h"
#include "magick/morphology.h"
#include "magick/morphology-private.h"
#include "magick/option.h"
#include "magick/pixel-private.h"
#include "magick/prepress.h"
#include "magick/quantize.h"
#include "magick/registry.h"
#include "magick/resource_.h"
#include "magick/semaphore.h"
#include "magick/splay-tree.h"
#include "magick/statistic.h"
#include "magick/string_.h"
#include "magick/string-private.h"
#include "magick/thread-private.h"
#include "magick/token.h"
#include "magick/utility.h"
/*
Other global definitions used by module.
*/
#define Minimize(assign,value) assign=MagickMin(assign,value)
#define Maximize(assign,value) assign=MagickMax(assign,value)
/* Integer Factorial Function - for a Binomial kernel */
#if 1
static inline size_t fact(size_t n)
{
size_t l,f;
for(f=1, l=2; l <= n; f=f*l, l++);
return(f);
}
#elif 1 /* glibc floating point alternatives */
#define fact(n) ((size_t)tgamma((double)n+1))
#else
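/* note: lgamma() returns the natural log of the gamma function, so this
   variant yields log(n!) rather than n!; it would need exp() to serve as a
   drop-in factorial */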
#define fact(n) ((size_t)lgamma((double)n+1))
#endif
/* Currently these are only internal to this module */
static void
CalcKernelMetaData(KernelInfo *),
ExpandMirrorKernelInfo(KernelInfo *),
ExpandRotateKernelInfo(KernelInfo *, const double),
RotateKernelInfo(KernelInfo *, double);
/* Quick function to find last kernel in a kernel list */
static inline KernelInfo *LastKernelInfo(KernelInfo *kernel)
{
while (kernel->next != (KernelInfo *) NULL)
kernel=kernel->next;
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% A c q u i r e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelInfo() takes the given string (generally supplied by the
% user) and converts it into a Morphology/Convolution Kernel. This allows
% users to specify a kernel from a number of pre-defined kernels, or to fully
% specify their own kernel for a specific Convolution or Morphology
% Operation.
%
% The kernel so generated can be any rectangular array of floating point
% values (doubles) with the 'control point' or 'pixel being affected'
% anywhere within that array of values.
%
% Previously IM was restricted to a square of odd size using the exact
% center as origin; this is no longer the case, and any rectangular kernel
% may be used, with any element declared as the origin.  This in turn allows
% the use of highly asymmetrical kernels.
%
% The floating point values in the kernel can also include a special value
% known as 'nan' or 'not a number' to indicate that this value is not part
% of the kernel array.  This allows you to shape the kernel within its
% rectangular area.  That is, 'nan' values provide a 'mask' for the kernel
% shape.  However, at least one non-nan value must be provided for the
% kernel to work correctly.
%
% The returned kernel should be freed using the DestroyKernelInfo method
% when you are finished with it. Do not free this memory yourself.
%
% Input kernel definition strings can consist of any of three types.
%
% "name:args[[@><]"
% Select from one of the built in kernels, using the name and
% geometry arguments supplied. See AcquireKernelBuiltIn()
%
% "WxH[+X+Y][@><]:num, num, num ..."
% a kernel of size W by H, with W*H floating point numbers following.
%       The 'center' can optionally be defined at +X+Y (such that +0+0
%       is the top left corner).  If not defined, the pixel in the center,
%       for odd sizes, or immediately to the top or left of center for even
%       sizes, is automatically selected.
%
% "num, num, num, num, ..."
% list of floating point numbers defining an 'old style' odd sized
% square kernel. At least 9 values should be provided for a 3x3
% square kernel, 25 for a 5x5 square kernel, 49 for 7x7, etc.
% Values can be space or comma separated. This is not recommended.
%
% You can define a 'list of kernels' which can be used by some morphology
% operators.  A list is defined as a semi-colon separated list of kernels.
%
% " kernel ; kernel ; kernel ; "
%
% Any extra ';' characters, at start, end or between kernel definitions are
% simply ignored.
%
% The special flags will expand a single kernel into a list of rotated
% kernels.  A '@' flag will expand a 3x3 kernel into a list of 45-degree
% cyclic rotations, while a '>' will generate a list of 90-degree rotations.
% The '<' also expands using 90-degree rotations, but gives a 180-degree
% reflected kernel before the +/- 90-degree rotations, which can be important
% for Thinning operations.
%
% Note that 'name' kernels will start with an alphabetic character while the
% new kernel specification has a ':' character in its specification string.
% If neither is the case, it is assumed an old style of a simple list of
% numbers generating an odd-sized square kernel has been given.
%
% The format of the AcquireKernelInfo method is:
%
% KernelInfo *AcquireKernelInfo(const char *kernel_string)
%
% A description of each parameter follows:
%
% o kernel_string: the Morphology/Convolution kernel wanted.
%
*/
/* This was separated so that it could be used as a separate
** array input handling function, such as for -color-matrix
*/
static KernelInfo *ParseKernelArray(const char *kernel_string)
{
KernelInfo
*kernel;
char
token[MaxTextExtent];
const char
*p,
*end;
register ssize_t
i;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
MagickStatusType
flags;
GeometryInfo
args;
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = UserDefinedKernel;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickCoreSignature;
if (kernel_string == (const char *) NULL)
return(kernel);
/* find end of this specific kernel definition string */
end = strchr(kernel_string, ';');
if ( end == (char *) NULL )
end = strchr(kernel_string, '\0');
  /* clear flags - for Expanding kernel lists through rotations */
flags = NoValue;
/* Has a ':' in argument - New user kernel specification
FUTURE: this split on ':' could be done by StringToken()
*/
p = strchr(kernel_string, ':');
if ( p != (char *) NULL && p < end)
{
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, kernel_string, (size_t) (p-kernel_string));
token[p-kernel_string] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
/* Size handling and checks of geometry settings */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 1.0; /* then width = 1 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
kernel->width = (size_t)args.rho;
kernel->height = (size_t)args.sigma;
/* Offset Handling and Checks */
if ( args.xi < 0.0 || args.psi < 0.0 )
return(DestroyKernelInfo(kernel));
kernel->x = ((flags & XValue)!=0) ? (ssize_t)args.xi
: (ssize_t) (kernel->width-1)/2;
kernel->y = ((flags & YValue)!=0) ? (ssize_t)args.psi
: (ssize_t) (kernel->height-1)/2;
if ( kernel->x >= (ssize_t) kernel->width ||
kernel->y >= (ssize_t) kernel->height )
return(DestroyKernelInfo(kernel));
p++; /* advance beyond the ':' */
}
else
{ /* ELSE - Old old specification, forming odd-square kernel */
/* count up number of values given */
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
for (i=0; p < end; i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
}
/* set the size of the kernel - old sized square */
kernel->width = kernel->height= (size_t) sqrt((double) i+1.0);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
p=(const char *) kernel_string;
while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == '\''))
p++; /* ignore "'" chars for convolve filter usage - Cristy */
}
/* Read in the kernel values from rest of input string argument */
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->minimum=MagickMaximumValue;
kernel->maximum=(-MagickMaximumValue);
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; (i < (ssize_t) (kernel->width*kernel->height)) && (p < end); i++)
{
(void) GetNextToken(p,&p,MaxTextExtent,token);
if (*token == ',')
(void) GetNextToken(p,&p,MaxTextExtent,token);
if ( LocaleCompare("nan",token) == 0
|| LocaleCompare("-",token) == 0 ) {
kernel->values[i] = nan; /* this value is not part of neighbourhood */
}
else {
kernel->values[i] = StringToDouble(token,(char **) NULL);
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
}
/* sanity check -- no more values in kernel definition */
(void) GetNextToken(p,&p,MaxTextExtent,token);
if ( *token != '\0' && *token != ';' && *token != '\'' )
return(DestroyKernelInfo(kernel));
#if 0
  /* this was the old method of handling an incomplete kernel */
if ( i < (ssize_t) (kernel->width*kernel->height) ) {
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
for ( ; i < (ssize_t) (kernel->width*kernel->height); i++)
kernel->values[i]=0.0;
}
#else
/* Number of values for kernel was not enough - Report Error */
if ( i < (ssize_t) (kernel->width*kernel->height) )
return(DestroyKernelInfo(kernel));
#endif
  /* check that we received at least one real (non-nan) value! */
if (kernel->minimum == MagickMaximumValue)
return(DestroyKernelInfo(kernel));
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel size */
ExpandRotateKernelInfo(kernel, 45.0); /* cyclic rotate 3x3 kernels */
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0); /* 90 degree rotate of kernel */
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel); /* 90 degree mirror rotate */
return(kernel);
}
static KernelInfo *ParseKernelName(const char *kernel_string)
{
char
token[MaxTextExtent];
const char
*p,
*end;
GeometryInfo
args;
KernelInfo
*kernel;
MagickStatusType
flags;
ssize_t
type;
/* Parse special 'named' kernel */
(void) GetNextToken(kernel_string,&p,MaxTextExtent,token);
type=ParseCommandOption(MagickKernelOptions,MagickFalse,token);
if ( type < 0 || type == UserDefinedKernel )
return((KernelInfo *) NULL); /* not a valid named kernel */
while (((isspace((int) ((unsigned char) *p)) != 0) ||
(*p == ',') || (*p == ':' )) && (*p != '\0') && (*p != ';'))
p++;
end = strchr(p, ';'); /* end of this kernel defintion */
if ( end == (char *) NULL )
end = strchr(p, '\0');
/* ParseGeometry() needs the geometry separated! -- Arrgghh */
memcpy(token, p, (size_t) (end-p));
token[end-p] = '\0';
SetGeometryInfo(&args);
flags = ParseGeometry(token, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
/* special handling of missing values in input string */
switch( type ) {
/* Shape Kernel Defaults */
case UnityKernel:
if ( (flags & WidthValue) == 0 )
args.rho = 1.0; /* Default scale = 1.0, zero is valid */
break;
case SquareKernel:
case DiamondKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
if ( (flags & HeightValue) == 0 )
args.sigma = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RingKernel:
if ( (flags & XValue) == 0 )
args.xi = 1.0; /* Default scale = 1.0, zero is valid */
break;
case RectangleKernel: /* Rectangle - set size defaults */
if ( (flags & WidthValue) == 0 ) /* if no width then */
args.rho = args.sigma; /* then width = height */
if ( args.rho < 1.0 ) /* if width too small */
args.rho = 3; /* then width = 3 */
if ( args.sigma < 1.0 ) /* if height too small */
args.sigma = args.rho; /* then height = width */
if ( (flags & XValue) == 0 ) /* center offset if not defined */
args.xi = (double)(((ssize_t)args.rho-1)/2);
if ( (flags & YValue) == 0 )
args.psi = (double)(((ssize_t)args.sigma-1)/2);
break;
/* Distance Kernel Defaults */
case ChebyshevKernel:
case ManhattanKernel:
case OctagonalKernel:
case EuclideanKernel:
if ( (flags & HeightValue) == 0 ) /* no distance scale */
args.sigma = 100.0; /* default distance scaling */
else if ( (flags & AspectValue ) != 0 ) /* '!' flag */
args.sigma = QuantumRange/(args.sigma+1); /* maximum pixel distance */
else if ( (flags & PercentValue ) != 0 ) /* '%' flag */
args.sigma *= QuantumRange/100.0; /* percentage of color range */
break;
default:
break;
}
kernel = AcquireKernelBuiltIn((KernelInfoType)type, &args);
if ( kernel == (KernelInfo *) NULL )
return(kernel);
/* global expand to rotated kernel list - only for single kernels */
if ( kernel->next == (KernelInfo *) NULL ) {
if ( (flags & AreaValue) != 0 ) /* '@' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 45.0);
else if ( (flags & GreaterValue) != 0 ) /* '>' symbol in kernel args */
ExpandRotateKernelInfo(kernel, 90.0);
else if ( (flags & LessValue) != 0 ) /* '<' symbol in kernel args */
ExpandMirrorKernelInfo(kernel);
}
return(kernel);
}
MagickExport KernelInfo *AcquireKernelInfo(const char *kernel_string)
{
KernelInfo
*kernel,
*new_kernel;
char
*kernel_cache,
token[MaxTextExtent];
const char
*p;
if (kernel_string == (const char *) NULL)
return(ParseKernelArray(kernel_string));
p=kernel_string;
kernel_cache=(char *) NULL;
if (*kernel_string == '@')
{
ExceptionInfo *exception=AcquireExceptionInfo();
kernel_cache=FileToString(kernel_string+1,~0UL,exception);
exception=DestroyExceptionInfo(exception);
if (kernel_cache == (char *) NULL)
return((KernelInfo *) NULL);
p=(const char *) kernel_cache;
}
kernel=NULL;
while (GetNextToken(p,(const char **) NULL,MaxTextExtent,token), *token != '\0')
{
/* ignore extra or multiple ';' kernel separators */
if (*token != ';')
{
        /* a token starting with an alphabetic character is a Named kernel */
if (isalpha((int) ((unsigned char) *token)) != 0)
new_kernel=ParseKernelName(p);
else /* otherwise a user defined kernel array */
new_kernel=ParseKernelArray(p);
/* Error handling -- this is not proper error handling! */
if (new_kernel == (KernelInfo *) NULL)
{
if (kernel != (KernelInfo *) NULL)
kernel=DestroyKernelInfo(kernel);
return((KernelInfo *) NULL);
}
/* initialise or append the kernel list */
if (kernel == (KernelInfo *) NULL)
kernel=new_kernel;
else
LastKernelInfo(kernel)->next=new_kernel;
}
/* look for the next kernel in list */
p=strchr(p,';');
if (p == (char *) NULL)
break;
p++;
}
if (kernel_cache != (char *) NULL)
kernel_cache=DestroyString(kernel_cache);
return(kernel);
}
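/*
  Illustrative usage sketch (not part of the original module): the string
  below uses the "WxH:num,..." form documented above to build a 3x3 discrete
  Laplacian; the result is freed with DestroyKernelInfo().
*/
#if 0
{
  KernelInfo
    *kernel;

  kernel=AcquireKernelInfo("3x3: 0,-1,0 -1,4,-1 0,-1,0");
  if (kernel != (KernelInfo *) NULL)
    kernel=DestroyKernelInfo(kernel);
}
#endif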
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ A c q u i r e K e r n e l B u i l t I n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% AcquireKernelBuiltIn() returns one of the 'named' built-in types of
% kernels used for special purposes such as gaussian blurring, skeleton
% pruning, and edge distance determination.
%
% They take a KernelType, and a set of geometry style arguments, which were
% typically decoded from a user supplied string, or from a more complex
% Morphology Method that was requested.
%
% The format of the AcquireKernelBuiltIn method is:
%
% KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
% const GeometryInfo args)
%
% A description of each parameter follows:
%
% o type: the pre-defined type of kernel wanted
%
% o args: arguments defining or modifying the kernel
%
% Convolution Kernels
%
% Unity
%       A No-Op or Scaling single-element kernel.
%
% Gaussian:{radius},{sigma}
% Generate a two-dimensional gaussian kernel, as used by -gaussian.
% The sigma for the curve is required. The resulting kernel is
%       normalized.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
%       NOTE: the 'radius' is optional, but if provided it can limit (clip)
%       the final size of the resulting kernel to a square 2*radius+1 in size.
%       The radius should be at least 2 times that of the sigma value, or
%       severe clipping and aliasing may result.  If not given or set to 0,
%       the radius will be determined so as to produce the best minimal error
%       result, which is usually much larger than is normally needed.
%
% LoG:{radius},{sigma}
% "Laplacian of a Gaussian" or "Mexician Hat" Kernel.
% The supposed ideal edge detection, zero-summing kernel.
%
%       An alternative to this kernel is to use a "DoG" with a sigma ratio of
%       approx 1.6 (according to Wikipedia).
%
% DoG:{radius},{sigma1},{sigma2}
% "Difference of Gaussians" Kernel.
% As "Gaussian" but with a gaussian produced by 'sigma2' subtracted
% from the gaussian produced by 'sigma1'. Typically sigma2 > sigma1.
% The result is a zero-summing kernel.
%
% Blur:{radius},{sigma}[,{angle}]
% Generates a 1 dimensional or linear gaussian blur, at the angle given
%       (currently restricted to orthogonal angles). If a 'radius' is given the
% kernel is clipped to a width of 2*radius+1. Kernel can be rotated
% by a 90 degree angle.
%
% If 'sigma' is zero, you get a single pixel on a field of zeros.
%
% Note that two convolutions with two "Blur" kernels perpendicular to
%       each other are equivalent to a far larger "Gaussian" kernel with the
%       same sigma value, however much faster to apply.  This is how the
% "-blur" operator actually works.
%
% Comet:{width},{sigma},{angle}
% Blur in one direction only, much like how a bright object leaves
%       a comet-like trail.  The Kernel is actually half a gaussian curve;
%       adding two such blurs in opposite directions produces a Blur Kernel.
% Angle can be rotated in multiples of 90 degrees.
%
% Note that the first argument is the width of the kernel and not the
% radius of the kernel.
%
% Binomial:[{radius}]
%       Generate a discrete kernel using a 2-dimensional Pascal's Triangle
%       of values.  Used for special forms of image filters.
%
% # Still to be implemented...
% #
% # Filter2D
% # Filter1D
% # Set kernel values using a resize filter, and given scale (sigma)
% # Cylindrical or Linear. Is this possible with an image?
% #
%
% Named Constant Convolution Kernels
%
%    All these are unscaled, zero-summing kernels by default.  As such, for
%    non-HDRI versions of ImageMagick, some form of normalization, user
%    scaling, and biasing of the results is recommended to prevent the
%    resulting image from being 'clipped'.
%
% The 3x3 kernels (most of these) can be circularly rotated in multiples of
%    45 degrees to generate the 8 angled variants of each of the kernels.
%
% Laplacian:{type}
%       Discrete Laplacian Kernels, (without normalization)
%       Type 0 :  3x3 with center:8 surrounded by -1  (8 neighbourhood)
% Type 1 : 3x3 with center:4 edge:-1 corner:0 (4 neighbourhood)
% Type 2 : 3x3 with center:4 edge:1 corner:-2
% Type 3 : 3x3 with center:4 edge:-2 corner:1
% Type 5 : 5x5 laplacian
% Type 7 : 7x7 laplacian
% Type 15 : 5x5 LoG (sigma approx 1.4)
% Type 19 : 9x9 LoG (sigma approx 1.4)
%
% Sobel:{angle}
% Sobel 'Edge' convolution kernel (3x3)
% | -1, 0, 1 |
% | -2, 0, 2 |
% | -1, 0, 1 |
%
% Roberts:{angle}
% Roberts convolution kernel (3x3)
% | 0, 0, 0 |
% | -1, 1, 0 |
% | 0, 0, 0 |
%
% Prewitt:{angle}
% Prewitt Edge convolution kernel (3x3)
% | -1, 0, 1 |
% | -1, 0, 1 |
% | -1, 0, 1 |
%
% Compass:{angle}
% Prewitt's "Compass" convolution kernel (3x3)
% | -1, 1, 1 |
% | -1,-2, 1 |
% | -1, 1, 1 |
%
% Kirsch:{angle}
% Kirsch's "Compass" convolution kernel (3x3)
% | -3,-3, 5 |
% | -3, 0, 5 |
% | -3,-3, 5 |
%
% FreiChen:{angle}
% Frei-Chen Edge Detector is based on a kernel that is similar to
%       the Sobel Kernel, but is designed to be isotropic.  That is, it takes
% into account the distance of the diagonal in the kernel.
%
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) |
% | 1, 0, -1 |
%
% FreiChen:{type},{angle}
%
% Frei-Chen Pre-weighted kernels...
%
%      Type 0: default un-normalized version shown above.
%
% Type 1: Orthogonal Kernel (same as type 11 below)
% | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 2: Diagonal form of Kernel...
% | 1, sqrt(2), 0 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 0, -sqrt(2) -1 |
%
%      However this kernel is also at the heart of the FreiChen Edge
%      Detection Process, which uses a set of 9 specially weighted kernels.
%      These 9 kernels should not be normalized, but applied directly to the
%      image.  The results are then added together to produce the intensity
%      of an edge in a specific direction.  The square root of the pixel
%      value can then be taken as the cosine of the edge, and with at least
%      2 such runs at 90 degrees from each other, both the direction and the
%      strength of the edge can be determined.
%
% Type 10: All 9 of the following pre-weighted kernels...
%
% Type 11: | 1, 0, -1 |
% | sqrt(2), 0, -sqrt(2) | / 2*sqrt(2)
% | 1, 0, -1 |
%
% Type 12: | 1, sqrt(2), 1 |
% | 0, 0, 0 | / 2*sqrt(2)
% | 1, sqrt(2), 1 |
%
% Type 13: | sqrt(2), -1, 0 |
% | -1, 0, 1 | / 2*sqrt(2)
% | 0, 1, -sqrt(2) |
%
% Type 14: | 0, 1, -sqrt(2) |
% | -1, 0, 1 | / 2*sqrt(2)
% | sqrt(2), -1, 0 |
%
% Type 15: | 0, -1, 0 |
% | 1, 0, 1 | / 2
% | 0, -1, 0 |
%
% Type 16: | 1, 0, -1 |
% | 0, 0, 0 | / 2
% | -1, 0, 1 |
%
% Type 17: | 1, -2, 1 |
% | -2, 4, -2 | / 6
%             | 1, -2, 1 |
%
% Type 18: | -2, 1, -2 |
% | 1, 4, 1 | / 6
% | -2, 1, -2 |
%
% Type 19: | 1, 1, 1 |
% | 1, 1, 1 | / 3
% | 1, 1, 1 |
%
% The first 4 are for edge detection, the next 4 are for line detection
%      and the last is to add an average component to the results.
%
% Using a special type of '-1' will return all 9 pre-weighted kernels
% as a multi-kernel list, so that you can use them directly (without
% normalization) with the special "-set option:morphology:compose Plus"
% setting to apply the full FreiChen Edge Detection Technique.
%
% If 'type' is large it will be taken to be an actual rotation angle for
% the default FreiChen (type 0) kernel. As such FreiChen:45 will look
% like a Sobel:45 but with 'sqrt(2)' instead of '2' values.
%
%      WARNING: The above was laid out as per
% http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf
% But rotated 90 degrees so direction is from left rather than the top.
% I have yet to find any secondary confirmation of the above. The only
% other source found was actual source code at
% http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf
%      Neither paper defines the kernels in a way that looks logical or
% correct when taken as a whole.
%
% Boolean Kernels
%
% Diamond:[{radius}[,{scale}]]
% Generate a diamond shaped kernel with given radius to the points.
% Kernel size will again be radius*2+1 square and defaults to radius 1,
%       generating a 3x3 kernel that is slightly smaller than a square.
%
% Square:[{radius}[,{scale}]]
% Generate a square shaped kernel of size radius*2+1, and defaulting
% to a 3x3 (radius 1).
%
% Octagon:[{radius}[,{scale}]]
%       Generate an octagonal shaped kernel of given radius and constant scale.
%       Default radius is 3, producing a 7x7 kernel.  A radius of 1 will result
%       in a "Diamond" kernel.
%
% Disk:[{radius}[,{scale}]]
%       Generate a binary disk, thresholded at the radius given; the radius
%       may be a floating-point value.  Final kernel size is floor(radius)*2+1
% square. A radius of 5.3 is the default.
%
%       NOTE: Low-radius Disk kernels produce the same results as
% many of the previously defined kernels, but differ greatly at larger
% radii. Here is a table of equivalences...
% "Disk:1" => "Diamond", "Octagon:1", or "Cross:1"
% "Disk:1.5" => "Square"
% "Disk:2" => "Diamond:2"
% "Disk:2.5" => "Octagon"
% "Disk:2.9" => "Square:2"
% "Disk:3.5" => "Octagon:3"
% "Disk:4.5" => "Octagon:4"
% "Disk:5.4" => "Octagon:5"
% "Disk:6.4" => "Octagon:6"
%    All other Disk shapes are unique to this kernel, but because a "Disk"
%    is more circular at larger radii, using a single larger radius is
%    preferred over iterating the morphological operation.
%
% Rectangle:{geometry}
% Simply generate a rectangle of 1's with the size given. You can also
% specify the location of the 'control point', otherwise the closest
% pixel to the center of the rectangle is selected.
%
% Properly centered and odd sized rectangles work the best.
%
% Symbol Dilation Kernels
%
%    These kernels are not good general morphological kernels, but are used
%    more for highlighting and marking any single pixels in an image using
%    a "Dilate" method as appropriate.
%
% For the same reasons iterating these kernels does not produce the
% same result as using a larger radius for the symbol.
%
% Plus:[{radius}[,{scale}]]
% Cross:[{radius}[,{scale}]]
% Generate a kernel in the shape of a 'plus' or a 'cross' with
%       each arm the length of the given radius (default 2).
%
% NOTE: "plus:1" is equivalent to a "Diamond" kernel.
%
% Ring:{radius1},{radius2}[,{scale}]
% A ring of the values given that falls between the two radii.
%       Defaults to a ring of approximately radius 3 in a 7x7 kernel.
%       These are the 'edge' pixels of the default "Disk" kernel;
%       more specifically, "Ring" -> "Ring:2.5,3.5,1.0"
%
% Hit and Miss Kernels
%
% Peak:radius1,radius2
%       Find any peak larger than the pixels that fall between the two radii.
% The default ring of pixels is as per "Ring".
% Edges
% Find flat orthogonal edges of a binary shape
% Corners
% Find 90 degree corners of a binary shape
% Diagonals:type
% A special kernel to thin the 'outside' of diagonals
% LineEnds:type
%       Find end points of lines (for pruning a skeleton)
%       Two types of line ends (default is both) can be searched for
% Type 0: All line ends
%         Type 1: single kernel for 4-connected line ends
% Type 2: single kernel for simple line ends
%     LineJunctions:type
%       Find the junctions of three or more lines (within a skeleton)
% Type 0: all line junctions
% Type 1: Y Junction kernel
% Type 2: Diagonal T Junction kernel
% Type 3: Orthogonal T Junction kernel
% Type 4: Diagonal X Junction kernel
% Type 5: Orthogonal + Junction kernel
% Ridges:type
% Find single pixel ridges or thin lines
%         Type 1: Find single pixel thick lines and ridges
% Type 2: Find two pixel thick lines and ridges
% ConvexHull
% Octagonal Thickening Kernel, to generate convex hulls of 45 degrees
% Skeleton:type
% Traditional skeleton generating kernels.
%       Type 1: Traditional Skeleton kernel (4 connected skeleton)
%       Type 2: HIPR2 Skeleton kernel (8 connected skeleton)
%       Type 3: Thinning skeleton based on a research paper by
% Dan S. Bloomberg (Default Type)
% ThinSE:type
%       A huge variety of Thinning Kernels designed to preserve connectivity.
%       Many other kernel sets use these kernels as source definitions.
% Type numbers are 41-49, 81-89, 481, and 482 which are based on
% the super and sub notations used in the source research paper.
%
% Distance Measuring Kernels
%
% Different types of distance measuring methods, which are used with the
% 'Distance' morphology method for generating a gradient based on
% distance from an edge of a binary shape, though there is a technique
% for handling an anti-aliased shape.
%
% See the 'Distance' Morphological Method, for information on how it is
% applied.
%
% Chebyshev:[{radius}][x{scale}[%!]]
% Chebyshev Distance (also known as Tchebychev or Chessboard distance)
%       is a value of one to any neighbour, orthogonal or diagonal. One way
%       of thinking of it is the number of squares a 'King' or 'Queen' in
%       chess needs to traverse to reach any other position on a chess board.
% It results in a 'square' like distance function, but one where
% diagonals are given a value that is closer than expected.
%
% Manhattan:[{radius}][x{scale}[%!]]
%       Manhattan Distance (also known as Rectilinear, City Block, or the Taxi
%       Cab distance metric) is the distance needed when you can
%       travel in horizontal or vertical directions only. It is the
%       distance a 'Rook' in chess would have to travel, and results in a
%       diamond-like distance, where diagonals are further than expected.
%
% Octagonal:[{radius}][x{scale}[%!]]
%       An interleaving of Manhattan and Chebyshev metrics, producing an
%       increasing octagonally shaped distance. Distances match those of
% the "Octagon" shaped kernel of the same radius. The minimum radius
% and default is 2, producing a 5x5 kernel.
%
% Euclidean:[{radius}][x{scale}[%!]]
%       Euclidean distance is the 'direct' or 'as the crow flies' distance.
% However by default the kernel size only has a radius of 1, which
% limits the distance to 'Knight' like moves, with only orthogonal and
% diagonal measurements being correct. As such for the default kernel
%       you will get an octagonal-like distance function.
%
% However using a larger radius such as "Euclidean:4" you will get a
% much smoother distance gradient from the edge of the shape. Especially
% if the image is pre-processed to include any anti-aliasing pixels.
% Of course a larger kernel is slower to use, and not always needed.
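%
%  As a worked example of the four metrics: for the neighbour at offset
%  (u,v) = (2,1) with a scale of 1, Chebyshev gives max(|2|,|1|) = 2,
%  Manhattan gives |2|+|1| = 3, Octagonal gives
%  max(2, floor((3+1)/1.5)) = 2, and Euclidean gives sqrt(2*2+1*1) ~= 2.24
%  (these follow directly from the formulas used in the code below).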
%
% The first three Distance Measuring Kernels will only generate distances
% of exact multiples of {scale} in binary images. As such you can use a
% scale of 1 without losing any information. However you also need some
% scaling when handling non-binary anti-aliased shapes.
%
% The "Euclidean" Distance Kernel however does generate non-integer
% fractional results, and as such scaling is vital even for binary shapes.
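%
% For example, "Euclidean:4x100" (using the x{scale} syntax above) would
% multiply each generated distance by 100, so the fractional parts of the
% Euclidean distances survive rounding into integer pixel values.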
%
*/
MagickExport KernelInfo *AcquireKernelBuiltIn(const KernelInfoType type,
const GeometryInfo *args)
{
KernelInfo
*kernel;
register ssize_t
i;
register ssize_t
u,
v;
double
nan = sqrt((double)-1.0); /* Special Value : Not A Number */
/* Generate a new empty kernel if needed */
kernel=(KernelInfo *) NULL;
switch(type) {
case UndefinedKernel: /* These should not call this function */
case UserDefinedKernel:
assert("Should not call this function" != (char *) NULL);
break;
    case LaplacianKernel: /* Named Discrete Convolution Kernels */
case SobelKernel: /* these are defined using other kernels */
case RobertsKernel:
case PrewittKernel:
case CompassKernel:
case KirschKernel:
case FreiChenKernel:
case EdgesKernel: /* Hit and Miss kernels */
case CornersKernel:
case DiagonalsKernel:
case LineEndsKernel:
case LineJunctionsKernel:
case RidgesKernel:
case ConvexHullKernel:
case SkeletonKernel:
case ThinSEKernel:
break; /* A pre-generated kernel is not needed */
#if 0
/* set to 1 to do a compile-time check that we haven't missed anything */
case UnityKernel:
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case BlurKernel:
case CometKernel:
case BinomialKernel:
case DiamondKernel:
case SquareKernel:
case RectangleKernel:
case OctagonKernel:
case DiskKernel:
case PlusKernel:
case CrossKernel:
case RingKernel:
case PeaksKernel:
case ChebyshevKernel:
case ManhattanKernel:
case OctangonalKernel:
case EuclideanKernel:
#else
default:
#endif
/* Generate the base Kernel Structure */
kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (kernel == (KernelInfo *) NULL)
return(kernel);
(void) memset(kernel,0,sizeof(*kernel));
kernel->minimum = kernel->maximum = kernel->angle = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->type = type;
kernel->next = (KernelInfo *) NULL;
kernel->signature = MagickCoreSignature;
break;
}
switch(type) {
/*
Convolution Kernels
*/
case UnityKernel:
{
kernel->height = kernel->width = (size_t) 1;
kernel->x = kernel->y = (ssize_t) 0;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(1,
sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
kernel->maximum = kernel->values[0] = args->rho;
break;
}
case GaussianKernel:
case DoGKernel:
case LoGKernel:
{ double
sigma = fabs(args->sigma),
sigma2 = fabs(args->xi),
A, B, R;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else if ( (type != DoGKernel) || (sigma >= sigma2) )
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma);
else
kernel->width = GetOptimalKernelWidth2D(args->rho,sigma2);
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) MagickAssumeAligned(AcquireAlignedMemory(
kernel->width,kernel->height*sizeof(*kernel->values)));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* WARNING: The following generates a 'sampled gaussian' kernel.
* What we really want is a 'discrete gaussian' kernel.
*
       * How to do this I don't know, but it appears to be based on the
       * Error Function 'erf()' (the integral of a gaussian)
*/
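      /* A hedged sketch of one possible 'integrated gaussian' form,
      ** averaging the gaussian over each pixel cell using erf(). This is
      ** an assumption about the intended fix, not what the code below does:
      **
      **    K(u) = 0.5*( erf(((double) u+0.5)/(sigma*MagickSQ2))
      **               - erf(((double) u-0.5)/(sigma*MagickSQ2)) )
      */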
if ( type == GaussianKernel || type == DoGKernel )
{ /* Calculate a Gaussian, OR positive half of a DoG */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
if ( type == DoGKernel )
{ /* Subtract a Negative Gaussian for "Difference of Gaussian" */
if ( sigma2 > MagickEpsilon )
{ sigma = sigma2; /* simplify loop expressions */
A = 1.0/(2.0*sigma*sigma);
B = (double) (1.0/(Magick2PI*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] -= exp(-((double)(u*u+v*v))*A)*B;
}
else /* limiting case - a unity (normalized Dirac) kernel */
kernel->values[kernel->x+kernel->y*kernel->width] -= 1.0;
}
if ( type == LoGKernel )
      { /* Calculate a Laplacian of a Gaussian - Or Mexican Hat */
if ( sigma > MagickEpsilon )
{ A = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
B = (double) (1.0/(MagickPI*sigma*sigma*sigma*sigma));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ R = ((double)(u*u+v*v))*A;
kernel->values[i] = (1-R)*exp(-R)*B;
}
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
}
/* Note the above kernels may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
      ** sigma's (< 0.1) the central value becomes larger than one, thus
** producing a very bright kernel.
**
** Normalization will still be needed.
*/
/* Normalize the 2D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
break;
}
case BlurKernel:
{ double
sigma = fabs(args->sigma),
alpha, beta;
if ( args->rho >= 1.0 )
kernel->width = (size_t)args->rho*2+1;
else
kernel->width = GetOptimalKernelWidth1D(args->rho,sigma);
kernel->height = 1;
kernel->x = (ssize_t) (kernel->width-1)/2;
kernel->y = 0;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
#if 1
#define KernelRank 3
/* Formula derived from GetBlurKernel() in "effect.c" (plus bug fix).
** It generates a gaussian 3 times the width, and compresses it into
** the expected range. This produces a closer normalization of the
** resulting kernel, especially for very low sigma values.
      ** As such, while weird, it is preferred.
**
** I am told this method originally came from Photoshop.
**
** A properly normalized curve is generated (apart from edge clipping)
** even though we later normalize the result (for edge clipping)
** to allow the correct generation of a "Difference of Blurs".
*/
/* initialize */
v = (ssize_t) (kernel->width*KernelRank-1)/2; /* start/end points to fit range */
(void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
/* Calculate a Positive 1D Gaussian */
if ( sigma > MagickEpsilon )
{ sigma *= KernelRank; /* simplify loop expressions */
alpha = 1.0/(2.0*sigma*sigma);
beta= (double) (1.0/(MagickSQ2PI*sigma ));
for ( u=-v; u <= v; u++) {
kernel->values[(u+v)/KernelRank] +=
exp(-((double)(u*u))*alpha)*beta;
}
}
else /* special case - generate a unity kernel */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
#else
/* Direct calculation without curve averaging
         This is equivalent to a KernelRank of 1 */
/* Calculate a Positive Gaussian */
if ( sigma > MagickEpsilon )
{ alpha = 1.0/(2.0*sigma*sigma); /* simplify loop expressions */
beta = 1.0/(MagickSQ2PI*sigma);
for ( i=0, u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = exp(-((double)(u*u))*alpha)*beta;
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
}
#endif
/* Note the above kernel may have been 'clipped' by a user defined
** radius, producing a smaller (darker) kernel. Also for very small
** sigma's (< 0.1) the central value becomes larger than one, as a
      ** result of not generating an actual 'discrete' kernel, and thus
      ** producing a very bright 'impulse'.
      **
      ** Because of these two factors Normalization is required!
*/
/* Normalize the 1D Gaussian Kernel
**
** NB: a CorrelateNormalize performs a normal Normalize if
** there are no negative values.
*/
CalcKernelMetaData(kernel); /* the other kernel meta-data */
ScaleKernelInfo(kernel, 1.0, CorrelateNormalizeValue);
/* rotate the 1D kernel by given angle */
RotateKernelInfo(kernel, args->xi );
break;
}
case CometKernel:
{ double
sigma = fabs(args->sigma),
A;
if ( args->rho < 1.0 )
kernel->width = (GetOptimalKernelWidth1D(args->rho,sigma)-1)/2+1;
else
kernel->width = (size_t)args->rho;
kernel->x = kernel->y = 0;
kernel->height = 1;
kernel->negative_range = kernel->positive_range = 0.0;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* A comet blur is half a 1D gaussian curve, so that the object is
** blurred in one direction only. This may not be quite the right
** curve to use so may change in the future. The function must be
** normalised after generation, which also resolves any clipping.
**
** As we are normalizing and not subtracting gaussians,
      ** there is no need for a divisor in the gaussian formula;
      ** it is also less complex.
*/
if ( sigma > MagickEpsilon )
{
#if 1
#define KernelRank 3
v = (ssize_t) kernel->width*KernelRank; /* start/end points */
(void) memset(kernel->values,0, (size_t)
kernel->width*sizeof(*kernel->values));
sigma *= KernelRank; /* simplify the loop expression */
A = 1.0/(2.0*sigma*sigma);
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( u=0; u < v; u++) {
kernel->values[u/KernelRank] +=
exp(-((double)(u*u))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
}
for (i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range += kernel->values[i];
#else
A = 1.0/(2.0*sigma*sigma); /* simplify the loop expression */
/* B = 1.0/(MagickSQ2PI*sigma); */
for ( i=0; i < (ssize_t) kernel->width; i++)
kernel->positive_range +=
kernel->values[i] = exp(-((double)(i*i))*A);
/* exp(-((double)(i*i))/2.0*sigma*sigma)/(MagickSQ2PI*sigma); */
#endif
}
else /* special case - generate a unity kernel */
{ (void) memset(kernel->values,0, (size_t)
kernel->width*kernel->height*sizeof(*kernel->values));
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
}
kernel->minimum = 0.0;
kernel->maximum = kernel->values[0];
kernel->negative_range = 0.0;
ScaleKernelInfo(kernel, 1.0, NormalizeValue); /* Normalize */
RotateKernelInfo(kernel, args->xi); /* Rotate by angle */
break;
}
case BinomialKernel:
{
size_t
order_f;
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
order_f = fact(kernel->width-1);
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
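      /* Illustration: the kernel is the outer product of a row of binomial
      ** coefficients with itself. For the default width of 5 that row is
      ** (1,4,6,4,1), giving a 5x5 kernel whose central value is 6*6 = 36.
      */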
      /* set each kernel value to a product of binomial coefficients */
for ( i=0, v=0; v < (ssize_t)kernel->height; v++)
{ size_t
alpha = order_f / ( fact((size_t) v) * fact(kernel->height-v-1) );
for ( u=0; u < (ssize_t)kernel->width; u++, i++)
kernel->positive_range += kernel->values[i] = (double)
(alpha * order_f / ( fact((size_t) u) * fact(kernel->height-u-1) ));
}
kernel->minimum = 1.0;
kernel->maximum = kernel->values[kernel->x+kernel->y*kernel->width];
kernel->negative_range = 0.0;
break;
}
/*
Convolution Kernels - Well Known Named Constant Kernels
*/
case LaplacianKernel:
{ switch ( (int) args->rho ) {
case 0:
default: /* laplacian square filter -- default */
kernel=ParseKernelArray("3: -1,-1,-1 -1,8,-1 -1,-1,-1");
break;
case 1: /* laplacian diamond filter */
kernel=ParseKernelArray("3: 0,-1,0 -1,4,-1 0,-1,0");
break;
case 2:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
break;
case 3:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 1,-2,1");
break;
case 5: /* a 5x5 laplacian */
kernel=ParseKernelArray(
"5: -4,-1,0,-1,-4 -1,2,3,2,-1 0,3,4,3,0 -1,2,3,2,-1 -4,-1,0,-1,-4");
break;
case 7: /* a 7x7 laplacian */
kernel=ParseKernelArray(
"7:-10,-5,-2,-1,-2,-5,-10 -5,0,3,4,3,0,-5 -2,3,6,7,6,3,-2 -1,4,7,8,7,4,-1 -2,3,6,7,6,3,-2 -5,0,3,4,3,0,-5 -10,-5,-2,-1,-2,-5,-10" );
break;
case 15: /* a 5x5 LoG (sigma approx 1.4) */
kernel=ParseKernelArray(
"5: 0,0,-1,0,0 0,-1,-2,-1,0 -1,-2,16,-2,-1 0,-1,-2,-1,0 0,0,-1,0,0");
break;
case 19: /* a 9x9 LoG (sigma approx 1.4) */
/* http://www.cscjournals.org/csc/manuscript/Journals/IJIP/volume3/Issue1/IJIP-15.pdf */
kernel=ParseKernelArray(
"9: 0,-1,-1,-2,-2,-2,-1,-1,0 -1,-2,-4,-5,-5,-5,-4,-2,-1 -1,-4,-5,-3,-0,-3,-5,-4,-1 -2,-5,-3,12,24,12,-3,-5,-2 -2,-5,-0,24,40,24,-0,-5,-2 -2,-5,-3,12,24,12,-3,-5,-2 -1,-4,-5,-3,-0,-3,-5,-4,-1 -1,-2,-4,-5,-5,-5,-4,-2,-1 0,-1,-1,-2,-2,-2,-1,-1,0");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
break;
}
case SobelKernel:
{ /* Simple Sobel Kernel */
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case RobertsKernel:
{
kernel=ParseKernelArray("3: 0,0,0 1,-1,0 0,0,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case PrewittKernel:
{
kernel=ParseKernelArray("3: 1,0,-1 1,0,-1 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case CompassKernel:
{
kernel=ParseKernelArray("3: 1,1,-1 1,-2,-1 1,1,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case KirschKernel:
{
kernel=ParseKernelArray("3: 5,-3,-3 5,0,-3 5,-3,-3");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->rho);
break;
}
case FreiChenKernel:
/* Direction is set to be left to right positive */
/* http://www.math.tau.ac.il/~turkel/notes/edge_detectors.pdf -- RIGHT? */
/* http://ltswww.epfl.ch/~courstiv/exos_labos/sol3.pdf -- WRONG? */
{ switch ( (int) args->rho ) {
default:
case 0:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
break;
case 2:
kernel=ParseKernelArray("3: 1,2,0 2,0,-2 0,-2,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = kernel->values[3]= +MagickSQ2;
kernel->values[5] = kernel->values[7]= -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
          ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 10:
kernel=AcquireKernelInfo("FreiChen:11;FreiChen:12;FreiChen:13;FreiChen:14;FreiChen:15;FreiChen:16;FreiChen:17;FreiChen:18;FreiChen:19");
if (kernel == (KernelInfo *) NULL)
return(kernel);
break;
case 1:
case 11:
kernel=ParseKernelArray("3: 1,0,-1 2,0,-2 1,0,-1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[3] = +MagickSQ2;
kernel->values[5] = -MagickSQ2;
CalcKernelMetaData(kernel); /* recalculate meta-data */
          ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 12:
kernel=ParseKernelArray("3: 1,2,1 0,0,0 1,2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[1] = +MagickSQ2;
kernel->values[7] = +MagickSQ2;
CalcKernelMetaData(kernel);
          ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 13:
kernel=ParseKernelArray("3: 2,-1,0 -1,0,1 0,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[0] = +MagickSQ2;
kernel->values[8] = -MagickSQ2;
CalcKernelMetaData(kernel);
          ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 14:
kernel=ParseKernelArray("3: 0,1,-2 -1,0,1 2,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->values[2] = -MagickSQ2;
kernel->values[6] = +MagickSQ2;
CalcKernelMetaData(kernel);
          ScaleKernelInfo(kernel, (double) (1.0/(2.0*MagickSQ2)), NoValue);
break;
case 15:
kernel=ParseKernelArray("3: 0,-1,0 1,0,1 0,-1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 16:
kernel=ParseKernelArray("3: 1,0,-1 0,0,0 -1,0,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/2.0, NoValue);
break;
case 17:
kernel=ParseKernelArray("3: 1,-2,1 -2,4,-2 -1,-2,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 18:
kernel=ParseKernelArray("3: -2,1,-2 1,4,1 -2,1,-2");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/6.0, NoValue);
break;
case 19:
kernel=ParseKernelArray("3: 1,1,1 1,1,1 1,1,1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ScaleKernelInfo(kernel, 1.0/3.0, NoValue);
break;
}
if ( fabs(args->sigma) >= MagickEpsilon )
/* Rotate by correctly supplied 'angle' */
RotateKernelInfo(kernel, args->sigma);
else if ( args->rho > 30.0 || args->rho < -30.0 )
/* Rotate by out of bounds 'type' */
RotateKernelInfo(kernel, args->rho);
break;
}
/*
Boolean or Shaped Kernels
*/
case DiamondKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values within diamond area to scale given */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <= (long) kernel->x)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case SquareKernel:
case RectangleKernel:
{ double
scale;
if ( type == SquareKernel )
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = (size_t) (2*args->rho+1);
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
scale = args->sigma;
}
else {
/* NOTE: user defaults set in "AcquireKernelInfo()" */
if ( args->rho < 1.0 || args->sigma < 1.0 )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->width = (size_t)args->rho;
kernel->height = (size_t)args->sigma;
if ( args->xi < 0.0 || args->xi > (double)kernel->width ||
args->psi < 0.0 || args->psi > (double)kernel->height )
return(DestroyKernelInfo(kernel)); /* invalid args given */
kernel->x = (ssize_t) args->xi;
kernel->y = (ssize_t) args->psi;
scale = 1.0;
}
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set all kernel values to scale given */
u=(ssize_t) (kernel->width*kernel->height);
for ( i=0; i < u; i++)
kernel->values[i] = scale;
kernel->minimum = kernel->maximum = scale; /* a flat shape */
kernel->positive_range = scale*u;
break;
}
case OctagonKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ( (labs((long) u)+labs((long) v)) <=
((long)kernel->x + (long)(kernel->x/2)) )
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case DiskKernel:
{
ssize_t
limit = (ssize_t)(args->rho*args->rho);
if (args->rho < 0.4) /* default radius approx 4.3 */
kernel->width = kernel->height = 9L, limit = 18L;
else
kernel->width = kernel->height = (size_t)fabs(args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
if ((u*u+v*v) <= limit)
kernel->positive_range += kernel->values[i] = args->sigma;
else
kernel->values[i] = nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
break;
}
case PlusKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
      /* set all kernel values along the axes to the given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == 0 || v == 0) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
case CrossKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 5; /* default radius 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
      /* set all kernel values along the diagonals to the given scale */
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->values[i] = (u == v || u == -v) ? args->sigma : nan;
kernel->minimum = kernel->maximum = args->sigma; /* a flat shape */
kernel->positive_range = args->sigma*(kernel->width*2.0 - 1.0);
break;
}
/*
HitAndMiss Kernels
*/
case RingKernel:
case PeaksKernel:
{
ssize_t
limit1,
limit2,
scale;
if (args->rho < args->sigma)
{
kernel->width = ((size_t)args->sigma)*2+1;
limit1 = (ssize_t)(args->rho*args->rho);
limit2 = (ssize_t)(args->sigma*args->sigma);
}
else
{
kernel->width = ((size_t)args->rho)*2+1;
limit1 = (ssize_t)(args->sigma*args->sigma);
limit2 = (ssize_t)(args->rho*args->rho);
}
if ( limit2 <= 0 )
kernel->width = 7L, limit1 = 7L, limit2 = 11L;
kernel->height = kernel->width;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
/* set a ring of points of 'scale' ( 0.0 for PeaksKernel ) */
scale = (ssize_t) (( type == PeaksKernel) ? 0.0 : args->xi);
for ( i=0, v= -kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{ ssize_t radius=u*u+v*v;
if (limit1 < radius && radius <= limit2)
kernel->positive_range += kernel->values[i] = (double) scale;
else
kernel->values[i] = nan;
}
kernel->minimum = kernel->maximum = (double) scale;
if ( type == PeaksKernel ) {
/* set the central point in the middle */
kernel->values[kernel->x+kernel->y*kernel->width] = 1.0;
kernel->positive_range = 1.0;
kernel->maximum = 1.0;
}
break;
}
case EdgesKernel:
{
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandMirrorKernelInfo(kernel); /* mirror expansion of kernels */
break;
}
case CornersKernel:
{
kernel=AcquireKernelInfo("ThinSE:87");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* Expand 90 degree rotations */
break;
}
case DiagonalsKernel:
{
switch ( (int) args->rho ) {
case 0:
default:
{ KernelInfo
*new_kernel;
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
new_kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
ExpandMirrorKernelInfo(kernel);
return(kernel);
}
case 1:
kernel=ParseKernelArray("3: 0,0,0 0,-,1 1,1,-");
break;
case 2:
kernel=ParseKernelArray("3: 0,0,1 0,-,1 0,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineEndsKernel:
{ /* Kernels for finding the end of thin lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all end of lines */
return(AcquireKernelInfo("LineEnds:1>;LineEnds:2>"));
case 1:
/* kernel for 4-connected line ends - no rotation */
kernel=ParseKernelArray("3: 0,0,- 0,1,1 0,0,-");
break;
case 2:
/* kernel to add for 8-connected lines - no rotation */
kernel=ParseKernelArray("3: 0,0,0 0,1,0 0,0,1");
break;
case 3:
/* kernel to add for orthogonal line ends - does not find corners */
kernel=ParseKernelArray("3: 0,0,0 0,1,1 0,0,0");
break;
case 4:
/* traditional line end - fails on last T end */
kernel=ParseKernelArray("3: 0,0,0 0,1,- 0,0,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case LineJunctionsKernel:
{ /* kernels for finding the junctions of multiple lines */
switch ( (int) args->rho ) {
case 0:
default:
/* set of kernels to find all line junctions */
return(AcquireKernelInfo("LineJunctions:1@;LineJunctions:2>"));
case 1:
/* Y Junction */
kernel=ParseKernelArray("3: 1,-,1 -,1,- -,1,-");
break;
case 2:
/* Diagonal T Junctions */
kernel=ParseKernelArray("3: 1,-,- -,1,- 1,-,1");
break;
case 3:
/* Orthogonal T Junctions */
kernel=ParseKernelArray("3: -,-,- 1,1,1 -,1,-");
break;
case 4:
/* Diagonal X Junctions */
kernel=ParseKernelArray("3: 1,-,1 -,1,- 1,-,1");
break;
case 5:
/* Orthogonal X Junctions - minimal diamond kernel */
kernel=ParseKernelArray("3: -,1,- 1,1,1 -,1,-");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
case RidgesKernel:
{ /* Ridges - Ridge finding kernels */
KernelInfo
*new_kernel;
switch ( (int) args->rho ) {
case 1:
default:
kernel=ParseKernelArray("3x1:0,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 2 rotated kernels (symmetrical) */
break;
case 2:
kernel=ParseKernelArray("4x1:0,1,1,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotated kernels */
/* Kernels to find a stepped 'thick' line, 4 rotates + mirrors */
          /* Unfortunately we can not yet rotate a non-square kernel */
          /* But then we can't flip a non-symmetrical kernel either */
new_kernel=ParseKernelArray("4x3+1+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:0,1,1,- -,1,1,- -,1,1,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+1+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("4x3+2+1:-,1,1,0 -,1,1,- 0,1,1,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:0,-,- 1,1,1 1,1,1 -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+1:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
new_kernel=ParseKernelArray("3x4+1+2:-,-,0 1,1,1 1,1,1 0,-,-");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
LastKernelInfo(kernel)->next = new_kernel;
break;
}
break;
}
case ConvexHullKernel:
{
KernelInfo
*new_kernel;
/* first set of 8 kernels */
kernel=ParseKernelArray("3: 1,1,- 1,0,- 1,-,0");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 90.0);
/* append the mirror versions too - no flip function yet */
new_kernel=ParseKernelArray("3: 1,1,1 1,0,- -,-,0");
if (new_kernel == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
new_kernel->type = type;
ExpandRotateKernelInfo(new_kernel, 90.0);
LastKernelInfo(kernel)->next = new_kernel;
break;
}
case SkeletonKernel:
{
switch ( (int) args->rho ) {
case 1:
default:
/* Traditional Skeleton...
** A cyclically rotated single kernel
*/
kernel=AcquireKernelInfo("ThinSE:482");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
ExpandRotateKernelInfo(kernel, 45.0); /* 8 rotations */
break;
case 2:
          /* HIPR2 Variation of the cyclic skeleton
          ** Corners of the traditional method made more forgiving,
          ** but they retain the same cyclic order.
*/
kernel=AcquireKernelInfo("ThinSE:482; ThinSE:87x90;");
if (kernel == (KernelInfo *) NULL)
return(kernel);
if (kernel->next == (KernelInfo *) NULL)
return(DestroyKernelInfo(kernel));
kernel->type = type;
kernel->next->type = type;
ExpandRotateKernelInfo(kernel, 90.0); /* 4 rotations of the 2 kernels */
break;
case 3:
/* Dan Bloomberg Skeleton, from his paper on 3x3 thinning SE's
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
*/
kernel=AcquireKernelInfo(
"ThinSE:41; ThinSE:42; ThinSE:43");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
kernel->next->type = type;
kernel->next->next->type = type;
ExpandMirrorKernelInfo(kernel); /* 12 kernels total */
break;
}
break;
}
case ThinSEKernel:
{ /* Special kernels for general thinning, while preserving connections
** "Connectivity-Preserving Morphological Image Thransformations"
** by Dan S. Bloomberg, available on Leptonica, Selected Papers,
** http://www.leptonica.com/papers/conn.pdf
** And
** http://tpgit.github.com/Leptonica/ccthin_8c_source.html
**
** Note kernels do not specify the origin pixel, allowing them
** to be used for both thickening and thinning operations.
*/
switch ( (int) args->rho ) {
/* SE for 4-connected thinning */
case 41: /* SE_4_1 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,-,1");
break;
case 42: /* SE_4_2 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 -,0,-");
break;
case 43: /* SE_4_3 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,-,1");
break;
case 44: /* SE_4_4 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,-");
break;
case 45: /* SE_4_5 */
kernel=ParseKernelArray("3: -,0,1 0,-,1 -,0,-");
break;
case 46: /* SE_4_6 */
kernel=ParseKernelArray("3: -,0,- 0,-,1 -,0,1");
break;
case 47: /* SE_4_7 */
kernel=ParseKernelArray("3: -,1,1 0,-,1 -,0,-");
break;
case 48: /* SE_4_8 */
kernel=ParseKernelArray("3: -,-,1 0,-,1 0,-,1");
break;
case 49: /* SE_4_9 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 -,-,1");
break;
/* SE for 8-connected thinning - negatives of the above */
case 81: /* SE_8_0 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 -,1,-");
break;
case 82: /* SE_8_2 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,-,-");
break;
case 83: /* SE_8_3 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 -,1,-");
break;
case 84: /* SE_8_4 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,-");
break;
case 85: /* SE_8_5 */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,-");
break;
case 86: /* SE_8_6 */
kernel=ParseKernelArray("3: 0,-,- 0,-,1 0,-,1");
break;
case 87: /* SE_8_7 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,0,-");
break;
case 88: /* SE_8_8 */
kernel=ParseKernelArray("3: -,1,- 0,-,1 0,1,-");
break;
case 89: /* SE_8_9 */
kernel=ParseKernelArray("3: 0,1,- 0,-,1 -,1,-");
break;
/* Special combined SE kernels */
case 423: /* SE_4_2 , SE_4_3 Combined Kernel */
kernel=ParseKernelArray("3: -,-,1 0,-,- -,0,-");
break;
case 823: /* SE_8_2 , SE_8_3 Combined Kernel */
kernel=ParseKernelArray("3: -,1,- -,-,1 0,-,-");
break;
case 481: /* SE_48_1 - General Connected Corner Kernel */
kernel=ParseKernelArray("3: -,1,1 0,-,1 0,0,-");
break;
default:
case 482: /* SE_48_2 - General Edge Kernel */
kernel=ParseKernelArray("3: 0,-,1 0,-,1 0,-,1");
break;
}
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = type;
RotateKernelInfo(kernel, args->sigma);
break;
}
/*
Distance Measuring Kernels
*/
case ChebyshevKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*MagickMax(fabs((double)u),fabs((double)v)) );
kernel->maximum = kernel->values[0];
break;
}
case ManhattanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*(labs((long) u)+labs((long) v)) );
kernel->maximum = kernel->values[0];
break;
}
case OctagonalKernel:
{
if (args->rho < 2.0)
kernel->width = kernel->height = 5; /* default/minimum radius = 2 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
{
double
r1 = MagickMax(fabs((double)u),fabs((double)v)),
r2 = floor((double)(labs((long)u)+labs((long)v)+1)/1.5);
kernel->positive_range += kernel->values[i] =
args->sigma*MagickMax(r1,r2);
}
kernel->maximum = kernel->values[0];
break;
}
case EuclideanKernel:
{
if (args->rho < 1.0)
kernel->width = kernel->height = 3; /* default radius = 1 */
else
kernel->width = kernel->height = ((size_t)args->rho)*2+1;
kernel->x = kernel->y = (ssize_t) (kernel->width-1)/2;
kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (kernel->values == (double *) NULL)
return(DestroyKernelInfo(kernel));
for ( i=0, v=-kernel->y; v <= (ssize_t)kernel->y; v++)
for ( u=-kernel->x; u <= (ssize_t)kernel->x; u++, i++)
kernel->positive_range += ( kernel->values[i] =
args->sigma*sqrt((double)(u*u+v*v)) );
kernel->maximum = kernel->values[0];
break;
}
default:
{
/* No-Op Kernel - Basically just a single pixel on its own */
kernel=ParseKernelArray("1:1");
if (kernel == (KernelInfo *) NULL)
return(kernel);
kernel->type = UndefinedKernel;
break;
}
}
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneKernelInfo() creates a new clone of the given Kernel List so that it
% can be modified without affecting the original. The cloned kernel should
% be destroyed using DestroyKernelInfo() when no longer needed.
%
% The format of the CloneKernelInfo method is:
%
% KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be cloned
%
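% For example (a hypothetical fragment), modify a clone while leaving the
% original kernel untouched:
%
%     KernelInfo
%       *clone;
%
%     clone = CloneKernelInfo(kernel);
%     if (clone != (KernelInfo *) NULL)
%       {
%         ScaleKernelInfo(clone, 2.0, NoValue);
%         clone = DestroyKernelInfo(clone);
%       }
%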
*/
MagickExport KernelInfo *CloneKernelInfo(const KernelInfo *kernel)
{
register ssize_t
i;
KernelInfo
*new_kernel;
assert(kernel != (KernelInfo *) NULL);
new_kernel=(KernelInfo *) AcquireMagickMemory(sizeof(*kernel));
if (new_kernel == (KernelInfo *) NULL)
return(new_kernel);
*new_kernel=(*kernel); /* copy values in structure */
/* replace the values with a copy of the values */
new_kernel->values=(double *) AcquireAlignedMemory(kernel->width,
kernel->height*sizeof(*kernel->values));
if (new_kernel->values == (double *) NULL)
return(DestroyKernelInfo(new_kernel));
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
new_kernel->values[i]=kernel->values[i];
/* Also clone the next kernel in the kernel list */
if ( kernel->next != (KernelInfo *) NULL ) {
new_kernel->next = CloneKernelInfo(kernel->next);
if ( new_kernel->next == (KernelInfo *) NULL )
return(DestroyKernelInfo(new_kernel));
}
return(new_kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyKernelInfo() frees the memory used by a Convolution/Morphology
% kernel.
%
% The format of the DestroyKernelInfo method is:
%
% KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to be destroyed
%
*/
MagickExport KernelInfo *DestroyKernelInfo(KernelInfo *kernel)
{
assert(kernel != (KernelInfo *) NULL);
if (kernel->next != (KernelInfo *) NULL)
kernel->next=DestroyKernelInfo(kernel->next);
kernel->values=(double *) RelinquishAlignedMemory(kernel->values);
kernel=(KernelInfo *) RelinquishMagickMemory(kernel);
return(kernel);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d M i r r o r K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandMirrorKernelInfo() takes a single kernel, and expands it into a
% sequence of 90-degree rotated kernels, but providing a reflected 180
% rotation before the -/+ 90-degree rotations.
%
% This special rotation order produces a better, more symmetrical thinning of
% objects.
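%
% For example, a single kernel K is expanded into the four-kernel list
% K, K rotated 180, K rotated 270, then K rotated 90 (each rotation below
% being applied to the previously generated clone).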
%
% The format of the ExpandMirrorKernelInfo method is:
%
% void ExpandMirrorKernelInfo(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
#if 0
static void FlopKernelInfo(KernelInfo *kernel)
{ /* Do a Flop by reversing each row. */
size_t
y;
register ssize_t
x,r;
register double
*k,t;
for ( y=0, k=kernel->values; y < kernel->height; y++, k+=kernel->width)
    for ( x=0, r=(ssize_t) kernel->width-1; x < r; x++, r--)
      t=k[x], k[x]=k[r], k[r]=t;
  kernel->x = (ssize_t) kernel->width - kernel->x - 1;
  kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
#endif
static void ExpandMirrorKernelInfo(KernelInfo *kernel)
{
KernelInfo
*clone,
*last;
last = kernel;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 180); /* flip */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 90); /* transpose */
LastKernelInfo(last)->next = clone;
last = clone;
clone = CloneKernelInfo(last);
if (clone == (KernelInfo *) NULL)
return;
RotateKernelInfo(clone, 180); /* flop */
LastKernelInfo(last)->next = clone;
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ E x p a n d R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ExpandRotateKernelInfo() takes a kernel list, and expands it by rotating
% incrementally by the angle given, until the kernel repeats.
%
% WARNING: 45 degree rotations only work for 3x3 kernels,
% while 90 degree rotations only work for linear and square kernels.
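%
% For example, expanding a 3x3 kernel by 45 degrees produces a list of 8
% kernels, while expanding a square kernel by 90 degrees produces 4; the
% expansion stops as soon as a rotation reproduces the original kernel.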
%
% The format of the ExpandRotateKernelInfo method is:
%
% void ExpandRotateKernelInfo(KernelInfo *kernel,double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is only internal to this module, as it is not finalized,
% especially with regard to non-orthogonal angles, and rotation of larger
% 2D kernels.
*/
/* Internal Routine - Return true if two kernels are the same */
static MagickBooleanType SameKernelInfo(const KernelInfo *kernel1,
const KernelInfo *kernel2)
{
register size_t
i;
/* check size and origin location */
if ( kernel1->width != kernel2->width
|| kernel1->height != kernel2->height
|| kernel1->x != kernel2->x
|| kernel1->y != kernel2->y )
return MagickFalse;
/* check actual kernel values */
for (i=0; i < (kernel1->width*kernel1->height); i++) {
/* Test for Nan equivalence */
if ( IsNaN(kernel1->values[i]) && !IsNaN(kernel2->values[i]) )
return MagickFalse;
if ( IsNaN(kernel2->values[i]) && !IsNaN(kernel1->values[i]) )
return MagickFalse;
/* Test actual values are equivalent */
if ( fabs(kernel1->values[i] - kernel2->values[i]) >= MagickEpsilon )
return MagickFalse;
}
return MagickTrue;
}
static void ExpandRotateKernelInfo(KernelInfo *kernel,const double angle)
{
KernelInfo
*clone_info,
*last;
clone_info=(KernelInfo *) NULL;
last=kernel;
DisableMSCWarning(4127)
while (1) {
RestoreMSCWarning
clone_info=CloneKernelInfo(last);
if (clone_info == (KernelInfo *) NULL)
break;
RotateKernelInfo(clone_info,angle);
if (SameKernelInfo(kernel,clone_info) != MagickFalse)
break;
LastKernelInfo(last)->next=clone_info;
last=clone_info;
}
if (clone_info != (KernelInfo *) NULL)
clone_info=DestroyKernelInfo(clone_info); /* kernel repeated - junk */
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+     C a l c K e r n e l M e t a D a t a                                     %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CalcKernelMetaData() recalculates the KernelInfo meta-data of this kernel
% only, using the kernel values. This should only be used if it is not
% possible to calculate that meta-data in some easier way.
%
% It is important that the meta-data is correct before ScaleKernelInfo() is
% used to perform kernel normalization.
%
% The format of the CalcKernelMetaData method is:
%
%      void CalcKernelMetaData(KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% WARNING: Minimum and Maximum values are assumed to include zero, even if
% zero is not part of the kernel (as in Gaussian Derived kernels). This
% however is not true for flat-shaped morphological kernels.
%
% WARNING: Only the specific kernel pointed to is modified, not a list of
% multiple kernels.
%
% This is an internal function and not expected to be useful outside this
% module. This could change however.
*/
static void CalcKernelMetaData(KernelInfo *kernel)
{
register size_t
i;
kernel->minimum = kernel->maximum = 0.0;
kernel->negative_range = kernel->positive_range = 0.0;
for (i=0; i < (kernel->width*kernel->height); i++)
{
if ( fabs(kernel->values[i]) < MagickEpsilon )
kernel->values[i] = 0.0;
( kernel->values[i] < 0)
? ( kernel->negative_range += kernel->values[i] )
: ( kernel->positive_range += kernel->values[i] );
Minimize(kernel->minimum, kernel->values[i]);
Maximize(kernel->maximum, kernel->values[i]);
}
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y A p p l y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyApply() applies a morphological method, multiple times using
% a list of multiple kernels. This is the method that should be called by
% other 'operators' that internally use morphology operations as part of
% their processing.
%
% It is basically equivalent to MorphologyImage() (see below) but
% without any user controls. This allows internal programs to use this
% function to actually perform a specific task without possible interference
% from any API user supplied settings.
%
% It is MorphologyImage()'s task to extract any such user controls, and
% pass them to this function for processing.
%
% More specifically, all given kernels should already be scaled, normalised,
% and blended appropriately before being passed to this routine, with the
% appropriate bias and compose (typically 'UndefinedCompositeOp') given.
%
% The format of the MorphologyApply method is:
%
% Image *MorphologyApply(const Image *image,MorphologyMethod method,
% const ChannelType channel, const ssize_t iterations,
% const KernelInfo *kernel, const CompositeMethod compose,
% const double bias, ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the source image
%
% o method: the morphology method to be applied.
%
% o channel: the channels to which the operations are applied
% The channel 'sync' flag determines if 'alpha weighting' is
% applied for convolution style operations.
%
%    o iterations: apply the operation this many times (or until no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o kernel: An array of double representing the morphology kernel.
%
% o compose: How to handle or merge multi-kernel results.
% If 'UndefinedCompositeOp' use default for the Morphology method.
% If 'NoCompositeOp' force image to be re-iterated by each kernel.
% Otherwise merge the results using the compose method given.
%
% o bias: Convolution Output Bias.
%
% o exception: return any errors or warnings in this structure.
%
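%  For example (a hypothetical fragment; error handling elided):
%
%      kernel = AcquireKernelInfo("Octagon");
%      result = MorphologyApply(image, DilateMorphology, DefaultChannels,
%                 1, kernel, UndefinedCompositeOp, 0.0, exception);
%      kernel = DestroyKernelInfo(kernel);
%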
*/
/* Apply a Morphology Primitive to an image using the given kernel.
** Two pre-created images must be provided, and no image is created.
** It returns the number of pixels that changed between the images
** for result convergence determination.
*/
static ssize_t MorphologyPrimitive(const Image *image, Image *result_image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,const double bias,ExceptionInfo *exception)
{
#define MorphologyTag "Morphology/Image"
CacheView
*p_view,
*q_view;
register ssize_t
i;
size_t
*changes,
changed,
virt_width;
ssize_t
y,
offx,
offy;
MagickBooleanType
status;
MagickOffsetType
progress;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(result_image != (Image *) NULL);
assert(result_image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
status=MagickTrue;
progress=0;
p_view=AcquireVirtualCacheView(image,exception);
q_view=AcquireAuthenticCacheView(result_image,exception);
virt_width=image->columns+kernel->width-1;
  /* Some methods (including convolve) need to use a reflected kernel.
   * Adjust 'origin' offsets to loop through the kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case ConvolveMorphology:
case DilateMorphology:
case DilateIntensityMorphology:
case IterativeDistanceMorphology:
      /* kernel needs to be used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
case ErodeMorphology:
case ErodeIntensityMorphology:
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* kernel is used as is, without reflection */
break;
default:
assert("Not a Primitive Morphology Method" != (char *) NULL);
break;
}
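  /* Illustration: for a 3x3 convolve kernel with origin (0,0), the
  ** reflected offsets become offx = 3-0-1 = 2 and offy = 2, so the kernel
  ** is walked as though its origin were at the opposite corner.
  */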
changed=0;
changes=(size_t *) AcquireQuantumMemory(GetOpenMPMaximumThreads(),
sizeof(*changes));
if (changes == (size_t *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
changes[i]=0;
if ( method == ConvolveMorphology && kernel->width == 1 )
{ /* Special handling (for speed) of vertical (blur) kernels.
** This performs its handling in columns rather than in rows.
** This is only done for convolve as it is the only method that
** generates very large 1-D vertical kernels (such as a 'BlurKernel')
**
** Timing tests (on single CPU laptop)
    ** Using a vertical 1-d Blur with normal row-by-row (below)
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.807u
** Using this column method
** time convert logo: -morphology Convolve Blur:0x10+90 null:
** 0.620u
**
** Anthony Thyssen, 14 June 2010
*/
register ssize_t
x;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,result_image,image->columns,1)
#endif
for (x=0; x < (ssize_t) image->columns; x++)
{
const int
id = GetOpenMPThreadId();
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
y;
ssize_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view,x,-offy,1,image->rows+kernel->height-1,
exception);
q=GetCacheViewAuthenticPixels(q_view,x,0,1,result_image->rows,exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
      /* offset to origin in 'p', while 'q' points to it directly */
r = offy;
for (y=0; y < (ssize_t) image->rows; y++)
{
DoublePixelPacket
result;
register ssize_t
v;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
/* Copy input image to the output image for unused channels
           * This removes the need for 'cloning' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,GetPixelIndex(p_indexes+y+r));
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
/* Weighted Average of pixels using reflected kernel
**
** NOTE for correct working of this operation for asymmetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is, its values need to be reversed.
*/
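/* For instance (a sketch, not from the original source): a 1-D
** vertical kernel {k0,k1,k2} with origin y=1 is walked here from
** k[height-1] down to k[0] while the pixel pointer walks forward,
** so the weighted sum computed is
**   result = k2*p[y-1] + k1*p[y] + k0*p[y+1]
** which is exactly the reflected (convolution) form of the kernel.
*/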
k = &kernel->values[ kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+y;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is a simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNaN(*k) ) continue;
result.red += (*k)*GetPixelRed(k_pixels);
result.green += (*k)*GetPixelGreen(k_pixels);
result.blue += (*k)*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+y,ClampToQuantum(result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
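/* Illustrative formula (a sketch, with Q = QuantumRange):
**   alpha_i = (Q - opacity_i)/Q
**   color   = gamma * ( bias + sum_i( k_i * alpha_i * color_i ) )
**   gamma  ~= 1 / sum_i( k_i * alpha_i )
** so fully transparent pixels (alpha_i = 0) contribute nothing to
** the color result, while the opacity channel itself remains a
** plain unweighted convolution.
*/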
double
gamma; /* divisor, sum of color alpha weighting */
MagickRealType
alpha; /* alpha weighting for colors : alpha */
size_t
count; /* alpha values collected, number of kernel values */
count=0;
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
if ( IsNaN(*k) ) continue;
alpha=QuantumScale*(QuantumRange-GetPixelOpacity(k_pixels));
count++; /* number of alpha values collected */
alpha*=(*k); /* include kernel weighting now */
gamma += alpha; /* normalize alpha weights only */
result.red += alpha*GetPixelRed(k_pixels);
result.green += alpha*GetPixelGreen(k_pixels);
result.blue += alpha*GetPixelBlue(k_pixels);
result.opacity += (*k)*GetPixelOpacity(k_pixels);
if ( image->colorspace == CMYKColorspace)
result.index += alpha*(*k_indexes);
k--;
k_pixels++;
k_indexes++;
}
/* Sync'ed channels, all channels are modified */
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height/count;
SetPixelRed(q,ClampToQuantum(gamma*result.red));
SetPixelGreen(q,ClampToQuantum(gamma*result.green));
SetPixelBlue(q,ClampToQuantum(gamma*result.blue));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+y,ClampToQuantum(gamma*result.index));
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q))
|| ( p[r].green != GetPixelGreen(q))
|| ( p[r].blue != GetPixelBlue(q))
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+y+r) != GetPixelIndex(q_indexes+y))) )
changes[id]++;
p++;
q++;
} /* y */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,image->columns);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* x */
result_image->type=image->type;
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
changed+=changes[i];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t) changed : -1);
}
/*
** Normal handling of horizontal or rectangular kernels (row by row)
*/
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static) shared(progress,status) \
magick_number_threads(image,result_image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
size_t
r;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(p_view, -offx, y-offy, virt_width,
kernel->height, exception);
q=GetCacheViewAuthenticPixels(q_view,0,y,result_image->columns,1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
{
status=MagickFalse;
continue;
}
p_indexes=GetCacheViewVirtualIndexQueue(p_view);
q_indexes=GetCacheViewAuthenticIndexQueue(q_view);
/* offset to origin in 'p', while 'q' points to it directly */
r = virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
DoublePixelPacket
result,
min,
max;
/* Copy input image to the output image for unused channels
* This removes the need to 'clone' a new image every iteration
*/
*q = p[r];
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,GetPixelIndex(p_indexes+x+r));
/* Defaults */
min.red =
min.green =
min.blue =
min.opacity =
min.index = (double) QuantumRange;
max.red =
max.green =
max.blue =
max.opacity =
max.index = 0.0;
/* default result is the original pixel value */
result.red = (double) p[r].red;
result.green = (double) p[r].green;
result.blue = (double) p[r].blue;
result.opacity = QuantumRange - (double) p[r].opacity;
result.index = 0.0;
if ( image->colorspace == CMYKColorspace)
result.index = (double) GetPixelIndex(p_indexes+x+r);
switch (method) {
case ConvolveMorphology:
/* Set the bias of the weighted average output */
result.red =
result.green =
result.blue =
result.opacity =
result.index = bias;
break;
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
/* use a boolean flag indicating when first match found */
result.red = 0.0; /* result is not used otherwise */
break;
default:
break;
}
switch ( method ) {
case ConvolveMorphology:
/* Weighted Average of pixels using reflected kernel
**
** NOTE for correct working of this operation for asymmetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is, its values need to be reversed.
**
** Correlation is actually the same as this but without reflecting
** the kernel, and thus 'lower-level' than Convolution. However,
** as Convolution is the more common method, and a reflected kernel
** costs us little extra processing, it is Convolution that is
** implemented here.
**
** Correlation will have its kernel reflected before calling
** this function to do a Convolve.
**
** For more details of Correlation vs Convolution see
** http://www.cs.umd.edu/~djacobs/CMSC426/Convolution.pdf
*/
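/* A small worked example (illustrative only): for a 1-D kernel
** K = {1,2,3} with a centered origin,
**   correlation at x: 1*p[x-1] + 2*p[x] + 3*p[x+1]
**   convolution at x: 3*p[x-1] + 2*p[x] + 1*p[x+1]
** i.e. convolving with K is correlating with K reversed, which is
** why CorrelateMorphology simply hands this code a pre-reflected
** kernel (see MorphologyApply below).
*/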
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
if ( ((channel & SyncChannels) == 0 ) ||
(image->matte == MagickFalse) )
{ /* No 'Sync' involved.
** Convolution is a simple greyscale channel operation
*/
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
result.red += (*k)*k_pixels[u].red;
result.green += (*k)*k_pixels[u].green;
result.blue += (*k)*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index += (*k)*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum((MagickRealType) result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum((MagickRealType) result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum((MagickRealType) result.blue));
if (((channel & OpacityChannel) != 0) &&
(image->matte != MagickFalse))
SetPixelOpacity(q,ClampToQuantum((MagickRealType) result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
}
else
{ /* Channel 'Sync' Flag, and Alpha Channel enabled.
** Weight the color channels with Alpha Channel so that
** transparent pixels are not part of the results.
*/
double
alpha, /* alpha weighting for colors : alpha */
gamma; /* divisor, sum of color alpha weighting */
size_t
count; /* alpha values collected, number of kernel values */
count=0;
gamma=0.0;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
alpha=QuantumScale*(QuantumRange-k_pixels[u].opacity);
count++; /* number of alpha values collected */
alpha*=(*k); /* include kernel weighting now */
gamma += alpha; /* normalize alpha weights only */
result.red += alpha*k_pixels[u].red;
result.green += alpha*k_pixels[u].green;
result.blue += alpha*k_pixels[u].blue;
result.opacity += (*k)*k_pixels[u].opacity;
if ( image->colorspace == CMYKColorspace)
result.index+=alpha*GetPixelIndex(k_indexes+u);
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Sync'ed channels, all channels are modified */
gamma=PerceptibleReciprocal(gamma);
if (count != 0)
gamma*=(double) kernel->height*kernel->width/count;
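/* e.g. (illustrative): a 3x3 kernel with 3 NaN ('don't care')
** entries has count=6, so gamma is scaled by 9/6 to compensate
** for the missing kernel values.
*/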
SetPixelRed(q,ClampToQuantum((MagickRealType) (gamma*result.red)));
SetPixelGreen(q,ClampToQuantum((MagickRealType) (gamma*result.green)));
SetPixelBlue(q,ClampToQuantum((MagickRealType) (gamma*result.blue)));
SetPixelOpacity(q,ClampToQuantum(result.opacity));
if (image->colorspace == CMYKColorspace)
SetPixelIndex(q_indexes+x,ClampToQuantum((MagickRealType) (gamma*
result.index)));
}
break;
case ErodeMorphology:
/* Minimum Value within kernel neighbourhood
**
** NOTE that the kernel is not reflected for this operation!
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value, this is currently not done, due to
** the nature of the boolean kernels being used.
*/
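/* That is (a sketch): the code below computes
**   min{ p[u,v] : k[u,v] >= 0.5 }
** whereas true greyscale erosion would instead compute
**   min{ p[u,v] - k[u,v] }
** over the whole kernel support (compare the GrayErode note in
** the IterativeDistance comment further below).
*/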
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue;
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateMorphology:
/* Maximum Value within kernel neighbourhood
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
**
** NOTE: in normal Greyscale Morphology, the kernel value should
** be added to the real value, this is currently not done, due to
** the nature of the boolean kernels being used.
**
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue;
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case HitAndMissMorphology:
case ThinningMorphology:
case ThickenMorphology:
/* Minimum of Foreground Pixels minus Maximum of Background Pixels
**
** NOTE that the kernel is not reflected for this operation,
** and consists of both foreground and background pixel
** neighbourhoods: 0.0 for background, 1.0 for foreground,
** with either NaN or 0.5 values for 'don't care'.
**
** Note that the clamping below ensures this never produces a
** meaningless negative result; such results would cause
** Thinning/Thicken to not work correctly on a greyscale image.
*/
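/* For a pure binary image this reduces to (illustrative example):
** min(foreground) - max(background) = QuantumRange - 0 only where
** every 1.0 kernel entry sits on white AND every 0.0 entry sits on
** black; anywhere else the clamped difference is 0 (no match).
*/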
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNaN(*k) ) continue;
if ( (*k) > 0.7 )
{ /* minimum of foreground pixels */
Minimize(min.red, (double) k_pixels[u].red);
Minimize(min.green, (double) k_pixels[u].green);
Minimize(min.blue, (double) k_pixels[u].blue);
Minimize(min.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(min.index,(double) GetPixelIndex(
k_indexes+u));
}
else if ( (*k) < 0.3 )
{ /* maximum of background pixels */
Maximize(max.red, (double) k_pixels[u].red);
Maximize(max.green, (double) k_pixels[u].green);
Maximize(max.blue, (double) k_pixels[u].blue);
Maximize(max.opacity,
QuantumRange-(double) k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Maximize(max.index, (double) GetPixelIndex(
k_indexes+u));
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* Pattern Match if difference is positive */
min.red -= max.red; Maximize( min.red, 0.0 );
min.green -= max.green; Maximize( min.green, 0.0 );
min.blue -= max.blue; Maximize( min.blue, 0.0 );
min.opacity -= max.opacity; Maximize( min.opacity, 0.0 );
min.index -= max.index; Maximize( min.index, 0.0 );
break;
case ErodeIntensityMorphology:
/* Select Pixel with Minimum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity.
**
** NOTE that the kernel is not reflected for this operation!
*/
k = kernel->values;
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k++) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue;
if ( result.red == 0.0 ||
GetPixelIntensity(image,&(k_pixels[u])) < GetPixelIntensity(result_image,q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changes[id]++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case DilateIntensityMorphology:
/* Select Pixel with Maximum Intensity within kernel neighbourhood
**
** WARNING: the intensity test fails for CMYK and does not
** take into account the moderating effect of the alpha channel
** on the intensity (yet).
**
** NOTE for correct working of this operation for asymetrical
** kernels, the kernel needs to be applied in its reflected form.
** That is its values needs to be reversed.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) || (*k) < 0.5 ) continue; /* boolean kernel */
if ( result.red == 0.0 ||
GetPixelIntensity(image,&(k_pixels[u])) > GetPixelIntensity(result_image,q) ) {
/* copy the whole pixel - no channel selection */
*q = k_pixels[u];
if ( result.red > 0.0 ) changes[id]++;
result.red = 1.0;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case IterativeDistanceMorphology:
/* Work out an iterative distance from the black edge of a white
** image shape. Essentially, white values are decreased to the
** smallest 'distance from edge' that can be found.
**
** It works by adding kernel values to the neighbourhood and
** selecting the minimum value found. The kernel is rotated before
** use, so kernel distances match resulting distances when a user
** provided asymmetric kernel is applied.
**
** This code is almost identical to True GrayScale Morphology, but
** not quite:
**
** GrayDilate: kernel values added, maximum value found; kernel is
** rotated before use.
**
** GrayErode: kernel values subtracted, minimum value found; no
** kernel rotation used.
**
** Note that the Iterative Distance method is essentially a
** GrayErode, but with negative kernel values and kernel
** rotation applied.
*/
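/* Illustrative sketch: with a distance kernel whose values are the
** distance of each element from the origin (e.g. a Chebyshev or
** Euclidean kernel), each pass computes
**   p[x,y] = min( p[x,y], min_{u,v}( k[u,v] + p[x+u,y+v] ) )
** so distance-from-edge values flow inward from the shape
** boundary, roughly one kernel radius per iteration.
*/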
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
break;
case UndefinedMorphology:
default:
break; /* Do nothing */
}
/* Final mathematics of results (combine with original image?)
**
** NOTE: the Difference Morphology operators Edge* and *Hat could also
** be done here, but they work better with iteration as an image
** difference in the controlling function (below). Thicken and Thinning
** however should be done here so they can be iterated correctly.
*/
switch ( method ) {
case HitAndMissMorphology:
case ErodeMorphology:
result = min; /* minimum of neighbourhood */
break;
case DilateMorphology:
result = max; /* maximum of neighbourhood */
break;
case ThinningMorphology:
/* subtract pattern match from original */
result.red -= min.red;
result.green -= min.green;
result.blue -= min.blue;
result.opacity -= min.opacity;
result.index -= min.index;
break;
case ThickenMorphology:
/* Add the pattern matches to the original */
result.red += min.red;
result.green += min.green;
result.blue += min.blue;
result.opacity += min.opacity;
result.index += min.index;
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case UndefinedMorphology:
case ConvolveMorphology:
case DilateIntensityMorphology:
case ErodeIntensityMorphology:
break; /* full pixel was directly assigned - not a channel method */
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if ((channel & OpacityChannel) != 0
&& image->matte != MagickFalse )
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
changes[id]++;
p++;
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(q_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
proceed=SetImageProgress(image,MorphologyTag,progress,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
} /* y */
q_view=DestroyCacheView(q_view);
p_view=DestroyCacheView(p_view);
for (i=0; i < (ssize_t) GetOpenMPMaximumThreads(); i++)
changed+=changes[i];
changes=(size_t *) RelinquishMagickMemory(changes);
return(status ? (ssize_t)changed : -1);
}
/* This is almost identical to the MorphologyPrimitive() function above,
** but will apply the primitive directly to the actual image using two
** passes, once in each direction, with the results of the previous (and
** current) row being re-used.
**
** That is, after each row is 'Sync'ed into the image, the next row will
** make use of those values as part of its own calculation.
** It then repeats, but going in the opposite (bottom-up) direction.
**
** Because of this 're-use of results' this function cannot make use
** of multi-threaded, parallel processing.
*/
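/* Sketch of the two-pass scheme (not part of the original source):
**
**   pass 1 (top-down, left-to-right):
**     p[x,y] = min( p[x,y], k[u,v] + p[already-visited neighbour] )
**     using the top half of the kernel plus the processed part
**     of the current row;
**   pass 2 (bottom-up, right-to-left):
**     the same update using the other half of the kernel.
**
** This is the classic two-pass chamfer distance transform: because
** each pixel re-uses freshly written results, distances propagate
** across the whole image in just two sweeps, which is also why the
** rows cannot be processed in parallel.
*/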
static ssize_t MorphologyPrimitiveDirect(Image *image,
const MorphologyMethod method, const ChannelType channel,
const KernelInfo *kernel,ExceptionInfo *exception)
{
CacheView
*auth_view,
*virt_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y, offx, offy;
size_t
changed,
virt_width;
status=MagickTrue;
changed=0;
progress=0;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
/* Some methods (including convolve) need to use a reflected kernel.
* Adjust 'origin' offsets to loop through the kernel as a reflection.
*/
offx = kernel->x;
offy = kernel->y;
switch(method) {
case DistanceMorphology:
case VoronoiMorphology:
/* kernel needs to be used with reflection about origin */
offx = (ssize_t) kernel->width-offx-1;
offy = (ssize_t) kernel->height-offy-1;
break;
#if 0
case ?????Morphology:
/* kernel is used as is, without reflection */
break;
#endif
default:
assert("Not a PrimativeDirect Morphology Method" != (char *) NULL);
break;
}
/* DO NOT THREAD THIS CODE! */
/* two views into same image (virtual, and actual) */
virt_view=AcquireVirtualCacheView(image,exception);
auth_view=AcquireAuthenticCacheView(image,exception);
virt_width=image->columns+kernel->width-1;
for (y=0; y < (ssize_t) image->rows; y++)
{
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
ssize_t
r;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only top half of kernel is processed as we do a single pass downward
** through the image iterating the distance function as we go.
*/
if (status == MagickFalse)
break;
p=GetCacheViewVirtualPixels(virt_view, -offx, y-offy, virt_width, (size_t) offy+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* offset to origin in 'p', while 'q' points to it directly */
r = (ssize_t) virt_width*offy + offx;
for (x=0; x < (ssize_t) image->columns; x++)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
MagickPixelPacket
result;
/* Starting Defaults */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, while copying the color
** values of the closest pixel.
**
** This is experimental; really the 'alpha' component should
** be a completely separate 'masking' channel so that alpha can
** also be used as part of the results.
*/
k = &kernel->values[ kernel->width*kernel->height-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=0; v <= (ssize_t) offy; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=0; u < (ssize_t) offx; u++, k--) {
if ( x+u-offx < 0 ) continue; /* off the edge! */
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
changed++; /* The pixel was changed in some way! */
p++; /* increment pixel buffers */
q++;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
if (SetImageProgress(image,MorphologyTag,progress,image->rows) == MagickFalse )
status=MagickFalse;
}
} /* y */
/* Do the reversed pass through the image */
for (y=(ssize_t)image->rows-1; y >= 0; y--)
{
register const PixelPacket
*magick_restrict p;
register const IndexPacket
*magick_restrict p_indexes;
register PixelPacket
*magick_restrict q;
register IndexPacket
*magick_restrict q_indexes;
register ssize_t
x;
ssize_t
r;
if (status == MagickFalse)
break;
/* NOTE read virtual pixels, and authentic pixels, from the same image!
** we read using virtual to get virtual pixel handling, but write back
** into the same image.
**
** Only the bottom half of the kernel will be processed as we
** move up the image.
*/
p=GetCacheViewVirtualPixels(virt_view, -offx, y, virt_width, (size_t) kernel->y+1,
exception);
q=GetCacheViewAuthenticPixels(auth_view, 0, y, image->columns, 1,
exception);
if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
status=MagickFalse;
if (status == MagickFalse)
break;
p_indexes=GetCacheViewVirtualIndexQueue(virt_view);
q_indexes=GetCacheViewAuthenticIndexQueue(auth_view);
/* adjust positions to end of row */
p += image->columns-1;
q += image->columns-1;
/* offset to origin in 'p', while 'q' points to it directly */
r = offx;
for (x=(ssize_t)image->columns-1; x >= 0; x--)
{
ssize_t
v;
register ssize_t
u;
register const double
*magick_restrict k;
register const PixelPacket
*magick_restrict k_pixels;
register const IndexPacket
*magick_restrict k_indexes;
MagickPixelPacket
result;
/* Default - previously modified pixel */
GetMagickPixelPacket(image,&result);
SetMagickPixelPacket(image,q,q_indexes,&result);
if ( method != VoronoiMorphology )
result.opacity = QuantumRange - result.opacity;
switch ( method ) {
case DistanceMorphology:
/* Add kernel Value and select the minimum value found. */
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index,(*k)+GetPixelIndex(k_indexes+u));
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
Minimize(result.red, (*k)+k_pixels[u].red);
Minimize(result.green, (*k)+k_pixels[u].green);
Minimize(result.blue, (*k)+k_pixels[u].blue);
Minimize(result.opacity, (*k)+QuantumRange-k_pixels[u].opacity);
if ( image->colorspace == CMYKColorspace)
Minimize(result.index, (*k)+GetPixelIndex(k_indexes+u));
}
break;
case VoronoiMorphology:
/* Apply Distance to 'Matte' channel, copying the closest color.
**
** This is experimental; really the 'alpha' component should
** be a completely separate 'masking' channel.
*/
k = &kernel->values[ kernel->width*(kernel->y+1)-1 ];
k_pixels = p;
k_indexes = p_indexes+x;
for (v=offy; v < (ssize_t) kernel->height; v++) {
for (u=0; u < (ssize_t) kernel->width; u++, k--) {
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
k_pixels += virt_width;
k_indexes += virt_width;
}
/* repeat with the just processed pixels of this row */
k = &kernel->values[ kernel->width*(kernel->y)+kernel->x-1 ];
k_pixels = q-offx;
k_indexes = q_indexes-offx;
for (u=offx+1; u < (ssize_t) kernel->width; u++, k--) {
if ( (x+u-offx) >= (ssize_t)image->columns ) continue;
if ( IsNaN(*k) ) continue;
if( result.opacity > (*k)+k_pixels[u].opacity )
{
SetMagickPixelPacket(image,&k_pixels[u],&k_indexes[u],
&result);
result.opacity += *k;
}
}
break;
default:
/* result directly calculated or assigned */
break;
}
/* Assign the resulting pixel values - Clamping Result */
switch ( method ) {
case VoronoiMorphology:
SetPixelPacket(image,&result,q,q_indexes);
break;
default:
if ((channel & RedChannel) != 0)
SetPixelRed(q,ClampToQuantum(result.red));
if ((channel & GreenChannel) != 0)
SetPixelGreen(q,ClampToQuantum(result.green));
if ((channel & BlueChannel) != 0)
SetPixelBlue(q,ClampToQuantum(result.blue));
if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse))
SetPixelAlpha(q,ClampToQuantum(result.opacity));
if (((channel & IndexChannel) != 0) &&
(image->colorspace == CMYKColorspace))
SetPixelIndex(q_indexes+x,ClampToQuantum(result.index));
break;
}
/* Count up changed pixels */
if ( ( p[r].red != GetPixelRed(q) )
|| ( p[r].green != GetPixelGreen(q) )
|| ( p[r].blue != GetPixelBlue(q) )
|| ( (image->matte != MagickFalse) &&
(p[r].opacity != GetPixelOpacity(q)))
|| ( (image->colorspace == CMYKColorspace) &&
(GetPixelIndex(p_indexes+x+r) != GetPixelIndex(q_indexes+x))) )
changed++; /* The pixel was changed in some way! */
p--; /* go backward through pixel buffers */
q--;
} /* x */
if ( SyncCacheViewAuthenticPixels(auth_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp atomic
#endif
progress++;
if ( SetImageProgress(image,MorphologyTag,progress,image->rows) == MagickFalse )
status=MagickFalse;
}
} /* y */
auth_view=DestroyCacheView(auth_view);
virt_view=DestroyCacheView(virt_view);
return(status ? (ssize_t) changed : -1);
}
/* Apply a Morphology by calling one of the above low level primitive
** application functions. This function handles any iteration loops,
** composition or re-iteration of results, and compound morphology methods
** that are based on multiple low-level (staged) morphology methods.
**
** Basically this provides the complex glue between the requested morphology
** method and raw low-level implementation (above).
*/
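/* The loop nesting used below, as a rough sketch:
**
**   Loop 1: method_loop - iterate a compound method (Thinning etc.)
**     Loop 2: kernel_number - each kernel of a multi-kernel list
**       Loop 3: stage_loop - primitives making up a compound method
**         Loop 4: kernel_loop - re-apply one primitive repeatedly
**
** For example OpenMorphology has stage_limit=2: an Erode stage
** followed by a Dilate stage, each of which may itself be iterated.
*/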
MagickExport Image *MorphologyApply(const Image *image, const ChannelType
channel,const MorphologyMethod method, const ssize_t iterations,
const KernelInfo *kernel, const CompositeOperator compose,
const double bias, ExceptionInfo *exception)
{
CompositeOperator
curr_compose;
Image
*curr_image, /* Image we are working with or iterating */
*work_image, /* secondary image for primitive iteration */
*save_image, /* saved image - for 'edge' method only */
*rslt_image; /* resultant image - after multi-kernel handling */
KernelInfo
*reflected_kernel, /* A reflected copy of the kernel (if needed) */
*norm_kernel, /* the current normal un-reflected kernel */
*rflt_kernel, /* the current reflected kernel (if needed) */
*this_kernel; /* the kernel being applied */
MorphologyMethod
primitive; /* the current morphology primitive being applied */
CompositeOperator
rslt_compose; /* multi-kernel compose method for results to use */
MagickBooleanType
special, /* do we use a direct modify function? */
verbose; /* verbose output of results */
size_t
method_loop, /* Loop 1: number of compound method iterations (norm 1) */
method_limit, /* maximum number of compound method iterations */
kernel_number, /* Loop 2: the kernel number being applied */
stage_loop, /* Loop 3: primitive loop for compound morphology */
stage_limit, /* how many primitives are in this compound */
kernel_loop, /* Loop 4: iterate the kernel over image */
kernel_limit, /* number of times to iterate kernel */
count, /* total count of primitive steps applied */
kernel_changed, /* total count of changed using iterated kernel */
method_changed; /* total count of changed over method iteration */
ssize_t
changed; /* number pixels changed by last primitive operation */
char
v_info[MaxTextExtent];
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
assert(kernel != (KernelInfo *) NULL);
assert(kernel->signature == MagickCoreSignature);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
count = 0; /* number of low-level morphology primitives performed */
if ( iterations == 0 )
return((Image *) NULL); /* null operation - nothing to do! */
kernel_limit = (size_t) iterations;
if ( iterations < 0 ) /* negative iterations = infinite (well, almost) */
kernel_limit = image->columns>image->rows ? image->columns : image->rows;
verbose = IsMagickTrue(GetImageArtifact(image,"debug"));
/* initialise for cleanup */
curr_image = (Image *) image;
curr_compose = image->compose;
(void) curr_compose;
work_image = save_image = rslt_image = (Image *) NULL;
reflected_kernel = (KernelInfo *) NULL;
/* Initialize specific methods
* + which loop should use the given iterations
* + how many primitives make up the compound morphology
* + multi-kernel compose method to use (by default)
*/
method_limit = 1; /* just do method once, unless otherwise set */
stage_limit = 1; /* assume method is not a compound */
special = MagickFalse; /* assume it is NOT a direct modify primitive */
rslt_compose = compose; /* and we are composing multi-kernels as given */
switch( method ) {
case SmoothMorphology: /* 4 primitive compound morphology */
stage_limit = 4;
break;
case OpenMorphology: /* 2 primitive compound morphology */
case OpenIntensityMorphology:
case TopHatMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case EdgeMorphology:
stage_limit = 2;
break;
case HitAndMissMorphology:
rslt_compose = LightenCompositeOp; /* Union of multi-kernel results */
/* FALL THROUGH */
case ThinningMorphology:
case ThickenMorphology:
method_limit = kernel_limit; /* iterate the whole method */
kernel_limit = 1; /* do not do kernel iteration */
break;
case DistanceMorphology:
case VoronoiMorphology:
special = MagickTrue; /* use special direct primitive */
break;
default:
break;
}
/* Apply special methods with special requirements
** For example, single run only, or post-processing requirements
*/
if ( special != MagickFalse )
{
rslt_image=CloneImage(image,0,0,MagickTrue,exception);
if (rslt_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(rslt_image,DirectClass) == MagickFalse)
{
InheritException(exception,&rslt_image->exception);
goto error_cleanup;
}
changed = MorphologyPrimitiveDirect(rslt_image, method,
channel, kernel, exception);
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr,
"%s:%.20g.%.20g #%.20g => Changed %.20g\n",
CommandOptionToMnemonic(MagickMorphologyOptions, method),
1.0,0.0,1.0, (double) changed);
if ( changed < 0 )
goto error_cleanup;
if ( method == VoronoiMorphology ) {
/* Preserve the alpha channel of input image - but turned off */
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
(void) CompositeImageChannel(rslt_image, DefaultChannels,
CopyOpacityCompositeOp, image, 0, 0);
(void) SetImageAlphaChannel(rslt_image, DeactivateAlphaChannel);
}
goto exit_cleanup;
}
/* Handle user (caller) specified multi-kernel composition method */
if ( compose != UndefinedCompositeOp )
rslt_compose = compose; /* override default composition for method */
if ( rslt_compose == UndefinedCompositeOp )
rslt_compose = NoCompositeOp; /* still not defined! Then re-iterate */
/* Some methods require a reflected kernel to use with primitives.
* Create the reflected kernel for those methods. */
switch ( method ) {
case CorrelateMorphology:
case CloseMorphology:
case CloseIntensityMorphology:
case BottomHatMorphology:
case SmoothMorphology:
reflected_kernel = CloneKernelInfo(kernel);
if (reflected_kernel == (KernelInfo *) NULL)
goto error_cleanup;
RotateKernelInfo(reflected_kernel,180);
break;
default:
break;
}
/* Loops around the more primitive morphology methods:
** erode, dilate, open, close, smooth, edge, etc...
*/
/* Loop 1: iterate the compound method */
method_loop = 0;
method_changed = 1;
while ( method_loop < method_limit && method_changed > 0 ) {
method_loop++;
method_changed = 0;
/* Loop 2: iterate over each kernel in a multi-kernel list */
norm_kernel = (KernelInfo *) kernel;
this_kernel = (KernelInfo *) kernel;
rflt_kernel = reflected_kernel;
kernel_number = 0;
while ( norm_kernel != NULL ) {
/* Loop 3: Compound Morphology Staging - Select Primitive to apply */
stage_loop = 0; /* the compound morphology stage number */
while ( stage_loop < stage_limit ) {
stage_loop++; /* The stage of the compound morphology */
/* Select primitive morphology for this stage of compound method */
this_kernel = norm_kernel; /* default use unreflected kernel */
primitive = method; /* Assume method is a primitive */
switch( method ) {
case ErodeMorphology: /* just erode */
case EdgeInMorphology: /* erode and image difference */
primitive = ErodeMorphology;
break;
case DilateMorphology: /* just dilate */
case EdgeOutMorphology: /* dilate and image difference */
primitive = DilateMorphology;
break;
case OpenMorphology: /* erode then dilate */
case TopHatMorphology: /* open and image difference */
primitive = ErodeMorphology;
if ( stage_loop == 2 )
primitive = DilateMorphology;
break;
case OpenIntensityMorphology:
primitive = ErodeIntensityMorphology;
if ( stage_loop == 2 )
primitive = DilateIntensityMorphology;
break;
case CloseMorphology: /* dilate, then erode */
case BottomHatMorphology: /* close and image difference */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
if ( stage_loop == 2 )
primitive = ErodeMorphology;
break;
case CloseIntensityMorphology:
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateIntensityMorphology;
if ( stage_loop == 2 )
primitive = ErodeIntensityMorphology;
break;
case SmoothMorphology: /* open, close */
switch ( stage_loop ) {
case 1: /* start an open method, which starts with Erode */
primitive = ErodeMorphology;
break;
case 2: /* now Dilate the Erode */
primitive = DilateMorphology;
break;
case 3: /* Reflect kernel for a close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = DilateMorphology;
break;
case 4: /* Finish the Close */
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ErodeMorphology;
break;
}
break;
case EdgeMorphology: /* dilate and erode difference */
primitive = DilateMorphology;
if ( stage_loop == 2 ) {
save_image = curr_image; /* save the image difference */
curr_image = (Image *) image;
primitive = ErodeMorphology;
}
break;
case CorrelateMorphology:
/* A Correlation is a Convolution with a reflected kernel.
** However a Convolution is a weighted sum using a reflected
** kernel. It may seem stange to convert a Correlation into a
** Convolution as the Correlation is the simplier method, but
** Convolution is much more commonly used, and it makes sense to
** implement it directly so as to avoid the need to duplicate the
** kernel when it is not required (which is typically the
** default).
*/
this_kernel = rflt_kernel; /* use the reflected kernel */
primitive = ConvolveMorphology;
break;
default:
break;
}
assert( this_kernel != (KernelInfo *) NULL );
/* Extra information for debugging compound operations */
if ( verbose != MagickFalse ) {
if ( stage_limit > 1 )
(void) FormatLocaleString(v_info,MaxTextExtent,"%s:%.20g.%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions,method),(double)
method_loop,(double) stage_loop);
else if ( primitive != method )
(void) FormatLocaleString(v_info, MaxTextExtent, "%s:%.20g -> ",
CommandOptionToMnemonic(MagickMorphologyOptions, method),(double)
method_loop);
else
v_info[0] = '\0';
}
/* Loop 4: Iterate the kernel with primitive */
kernel_loop = 0;
kernel_changed = 0;
changed = 1;
while ( kernel_loop < kernel_limit && changed > 0 ) {
kernel_loop++; /* the iteration of this kernel */
/* Create a clone as the destination image, if not yet defined */
if ( work_image == (Image *) NULL )
{
work_image=CloneImage(image,0,0,MagickTrue,exception);
if (work_image == (Image *) NULL)
goto error_cleanup;
if (SetImageStorageClass(work_image,DirectClass) == MagickFalse)
{
InheritException(exception,&work_image->exception);
goto error_cleanup;
}
/* work_image->type=image->type; ??? */
}
/* APPLY THE MORPHOLOGICAL PRIMITIVE (curr -> work) */
count++;
changed = MorphologyPrimitive(curr_image, work_image, primitive,
channel, this_kernel, bias, exception);
if ( verbose != MagickFalse ) {
if ( kernel_loop > 1 )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line from previous */
(void) FormatLocaleFile(stderr,
"%s%s%s:%.20g.%.20g #%.20g => Changed %.20g",
v_info,CommandOptionToMnemonic(MagickMorphologyOptions,
primitive),(this_kernel == rflt_kernel ) ? "*" : "",
(double) (method_loop+kernel_loop-1),(double) kernel_number,
(double) count,(double) changed);
}
if ( changed < 0 )
goto error_cleanup;
kernel_changed += changed;
method_changed += changed;
/* prepare next loop */
{ Image *tmp = work_image; /* swap images for iteration */
work_image = curr_image;
curr_image = tmp;
}
if ( work_image == image )
work_image = (Image *) NULL; /* replace input 'image' */
} /* End Loop 4: Iterate the kernel with primitive */
if ( verbose != MagickFalse && kernel_changed != (size_t)changed )
(void) FormatLocaleFile(stderr, " Total %.20g",(double) kernel_changed);
if ( verbose != MagickFalse && stage_loop < stage_limit )
(void) FormatLocaleFile(stderr, "\n"); /* add end-of-line before looping */
#if 0
(void) FormatLocaleFile(stderr, "--E-- image=0x%lx\n", (unsigned long)image);
(void) FormatLocaleFile(stderr, " curr =0x%lx\n", (unsigned long)curr_image);
(void) FormatLocaleFile(stderr, " work =0x%lx\n", (unsigned long)work_image);
(void) FormatLocaleFile(stderr, " save =0x%lx\n", (unsigned long)save_image);
(void) FormatLocaleFile(stderr, " union=0x%lx\n", (unsigned long)rslt_image);
#endif
} /* End Loop 3: Primitive (staging) Loop for Compound Methods */
/* Final Post-processing for some Compound Methods
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** Turn off SVG composition 'alpha blending'.
*/
switch( method ) {
case EdgeOutMorphology:
case EdgeInMorphology:
case TopHatMorphology:
case BottomHatMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr,
"\n%s: Difference with original image",
CommandOptionToMnemonic(MagickMorphologyOptions,method));
(void) CompositeImageChannel(curr_image,(ChannelType)
(channel & ~SyncChannels),DifferenceCompositeOp,image,0,0);
break;
case EdgeMorphology:
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr,
"\n%s: Difference of Dilate and Erode",
CommandOptionToMnemonic(MagickMorphologyOptions,method));
(void) CompositeImageChannel(curr_image,(ChannelType)
(channel & ~SyncChannels),DifferenceCompositeOp,save_image,0,0);
save_image = DestroyImage(save_image); /* finished with save image */
break;
default:
break;
}
/* multi-kernel handling: re-iterate, or compose results */
if ( kernel->next == (KernelInfo *) NULL )
rslt_image = curr_image; /* just return the resulting image */
else if ( rslt_compose == NoCompositeOp )
{ if ( verbose != MagickFalse ) {
if ( this_kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " (re-iterate)");
else
(void) FormatLocaleFile(stderr, " (done)");
}
rslt_image = curr_image; /* return result, and re-iterate */
}
else if ( rslt_image == (Image *) NULL)
{ if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (save for compose)");
rslt_image = curr_image;
curr_image = (Image *) image; /* continue with original image */
}
else
{ /* Add the new 'current' result to the composition
**
** The removal of any 'Sync' channel flag in the Image Composition
** below ensures the mathematical compose method is applied in a
** purely mathematical way, and only to the selected channels.
** IE: Turn off SVG composition 'alpha blending'.
*/
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, " (compose \"%s\")",
CommandOptionToMnemonic(MagickComposeOptions, rslt_compose) );
(void) CompositeImageChannel(rslt_image,
(ChannelType) (channel & ~SyncChannels), rslt_compose,
curr_image, 0, 0);
curr_image = DestroyImage(curr_image);
curr_image = (Image *) image; /* continue with original image */
}
if ( verbose != MagickFalse )
(void) FormatLocaleFile(stderr, "\n");
/* loop to the next kernel in a multi-kernel list */
norm_kernel = norm_kernel->next;
if ( rflt_kernel != (KernelInfo *) NULL )
rflt_kernel = rflt_kernel->next;
kernel_number++;
} /* End Loop 2: Loop over each kernel */
} /* End Loop 1: compound method iteration */
goto exit_cleanup;
/* Yes goto's are bad, but they make cleanup a lot more efficient */
error_cleanup:
if ( curr_image == rslt_image )
curr_image = (Image *) NULL;
if ( rslt_image != (Image *) NULL )
rslt_image = DestroyImage(rslt_image);
exit_cleanup:
if ( curr_image == rslt_image || curr_image == image )
curr_image = (Image *) NULL;
if ( curr_image != (Image *) NULL )
curr_image = DestroyImage(curr_image);
if ( work_image != (Image *) NULL )
work_image = DestroyImage(work_image);
if ( save_image != (Image *) NULL )
save_image = DestroyImage(save_image);
if ( reflected_kernel != (KernelInfo *) NULL )
reflected_kernel = DestroyKernelInfo(reflected_kernel);
return(rslt_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% M o r p h o l o g y I m a g e C h a n n e l %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% MorphologyImageChannel() applies a user supplied kernel to the image
% according to the given morphology method.
%
% This function applies any and all user defined settings before calling
% the above internal function MorphologyApply().
%
% User defined settings include...
% * Output Bias for Convolution and Correlation
% ("-bias" or "-define convolve:bias=??")
% * Kernel Scale/normalize settings ("-set 'option:convolve:scale'")
% This can also include the addition of a scaled unity kernel.
% * Show Kernel being applied ("-set option:showKernel 1")
%
% The format of the MorphologyImage method is:
%
% Image *MorphologyImage(const Image *image,MorphologyMethod method,
% const ssize_t iterations,KernelInfo *kernel,ExceptionInfo *exception)
%
% Image *MorphologyImageChannel(const Image *image, const ChannelType
% channel,MorphologyMethod method,const ssize_t iterations,
% KernelInfo *kernel,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o method: the morphology method to be applied.
%
% o iterations: apply the operation this many times (or until no change).
% A value of -1 means loop until no change found.
% How this is applied may depend on the morphology method.
% Typically this is a value of 1.
%
% o channel: the channel type.
%
% o kernel: An array of double representing the morphology kernel.
% Warning: kernel may be normalized for the Convolve method.
%
% o exception: return any errors or warnings in this structure.
%
*/
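/* A minimal usage sketch (illustrative only; it assumes the standard
** kernel string parser AcquireKernelInfo() and is not part of the
** original documentation):
**
**   KernelInfo *kernel = AcquireKernelInfo("Octagon:3");
**   if (kernel != (KernelInfo *) NULL)
**     {
**       Image *eroded = MorphologyImage(image, ErodeMorphology, 1,
**         kernel, exception);
**       kernel = DestroyKernelInfo(kernel);
**       ...
**     }
*/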
MagickExport Image *MorphologyImage(const Image *image,
const MorphologyMethod method,const ssize_t iterations,
const KernelInfo *kernel,ExceptionInfo *exception)
{
Image
*morphology_image;
morphology_image=MorphologyImageChannel(image,DefaultChannels,method,
iterations,kernel,exception);
return(morphology_image);
}
MagickExport Image *MorphologyImageChannel(const Image *image,
const ChannelType channel,const MorphologyMethod method,
const ssize_t iterations,const KernelInfo *kernel,ExceptionInfo *exception)
{
KernelInfo
*curr_kernel;
CompositeOperator
compose;
double
bias;
Image
*morphology_image;
/* Apply Convolve/Correlate Normalization and Scaling Factors.
* This is done BEFORE the ShowKernelInfo() function is called so that
* users can see the results of the 'option:convolve:scale' option.
*/
curr_kernel = (KernelInfo *) kernel;
bias=image->bias;
if ((method == ConvolveMorphology) || (method == CorrelateMorphology))
{
const char
*artifact;
artifact = GetImageArtifact(image,"convolve:bias");
if (artifact != (const char *) NULL)
bias=StringToDoubleInterval(artifact,(double) QuantumRange+1.0);
artifact = GetImageArtifact(image,"convolve:scale");
if ( artifact != (const char *) NULL ) {
if ( curr_kernel == kernel )
curr_kernel = CloneKernelInfo(kernel);
if (curr_kernel == (KernelInfo *) NULL) {
curr_kernel=DestroyKernelInfo(curr_kernel);
return((Image *) NULL);
}
ScaleGeometryKernelInfo(curr_kernel, artifact);
}
}
/* display the (normalized) kernel via stderr */
if ( IsMagickTrue(GetImageArtifact(image,"showKernel"))
|| IsMagickTrue(GetImageArtifact(image,"convolve:showKernel"))
|| IsMagickTrue(GetImageArtifact(image,"morphology:showKernel")) )
ShowKernelInfo(curr_kernel);
/* Override the default handling of multi-kernel morphology results
* If 'Undefined' use the default method
* If 'None' (default for 'Convolve') re-iterate previous result
* Otherwise merge resulting images using compose method given.
* Default for 'HitAndMiss' is 'Lighten'.
*/
{ const char
*artifact;
compose = UndefinedCompositeOp; /* use default for method */
artifact = GetImageArtifact(image,"morphology:compose");
if ( artifact != (const char *) NULL)
compose = (CompositeOperator) ParseCommandOption(
MagickComposeOptions,MagickFalse,artifact);
}
/* Apply the Morphology */
morphology_image = MorphologyApply(image, channel, method, iterations,
curr_kernel, compose, bias, exception);
/* Cleanup and Exit */
if ( curr_kernel != kernel )
curr_kernel=DestroyKernelInfo(curr_kernel);
return(morphology_image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
+ R o t a t e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% RotateKernelInfo() rotates the kernel by the angle given.
%
% Currently it is restricted to 90 degree angles, of either 1D kernels
% or square kernels. And 'circular' rotations of 45 degrees for 3x3 kernels.
% It will ignore useless rotations for specific 'named' built-in kernels.
%
% The format of the RotateKernelInfo method is:
%
% void RotateKernelInfo(KernelInfo *kernel, double angle)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o angle: angle to rotate in degrees
%
% This function is currently internal to this module only, but can be exported
% to other modules if needed.
*/
static void RotateKernelInfo(KernelInfo *kernel, double angle)
{
/* rotate the chained (lower) kernels first */
if ( kernel->next != (KernelInfo *) NULL)
RotateKernelInfo(kernel->next, angle);
/* WARNING: Currently assumes the kernel (rightly) is horizontally symmetrical
**
** TODO: expand beyond simple 90 degree rotates, flips and flops
*/
/* Modulus the angle */
angle = fmod(angle, 360.0);
if ( angle < 0 )
angle += 360.0;
if ( 337.5 < angle || angle <= 22.5 )
return; /* Near zero angle - no change! - At least not at this time */
/* Handle special cases */
switch (kernel->type) {
/* These built-in kernels are cylindrical kernels, rotating is useless */
case GaussianKernel:
case DoGKernel:
case LoGKernel:
case DiskKernel:
case PeaksKernel:
case LaplacianKernel:
case ChebyshevKernel:
case ManhattanKernel:
case EuclideanKernel:
return;
/* These may be rotatable at non-90 angles in the future */
/* but simply rotating them in multiples of 90 degrees is useless */
case SquareKernel:
case DiamondKernel:
case PlusKernel:
case CrossKernel:
return;
/* These only allow a +/-90 degree rotation (by transpose) */
/* A 180 degree rotation is useless */
case BlurKernel:
if ( 135.0 < angle && angle <= 225.0 )
return;
if ( 225.0 < angle && angle <= 315.0 )
angle -= 180;
break;
default:
break;
}
/* Attempt rotations by 45 degrees -- 3x3 kernels only */
if ( 22.5 < fmod(angle,90.0) && fmod(angle,90.0) <= 67.5 )
{
if ( kernel->width == 3 && kernel->height == 3 )
{ /* Rotate a 3x3 square by 45 degree angle */
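/* i.e. the 8 outer values shift one step clockwise (sketch):
**   a b c      h a b
**   h + d  ->  g + c
**   g f e      f e d
*/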
double t = kernel->values[0];
kernel->values[0] = kernel->values[3];
kernel->values[3] = kernel->values[6];
kernel->values[6] = kernel->values[7];
kernel->values[7] = kernel->values[8];
kernel->values[8] = kernel->values[5];
kernel->values[5] = kernel->values[2];
kernel->values[2] = kernel->values[1];
kernel->values[1] = t;
/* rotate non-centered origin */
if ( kernel->x != 1 || kernel->y != 1 ) {
ssize_t x,y;
x = (ssize_t) kernel->x-1;
y = (ssize_t) kernel->y-1;
if ( x == y ) x = 0;
else if ( x == 0 ) x = -y;
else if ( x == -y ) y = 0;
else if ( y == 0 ) y = x;
kernel->x = (ssize_t) x+1;
kernel->y = (ssize_t) y+1;
}
angle = fmod(angle+315.0, 360.0); /* angle reduced 45 degrees */
kernel->angle = fmod(kernel->angle+45.0, 360.0);
}
else
perror("Unable to rotate non-3x3 kernel by 45 degrees");
}
if ( 45.0 < fmod(angle, 180.0) && fmod(angle,180.0) <= 135.0 )
{
if ( kernel->width == 1 || kernel->height == 1 )
{ /* Do a transpose of a 1 dimensional kernel,
** which results in a fast 90 degree rotation of some type.
*/
ssize_t
t;
t = (ssize_t) kernel->width;
kernel->width = kernel->height;
kernel->height = (size_t) t;
t = kernel->x;
kernel->x = kernel->y;
kernel->y = t;
if ( kernel->width == 1 ) {
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
} else {
angle = fmod(angle+90.0, 360.0); /* angle increased 90 degrees */
kernel->angle = fmod(kernel->angle+270.0, 360.0);
}
}
else if ( kernel->width == kernel->height )
{ /* Rotate a square array of values by 90 degrees */
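/* Four-way cycle of positions, e.g. for a 3x3 kernel (sketch):
**   a b c      g d a
**   d e f  ->  h e b
**   g h i      i f c
*/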
{ register size_t
i,j,x,y;
register double
*k,t;
k=kernel->values;
for( i=0, x=kernel->width-1; i<=x; i++, x--)
for( j=0, y=kernel->height-1; j<y; j++, y--)
{ t = k[i+j*kernel->width];
k[i+j*kernel->width] = k[j+x*kernel->width];
k[j+x*kernel->width] = k[x+y*kernel->width];
k[x+y*kernel->width] = k[y+i*kernel->width];
k[y+i*kernel->width] = t;
}
}
/* rotate the origin - relative to center of array */
{ register ssize_t x,y;
x = (ssize_t) (kernel->x*2-kernel->width+1);
y = (ssize_t) (kernel->y*2-kernel->height+1);
kernel->x = (ssize_t) ( -y +(ssize_t) kernel->width-1)/2;
kernel->y = (ssize_t) ( +x +(ssize_t) kernel->height-1)/2;
}
angle = fmod(angle+270.0, 360.0); /* angle reduced 90 degrees */
kernel->angle = fmod(kernel->angle+90.0, 360.0);
}
else
perror("Unable to rotate a non-square, non-linear kernel 90 degrees");
}
if ( 135.0 < angle && angle <= 225.0 )
{
    /* For a 180 degree rotation - also known as a reflection -
     * this is actually a very common operation!
     * Basically all that is needed is a reversal of the kernel data
     * and a reflection of the origin.
*/
double
t;
register double
*k;
size_t
i,
j;
k=kernel->values;
for ( i=0, j=kernel->width*kernel->height-1; i<j; i++, j--)
t=k[i], k[i]=k[j], k[j]=t;
kernel->x = (ssize_t) kernel->width - kernel->x - 1;
kernel->y = (ssize_t) kernel->height - kernel->y - 1;
    angle = fmod(angle-180.0, 360.0);   /* angle reduced by 180 degrees */
kernel->angle = fmod(kernel->angle+180.0, 360.0);
}
  /* At this point the angle should be between -45 (315) and +45 degrees.
   * In the future some form of non-orthogonal angled rotation could be
   * performed here, possibly with a linear kernel restriction.
*/
return;
}
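/* Worked example (illustrative, derived from the value shuffle above):
** a 45 degree rotation of a 3x3 kernel moves each of the eight outer
** values one step clockwise around the ring while the center is fixed:
**
**      a b c        d a b
**      d e f   ==>  g e c
**      g h i        h i f
*/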
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e G e o m e t r y K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleGeometryKernelInfo() takes a geometry argument string, typically
% provided as a "-set option:convolve:scale {geometry}" user setting,
% and modifies the kernel according to the parsed arguments of that setting.
%
% The first argument (and any normalization flags) are passed to
% ScaleKernelInfo() to scale/normalize the kernel. The second argument
% is then passed to UnityAddKernelInfo() to add a scaled unity kernel
% into the scaled/normalized kernel.
%
% The format of the ScaleGeometryKernelInfo method is:
%
% void ScaleGeometryKernelInfo(KernelInfo *kernel,
% const double scaling_factor,const MagickStatusType normalize_flags)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel to modify
%
% o geometry:
% The geometry string to parse, typically from the user provided
% "-set option:convolve:scale {geometry}" setting.
%
*/
MagickExport void ScaleGeometryKernelInfo (KernelInfo *kernel,
const char *geometry)
{
GeometryFlags
flags;
GeometryInfo
args;
SetGeometryInfo(&args);
flags = (GeometryFlags) ParseGeometry(geometry, &args);
#if 0
/* For Debugging Geometry Input */
(void) FormatLocaleFile(stderr, "Geometry = 0x%04X : %lg x %lg %+lg %+lg\n",
flags, args.rho, args.sigma, args.xi, args.psi );
#endif
if ( (flags & PercentValue) != 0 ) /* Handle Percentage flag*/
args.rho *= 0.01, args.sigma *= 0.01;
if ( (flags & RhoValue) == 0 ) /* Set Defaults for missing args */
args.rho = 1.0;
if ( (flags & SigmaValue) == 0 )
args.sigma = 0.0;
/* Scale/Normalize the input kernel */
ScaleKernelInfo(kernel, args.rho, flags);
/* Add Unity Kernel, for blending with original */
if ( (flags & SigmaValue) != 0 )
UnityAddKernelInfo(kernel, args.sigma);
return;
}
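/* Usage sketch (illustrative; the argument strings are assumptions about
** the geometry syntax accepted by ParseGeometry()):
**
**    ScaleGeometryKernelInfo(kernel, "2");      scale all values by 2
**    ScaleGeometryKernelInfo(kernel, "50%!");   normalize ('!'), then halve
*/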
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S c a l e K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ScaleKernelInfo() scales the given kernel list by the given amount, with or
% without normalization of the sum of the kernel values (as per given flags).
%
% By default (no flags given) the values within the kernel are scaled
% directly by the given scaling factor.
%
% If either of the two 'normalize_flags' are given the kernel will first be
% normalized and then further scaled by the scaling factor value given.
%
% Kernel normalization ('normalize_flags' given) is designed to ensure that
% any use of the kernel scaling factor with 'Convolve' or 'Correlate'
% morphology methods will fall into -1.0 to +1.0 range. Note that for
% non-HDRI versions of IM this may cause images to have any negative results
% clipped, unless some 'bias' is used.
%
% More specifically: kernels which only contain positive values (such as a
% 'Gaussian' kernel) will be scaled so that those values sum to +1.0,
% ensuring a 0.0 to +1.0 output range for non-HDRI images.
%
% For kernels that contain some negative values (such as 'Sharpen' kernels),
% the kernel will be scaled by the absolute value of the sum of the kernel
% values, so that it will generally fall within the +/- 1.0 range.
%
% For kernels whose values sum to zero (such as 'Laplacian' kernels), the
% kernel will be scaled by just the sum of the positive values, so that its
% output range will again fall into the +/- 1.0 range.
%
% For special kernels designed for locating shapes using 'Correlate' (often
% containing only +1 and -1 values, representing foreground/background
% matching), a special normalization method is provided to scale the positive
% values separately from the negative values, so the kernel will be forced
% to become a zero-sum kernel better suited to such searches.
%
% WARNING: Correct normalization of the kernel assumes that the '*_range'
% attributes within the kernel structure have been correctly set during the
% kernels creation.
%
% NOTE: The values used for 'normalize_flags' have been selected specifically
% to match the use of geometry options, so that '!' means NormalizeValue, '^'
% means CorrelateNormalizeValue. All other GeometryFlags values are ignored.
%
% The format of the ScaleKernelInfo method is:
%
% void ScaleKernelInfo(KernelInfo *kernel, const double scaling_factor,
% const MagickStatusType normalize_flags )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scaling_factor:
%         multiply all values (after any requested normalization) by this
%         factor; note that a zero scaling factor will zero out the kernel.
%
% o normalize_flags:
% GeometryFlags defining normalization method to use.
% specifically: NormalizeValue, CorrelateNormalizeValue,
% and/or PercentValue
%
*/
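/* Worked example (derived from the code below): a 3x3 box kernel of nine
** 1.0 values has positive_range = 9 and negative_range = 0, so with
** NormalizeValue set and scaling_factor = 1.0 the divisor pos_scale is
** |9 + 0| = 9 and every value becomes 1/9, making the kernel sum to +1.0.
*/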
MagickExport void ScaleKernelInfo(KernelInfo *kernel,
const double scaling_factor,const GeometryFlags normalize_flags)
{
register ssize_t
i;
register double
pos_scale,
neg_scale;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ScaleKernelInfo(kernel->next, scaling_factor, normalize_flags);
/* Normalization of Kernel */
pos_scale = 1.0;
if ( (normalize_flags&NormalizeValue) != 0 ) {
if ( fabs(kernel->positive_range + kernel->negative_range) >= MagickEpsilon )
/* non-zero-summing kernel (generally positive) */
pos_scale = fabs(kernel->positive_range + kernel->negative_range);
else
/* zero-summing kernel */
pos_scale = kernel->positive_range;
}
/* Force kernel into a normalized zero-summing kernel */
if ( (normalize_flags&CorrelateNormalizeValue) != 0 ) {
pos_scale = ( fabs(kernel->positive_range) >= MagickEpsilon )
? kernel->positive_range : 1.0;
neg_scale = ( fabs(kernel->negative_range) >= MagickEpsilon )
? -kernel->negative_range : 1.0;
}
else
neg_scale = pos_scale;
  /* finalize scaling_factor for positive and negative components */
pos_scale = scaling_factor/pos_scale;
neg_scale = scaling_factor/neg_scale;
for (i=0; i < (ssize_t) (kernel->width*kernel->height); i++)
if ( ! IsNaN(kernel->values[i]) )
kernel->values[i] *= (kernel->values[i] >= 0) ? pos_scale : neg_scale;
/* convolution output range */
kernel->positive_range *= pos_scale;
kernel->negative_range *= neg_scale;
/* maximum and minimum values in kernel */
kernel->maximum *= (kernel->maximum >= 0.0) ? pos_scale : neg_scale;
kernel->minimum *= (kernel->minimum >= 0.0) ? pos_scale : neg_scale;
/* swap kernel settings if user's scaling factor is negative */
if ( scaling_factor < MagickEpsilon ) {
double t;
t = kernel->positive_range;
kernel->positive_range = kernel->negative_range;
kernel->negative_range = t;
t = kernel->maximum;
kernel->maximum = kernel->minimum;
    kernel->minimum = t;
}
return;
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S h o w K e r n e l I n f o %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ShowKernelInfo() outputs the details of the given kernel definition to
% standard error, generally due to a user's 'showKernel' option request.
%
% The format of the ShowKernelInfo method is:
%
% void ShowKernelInfo(const KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ShowKernelInfo(const KernelInfo *kernel)
{
const KernelInfo
*k;
size_t
c, i, u, v;
for (c=0, k=kernel; k != (KernelInfo *) NULL; c++, k=k->next ) {
(void) FormatLocaleFile(stderr, "Kernel");
if ( kernel->next != (KernelInfo *) NULL )
(void) FormatLocaleFile(stderr, " #%lu", (unsigned long) c );
(void) FormatLocaleFile(stderr, " \"%s",
CommandOptionToMnemonic(MagickKernelOptions, k->type) );
if ( fabs(k->angle) >= MagickEpsilon )
(void) FormatLocaleFile(stderr, "@%lg", k->angle);
(void) FormatLocaleFile(stderr, "\" of size %lux%lu%+ld%+ld",(unsigned long)
k->width,(unsigned long) k->height,(long) k->x,(long) k->y);
(void) FormatLocaleFile(stderr,
" with values from %.*lg to %.*lg\n",
GetMagickPrecision(), k->minimum,
GetMagickPrecision(), k->maximum);
(void) FormatLocaleFile(stderr, "Forming a output range from %.*lg to %.*lg",
GetMagickPrecision(), k->negative_range,
GetMagickPrecision(), k->positive_range);
if ( fabs(k->positive_range+k->negative_range) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Zero-Summing)\n");
else if ( fabs(k->positive_range+k->negative_range-1.0) < MagickEpsilon )
(void) FormatLocaleFile(stderr, " (Normalized)\n");
else
(void) FormatLocaleFile(stderr, " (Sum %.*lg)\n",
GetMagickPrecision(), k->positive_range+k->negative_range);
for (i=v=0; v < k->height; v++) {
(void) FormatLocaleFile(stderr, "%2lu:", (unsigned long) v );
for (u=0; u < k->width; u++, i++)
if ( IsNaN(k->values[i]) )
(void) FormatLocaleFile(stderr," %*s", GetMagickPrecision()+3, "nan");
else
(void) FormatLocaleFile(stderr," %*.*lg", GetMagickPrecision()+3,
GetMagickPrecision(), k->values[i]);
(void) FormatLocaleFile(stderr,"\n");
}
}
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
%     U n i t y   A d d   K e r n e l   I n f o                               %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% UnityAddKernelInfo() Adds a given amount of the 'Unity' Convolution Kernel
% to the given pre-scaled and normalized Kernel. This in effect adds that
% amount of the original image into the resulting convolution kernel. This
% value is usually provided by the user as a percentage value in the
% 'convolve:scale' setting.
%
% The resulting effect is to convert the defined kernels into blended
% soft-blurs, unsharp kernels or into sharpening kernels.
%
% The format of the UnityAddKernelInfo method is:
%
%       void UnityAddKernelInfo(KernelInfo *kernel, const double scale )
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
% o scale:
% scaling factor for the unity kernel to be added to
% the given kernel.
%
*/
MagickExport void UnityAddKernelInfo(KernelInfo *kernel,
const double scale)
{
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
UnityAddKernelInfo(kernel->next, scale);
/* Add the scaled unity kernel to the existing kernel */
kernel->values[kernel->x+kernel->y*kernel->width] += scale;
CalcKernelMetaData(kernel); /* recalculate the meta-data */
return;
}
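/* Illustrative example: for a normalized kernel (sum +1.0) and scale = 0.5,
** the origin value values[x + y*width] is increased by 0.5, blending half
** of the original image into the convolution result; the total sum becomes
** 1.5 unless the kernel is re-scaled afterwards.
*/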
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% Z e r o K e r n e l N a n s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% ZeroKernelNans() replaces any special 'nan' value that may be present in
% the kernel with a zero value. This is typically done when the kernel will
% be used in special hardware (GPU) convolution processors, to simplify
% matters.
%
% The format of the ZeroKernelNans method is:
%
% void ZeroKernelNans (KernelInfo *kernel)
%
% A description of each parameter follows:
%
% o kernel: the Morphology/Convolution kernel
%
*/
MagickExport void ZeroKernelNans(KernelInfo *kernel)
{
register size_t
i;
/* do the other kernels in a multi-kernel list first */
if ( kernel->next != (KernelInfo *) NULL)
ZeroKernelNans(kernel->next);
for (i=0; i < (kernel->width*kernel->height); i++)
if ( IsNaN(kernel->values[i]) )
kernel->values[i] = 0.0;
return;
}
|
middle3r.c | /*
* Date: 11 December 2015
* Contact: Thomas Peyrin - thomas.peyrin@gmail.com
*/
/*
 * Simulation of boomerang analysis for Skinny
* Date: March 21, 2020
* Author: Hosein Hadipour
* Contact: hsn.hadipour@gmail.com
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <math.h>
#include <omp.h>
#include <stdbool.h>
// #define DEBUG 1
#define Nthreads 1
// Table that encodes the parameters of the various Skinny versions:
// (block size, key size, number of rounds)
//Skinny-64-64: 32 rounds
//Skinny-64-128: 36 rounds
//Skinny-64-192: 40 rounds
//Skinny-128-128: 40 rounds
//Skinny-128-256: 48 rounds
//Skinny-128-384: 56 rounds
int versions[6][3] = {{64, 64, 32}, {64, 128, 36}, {64, 192, 40}, {128, 128, 40}, {128, 256, 48}, {128, 384, 56}};
// Packing of data is done as follows (state[i][j] stands for row i and column j):
// 0 1 2 3
// 4 5 6 7
// 8 9 10 11
//12 13 14 15
// 4-bit Sbox
const unsigned char sbox_4[16] = {12, 6, 9, 0, 1, 10, 2, 11, 3, 8, 5, 13, 4, 14, 7, 15};
const unsigned char sbox_4_inv[16] = {3, 4, 6, 8, 12, 10, 1, 14, 9, 2, 5, 7, 0, 11, 13, 15};
// 8-bit Sbox
const unsigned char sbox_8[256] = {0x65, 0x4c, 0x6a, 0x42, 0x4b, 0x63, 0x43, 0x6b, 0x55, 0x75, 0x5a, 0x7a, 0x53, 0x73, 0x5b, 0x7b, 0x35, 0x8c, 0x3a, 0x81, 0x89, 0x33, 0x80, 0x3b, 0x95, 0x25, 0x98, 0x2a, 0x90, 0x23, 0x99, 0x2b, 0xe5, 0xcc, 0xe8, 0xc1, 0xc9, 0xe0, 0xc0, 0xe9, 0xd5, 0xf5, 0xd8, 0xf8, 0xd0, 0xf0, 0xd9, 0xf9, 0xa5, 0x1c, 0xa8, 0x12, 0x1b, 0xa0, 0x13, 0xa9, 0x05, 0xb5, 0x0a, 0xb8, 0x03, 0xb0, 0x0b, 0xb9, 0x32, 0x88, 0x3c, 0x85, 0x8d, 0x34, 0x84, 0x3d, 0x91, 0x22, 0x9c, 0x2c, 0x94, 0x24, 0x9d, 0x2d, 0x62, 0x4a, 0x6c, 0x45, 0x4d, 0x64, 0x44, 0x6d, 0x52, 0x72, 0x5c, 0x7c, 0x54, 0x74, 0x5d, 0x7d, 0xa1, 0x1a, 0xac, 0x15, 0x1d, 0xa4, 0x14, 0xad, 0x02, 0xb1, 0x0c, 0xbc, 0x04, 0xb4, 0x0d, 0xbd, 0xe1, 0xc8, 0xec, 0xc5, 0xcd, 0xe4, 0xc4, 0xed, 0xd1, 0xf1, 0xdc, 0xfc, 0xd4, 0xf4, 0xdd, 0xfd, 0x36, 0x8e, 0x38, 0x82, 0x8b, 0x30, 0x83, 0x39, 0x96, 0x26, 0x9a, 0x28, 0x93, 0x20, 0x9b, 0x29, 0x66, 0x4e, 0x68, 0x41, 0x49, 0x60, 0x40, 0x69, 0x56, 0x76, 0x58, 0x78, 0x50, 0x70, 0x59, 0x79, 0xa6, 0x1e, 0xaa, 0x11, 0x19, 0xa3, 0x10, 0xab, 0x06, 0xb6, 0x08, 0xba, 0x00, 0xb3, 0x09, 0xbb, 0xe6, 0xce, 0xea, 0xc2, 0xcb, 0xe3, 0xc3, 0xeb, 0xd6, 0xf6, 0xda, 0xfa, 0xd3, 0xf3, 0xdb, 0xfb, 0x31, 0x8a, 0x3e, 0x86, 0x8f, 0x37, 0x87, 0x3f, 0x92, 0x21, 0x9e, 0x2e, 0x97, 0x27, 0x9f, 0x2f, 0x61, 0x48, 0x6e, 0x46, 0x4f, 0x67, 0x47, 0x6f, 0x51, 0x71, 0x5e, 0x7e, 0x57, 0x77, 0x5f, 0x7f, 0xa2, 0x18, 0xae, 0x16, 0x1f, 0xa7, 0x17, 0xaf, 0x01, 0xb2, 0x0e, 0xbe, 0x07, 0xb7, 0x0f, 0xbf, 0xe2, 0xca, 0xee, 0xc6, 0xcf, 0xe7, 0xc7, 0xef, 0xd2, 0xf2, 0xde, 0xfe, 0xd7, 0xf7, 0xdf, 0xff};
const unsigned char sbox_8_inv[256] = {0xac, 0xe8, 0x68, 0x3c, 0x6c, 0x38, 0xa8, 0xec, 0xaa, 0xae, 0x3a, 0x3e, 0x6a, 0x6e, 0xea, 0xee, 0xa6, 0xa3, 0x33, 0x36, 0x66, 0x63, 0xe3, 0xe6, 0xe1, 0xa4, 0x61, 0x34, 0x31, 0x64, 0xa1, 0xe4, 0x8d, 0xc9, 0x49, 0x1d, 0x4d, 0x19, 0x89, 0xcd, 0x8b, 0x8f, 0x1b, 0x1f, 0x4b, 0x4f, 0xcb, 0xcf, 0x85, 0xc0, 0x40, 0x15, 0x45, 0x10, 0x80, 0xc5, 0x82, 0x87, 0x12, 0x17, 0x42, 0x47, 0xc2, 0xc7, 0x96, 0x93, 0x03, 0x06, 0x56, 0x53, 0xd3, 0xd6, 0xd1, 0x94, 0x51, 0x04, 0x01, 0x54, 0x91, 0xd4, 0x9c, 0xd8, 0x58, 0x0c, 0x5c, 0x08, 0x98, 0xdc, 0x9a, 0x9e, 0x0a, 0x0e, 0x5a, 0x5e, 0xda, 0xde, 0x95, 0xd0, 0x50, 0x05, 0x55, 0x00, 0x90, 0xd5, 0x92, 0x97, 0x02, 0x07, 0x52, 0x57, 0xd2, 0xd7, 0x9d, 0xd9, 0x59, 0x0d, 0x5d, 0x09, 0x99, 0xdd, 0x9b, 0x9f, 0x0b, 0x0f, 0x5b, 0x5f, 0xdb, 0xdf, 0x16, 0x13, 0x83, 0x86, 0x46, 0x43, 0xc3, 0xc6, 0x41, 0x14, 0xc1, 0x84, 0x11, 0x44, 0x81, 0xc4, 0x1c, 0x48, 0xc8, 0x8c, 0x4c, 0x18, 0x88, 0xcc, 0x1a, 0x1e, 0x8a, 0x8e, 0x4a, 0x4e, 0xca, 0xce, 0x35, 0x60, 0xe0, 0xa5, 0x65, 0x30, 0xa0, 0xe5, 0x32, 0x37, 0xa2, 0xa7, 0x62, 0x67, 0xe2, 0xe7, 0x3d, 0x69, 0xe9, 0xad, 0x6d, 0x39, 0xa9, 0xed, 0x3b, 0x3f, 0xab, 0xaf, 0x6b, 0x6f, 0xeb, 0xef, 0x26, 0x23, 0xb3, 0xb6, 0x76, 0x73, 0xf3, 0xf6, 0x71, 0x24, 0xf1, 0xb4, 0x21, 0x74, 0xb1, 0xf4, 0x2c, 0x78, 0xf8, 0xbc, 0x7c, 0x28, 0xb8, 0xfc, 0x2a, 0x2e, 0xba, 0xbe, 0x7a, 0x7e, 0xfa, 0xfe, 0x25, 0x70, 0xf0, 0xb5, 0x75, 0x20, 0xb0, 0xf5, 0x22, 0x27, 0xb2, 0xb7, 0x72, 0x77, 0xf2, 0xf7, 0x2d, 0x79, 0xf9, 0xbd, 0x7d, 0x29, 0xb9, 0xfd, 0x2b, 0x2f, 0xbb, 0xbf, 0x7b, 0x7f, 0xfb, 0xff};
// ShiftAndSwitchRows permutation
const unsigned char P[16] = {0, 1, 2, 3, 7, 4, 5, 6, 10, 11, 8, 9, 13, 14, 15, 12};
const unsigned char P_inv[16] = {0, 1, 2, 3, 5, 6, 7, 4, 10, 11, 8, 9, 15, 12, 13, 14};
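// Note (derived from the tables above): P rotates row r of the state r
// cells to the right, e.g. row 1 (cells 4..7) becomes (7, 4, 5, 6), and
// P_inv applies the matching left rotations.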
// Tweakey permutation
const unsigned char TWEAKEY_P[16] = {9, 15, 8, 13, 10, 14, 12, 11, 0, 1, 2, 3, 4, 5, 6, 7};
const unsigned char TWEAKEY_P_inv[16] = {8, 9, 10, 11, 12, 13, 14, 15, 2, 0, 4, 7, 6, 3, 5, 1};
// round constants
const unsigned char RC[62] = {
0x01, 0x03, 0x07, 0x0F, 0x1F, 0x3E, 0x3D, 0x3B, 0x37, 0x2F,
0x1E, 0x3C, 0x39, 0x33, 0x27, 0x0E, 0x1D, 0x3A, 0x35, 0x2B,
0x16, 0x2C, 0x18, 0x30, 0x21, 0x02, 0x05, 0x0B, 0x17, 0x2E,
0x1C, 0x38, 0x31, 0x23, 0x06, 0x0D, 0x1B, 0x36, 0x2D, 0x1A,
0x34, 0x29, 0x12, 0x24, 0x08, 0x11, 0x22, 0x04, 0x09, 0x13,
0x26, 0x0c, 0x19, 0x32, 0x25, 0x0a, 0x15, 0x2a, 0x14, 0x28,
0x10, 0x20};
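// Sketch (not part of the original source): the table above matches the
// 6-bit LFSR of the Skinny specification; this helper regenerates it.
static void generate_round_constants(unsigned char rc_out[62])
{
    int r;
    unsigned char rc = 0;
    for (r = 0; r < 62; r++)
    {
        // shift left within 6 bits, feeding back bit5 ^ bit4 ^ 1
        rc = ((rc << 1) & 0x3E) | (((rc >> 5) ^ (rc >> 4) ^ 1) & 1);
        rc_out[r] = rc;
    }
}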
FILE *fic;
void display_matrix(unsigned char state[4][4], int ver)
{
int i;
unsigned char input[16];
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
for (i = 0; i < 8; i++)
fprintf(fic, "%02x", input[i]);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
for (i = 0; i < 16; i++)
fprintf(fic, "%02x", input[i]);
}
}
void display_cipher_state(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int k;
fprintf(fic, "S = ");
display_matrix(state, ver);
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
fprintf(fic, " - TK%i = ", k + 1);
display_matrix(keyCells[k], ver);
}
}
// Extract the subtweakey (the XOR of the top two rows of the tweakey arrays) and apply it to the two top rows of the internal state, then update the tweakey state
void AddKey(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// apply the subtweakey to the internal state
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the TWEAKEY permutation
pos = TWEAKEY_P[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
//application of LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j]) & 0x8) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
}
}
}
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
}
// Extract the subtweakey (the XOR of the top two rows of the tweakey arrays) and apply it to the two top rows of the internal state, then update the tweakey state (inverse function)
void AddKey_inv(unsigned char state[4][4], unsigned char keyCells[3][4][4], int ver)
{
int i, j, k;
unsigned char pos;
unsigned char keyCells_tmp[3][4][4];
// update the subtweakey states with the permutation
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the inverse TWEAKEY permutation
pos = TWEAKEY_P_inv[j + 4 * i];
keyCells_tmp[k][i][j] = keyCells[k][pos >> 2][pos & 0x3];
}
}
}
// update the subtweakey states with the LFSRs
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 2; i <= 3; i++)
{
for (j = 0; j < 4; j++)
{
//application of inverse LFSRs for TK updates
if (k == 1)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7) ^ ((keyCells_tmp[k][i][j] << 3) & 0x8) ^ ((keyCells_tmp[k][i][j]) & 0x8);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] >> 1) & 0x7F) ^ ((keyCells_tmp[k][i][j] << 7) & 0x80) ^ ((keyCells_tmp[k][i][j] << 1) & 0x80);
}
else if (k == 2)
{
if (versions[ver][0] == 64)
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xE) ^ ((keyCells_tmp[k][i][j] >> 3) & 0x1) ^ ((keyCells_tmp[k][i][j] >> 2) & 0x1);
else
keyCells_tmp[k][i][j] = ((keyCells_tmp[k][i][j] << 1) & 0xFE) ^ ((keyCells_tmp[k][i][j] >> 7) & 0x01) ^ ((keyCells_tmp[k][i][j] >> 5) & 0x01);
}
}
}
}
for (k = 0; k < (int)(versions[ver][1] / versions[ver][0]); k++)
{
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
keyCells[k][i][j] = keyCells_tmp[k][i][j];
}
}
}
// apply the subtweakey to the internal state
for (i = 0; i <= 1; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] ^= keyCells[0][i][j];
if (2 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j];
else if (3 * versions[ver][0] == versions[ver][1])
state[i][j] ^= keyCells[1][i][j] ^ keyCells[2][i][j];
}
}
}
// Apply the constants: using a LFSR counter on 6 bits, we XOR the 6 bits to the first 6 bits of the internal state
void AddConstants(unsigned char state[4][4], int r)
{
state[0][0] ^= (RC[r] & 0xf);
state[1][0] ^= ((RC[r] >> 4) & 0x3);
state[2][0] ^= 0x2;
}
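// Example: in round 0, RC[0] = 0x01, so state[0][0] ^= 0x1,
// state[1][0] ^= 0x0 and state[2][0] ^= 0x2.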
// apply the 4-bit Sbox
void SubCell4(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_4[state[i][j]];
}
// apply the 4-bit inverse Sbox
void SubCell4_inv(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_4_inv[state[i][j]];
}
// apply the 8-bit Sbox
void SubCell8(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_8[state[i][j]];
}
// apply the 8-bit inverse Sbox
void SubCell8_inv(unsigned char state[4][4])
{
int i, j;
for (i = 0; i < 4; i++)
for (j = 0; j < 4; j++)
state[i][j] = sbox_8_inv[state[i][j]];
}
// Apply the ShiftRows function
void ShiftRows(unsigned char state[4][4])
{
int i, j, pos;
unsigned char state_tmp[4][4];
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the ShiftRows permutation
pos = P[j + 4 * i];
state_tmp[i][j] = state[pos >> 2][pos & 0x3];
}
}
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] = state_tmp[i][j];
}
}
}
// Apply the inverse ShiftRows function
void ShiftRows_inv(unsigned char state[4][4])
{
int i, j, pos;
unsigned char state_tmp[4][4];
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
//application of the inverse ShiftRows permutation
pos = P_inv[j + 4 * i];
state_tmp[i][j] = state[pos >> 2][pos & 0x3];
}
}
for (i = 0; i < 4; i++)
{
for (j = 0; j < 4; j++)
{
state[i][j] = state_tmp[i][j];
}
}
}
// Apply the linear diffusion matrix
//M =
//1 0 1 1
//1 0 0 0
//0 1 1 0
//1 0 1 0
void MixColumn(unsigned char state[4][4])
{
int j;
unsigned char temp;
for (j = 0; j < 4; j++)
{
state[1][j] ^= state[2][j];
state[2][j] ^= state[0][j];
state[3][j] ^= state[2][j];
temp = state[3][j];
state[3][j] = state[2][j];
state[2][j] = state[1][j];
state[1][j] = state[0][j];
state[0][j] = temp;
}
}
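// Sanity check (derived from the XOR/rotate sequence above): a column
// (x0, x1, x2, x3) maps to (x0^x2^x3, x0, x1^x2, x0^x2), which is exactly
// the rows of M applied over GF(2).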
// Apply the inverse linear diffusion matrix
void MixColumn_inv(unsigned char state[4][4])
{
int j;
unsigned char temp;
for (j = 0; j < 4; j++)
{
temp = state[3][j];
state[3][j] = state[0][j];
state[0][j] = state[1][j];
state[1][j] = state[2][j];
state[2][j] = temp;
state[3][j] ^= state[2][j];
state[2][j] ^= state[0][j];
state[1][j] ^= state[2][j];
}
}
// decryption function of Skinny
void dec(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char dummy[4][4] = {{0}};
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
for (i = r - 1; i >= 0; i--)
{
AddKey(dummy, keyCells, ver);
}
#ifdef DEBUG
fprintf(fic, "DEC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
for (i = r - 1; i >= 0; i--)
{
MixColumn_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after MixColumn_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after ShiftRows_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey_inv(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddKey_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after AddConstants_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
SubCell4_inv(state);
else
SubCell8_inv(state);
#ifdef DEBUG
fprintf(fic, "DEC - round %.2i - after SubCell_inv: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
}
#ifdef DEBUG
fprintf(fic, "DEC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// encryption function of Skinny
void enc(unsigned char *input, const unsigned char *userkey, int ver, int r)
{
unsigned char state[4][4];
unsigned char keyCells[3][4][4];
int i;
memset(keyCells, 0, 48);
for (i = 0; i < 16; i++)
{
if (versions[ver][0] == 64)
{
if (i & 1)
{
state[i >> 2][i & 0x3] = input[i >> 1] & 0xF;
keyCells[0][i >> 2][i & 0x3] = userkey[i >> 1] & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = userkey[(i + 16) >> 1] & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = userkey[(i + 32) >> 1] & 0xF;
}
else
{
state[i >> 2][i & 0x3] = (input[i >> 1] >> 4) & 0xF;
keyCells[0][i >> 2][i & 0x3] = (userkey[i >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 128)
keyCells[1][i >> 2][i & 0x3] = (userkey[(i + 16) >> 1] >> 4) & 0xF;
if (versions[ver][1] >= 192)
keyCells[2][i >> 2][i & 0x3] = (userkey[(i + 32) >> 1] >> 4) & 0xF;
}
}
else if (versions[ver][0] == 128)
{
state[i >> 2][i & 0x3] = input[i] & 0xFF;
keyCells[0][i >> 2][i & 0x3] = userkey[i] & 0xFF;
if (versions[ver][1] >= 256)
keyCells[1][i >> 2][i & 0x3] = userkey[i + 16] & 0xFF;
if (versions[ver][1] >= 384)
keyCells[2][i >> 2][i & 0x3] = userkey[i + 32] & 0xFF;
}
}
#ifdef DEBUG
fprintf(fic, "ENC - initial state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
for (i = 0; i < r; i++)
{
if (versions[ver][0] == 64)
SubCell4(state);
else
SubCell8(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after SubCell: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddConstants(state, i);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddConstants: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
AddKey(state, keyCells, ver);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after AddKey: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
ShiftRows(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after ShiftRows: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
MixColumn(state);
#ifdef DEBUG
fprintf(fic, "ENC - round %.2i - after MixColumn: ", i);
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
} //The last subtweakey should not be added
#ifdef DEBUG
fprintf(fic, "ENC - final state: ");
display_cipher_state(state, keyCells, ver);
fprintf(fic, "\n");
#endif
if (versions[ver][0] == 64)
{
for (i = 0; i < 8; i++)
input[i] = ((state[(2 * i) >> 2][(2 * i) & 0x3] & 0xF) << 4) | (state[(2 * i + 1) >> 2][(2 * i + 1) & 0x3] & 0xF);
}
else if (versions[ver][0] == 128)
{
for (i = 0; i < 16; i++)
input[i] = state[i >> 2][i & 0x3] & 0xFF;
}
}
// generate test vectors for all the versions of Skinny
void TestVectors(int ver)
{
unsigned char p[16];
unsigned char c[16];
unsigned char k[48];
int n;
for (n = 1; n < 10; n++)
{
int i;
for (i = 0; i < (versions[ver][0] >> 3); i++)
c[i] = p[i] = rand() & 0xff;
for (i = 0; i < (versions[ver][0] >> 3); i++)
printf("%02x", p[i]);
printf("\n");
for (i = 0; i < (versions[ver][1] >> 3); i++)
k[i] = rand() & 0xff;
fprintf(fic, "TK = ");
for (i = 0; i < (versions[ver][1] >> 3); i++)
fprintf(fic, "%02x", k[i]);
fprintf(fic, "\n");
fprintf(fic, "P = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", p[i]);
fprintf(fic, "\n");
enc(c, k, ver, 10);
fprintf(fic, "C = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n");
dec(c, k, ver, 10);
fprintf(fic, "P' = ");
for (i = 0; i < (versions[ver][0] >> 3); i++)
fprintf(fic, "%02x", c[i]);
fprintf(fic, "\n\n");
}
}
int boomerang(int r, int ver, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
int i;
unsigned char p1[16], p2[16];
unsigned char c3[16], c4[16];
unsigned char k1[48], k2[48], k3[48], k4[48];
// randomly choose k1
for (i = 0; i < (versions[ver][1] >> 3); i++)
k1[i] = rand() & 0xff;
// derive k2
for (i = 0; i < (versions[ver][1] >> 3); i++)
k2[i] = k1[i] ^ dk1[i];
// derive k3
for (i = 0; i < (versions[ver][1] >> 3); i++)
k3[i] = k1[i] ^ dk2[i];
// derive k4
for (i = 0; i < (versions[ver][1] >> 3); i++)
k4[i] = k2[i] ^ dk2[i];
int num = 0;
for (int t = 0; t < N3; t++)
{
// randomly choose p1
for (i = 0; i < (versions[ver][0] >> 3); i++)
p1[i] = rand() & 0xff;
// derive p2
for (i = 0; i < (versions[ver][0] >> 3); i++)
p2[i] = p1[i] ^ dp[i];
enc(p1, k1, ver, r);
enc(p2, k2, ver, r);
// derive c3
for (i = 0; i < (versions[ver][0] >> 3); i++)
c3[i] = p1[i] ^ dc[i];
// derive c4
for (i = 0; i < (versions[ver][0] >> 3); i++)
c4[i] = p2[i] ^ dc[i];
dec(c3, k3, ver, r);
dec(c4, k4, ver, r);
        bool flag = true;
for (i = 0; i < (versions[ver][0] >> 3); i++)
if ((c3[i] ^ c4[i]) != dp[i])
flag = 0;
if (flag)
{
num++;
}
}
return num;
}
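// Note: num counts the quartets satisfying p3 ^ p4 == dp, so num / N3
// estimates the boomerang probability for this key quartet; the driver
// below aggregates this count over all threads and bunches.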
double send_boomerangs(int R, int ver, int N1, int N2, int N3, unsigned char *dp, unsigned char *dc, unsigned char *dk1, unsigned char *dk2)
{
// Parallel execution
int NUM[N1];
int counter;
printf("#Rounds: %d rounds\n", R);
printf("#Total Queries = (#Parallel threads) * (#Bunches per thread) * (#Queries per bunch) = %d * %d * %d = 2^(%f)\n", N1, N2, N3, log(N1 * N2 * N3) / log(2));
clock_t clock_timer;
double wall_timer;
clock_timer = clock();
wall_timer = omp_get_wtime();
omp_set_num_threads(N1);
#pragma omp parallel for
for (counter = 0; counter < N1; counter++)
{
int num = 0;
for (int j = 0; j < N2; j++)
{
num += boomerang(R, ver, N3, dp, dc, dk1, dk2);
}
        NUM[counter] = num; // one result slot per iteration
}
printf("%s: %0.4f\n", "time on clock", (double)(clock() - clock_timer) / CLOCKS_PER_SEC);
printf("%s: %0.4f\n", "time on wall", omp_get_wtime() - wall_timer);
double sum = 0;
double sum_temp = 1;
for (int i = 0; i < N1; i++)
sum += NUM[i];
printf("sum = %f\n", sum);
    sum_temp = ((double)N1 * N2 * N3) / sum;
printf("2^(-%f)\n\n", log(sum_temp) / log(2));
printf("##########################\n");
return sum;
}
void convert_hexstr_to_statearray(int ver, char hex_str[], unsigned char dx[16])
{
for (int i = 0; i < (versions[ver][0] >> 3); i++)
{
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0'; // strtol requires a null-terminated string
        dx[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
}
}
void convert_hexstr_to_tweakarray(int ver, char hex_str[], unsigned char dt[48])
{
for (int i = 0; i < (versions[ver][1] >> 3); i++)
{
        char hex[3];
        hex[0] = hex_str[2 * i];
        hex[1] = hex_str[2 * i + 1];
        hex[2] = '\0'; // strtol requires a null-terminated string
        dt[i] = (unsigned char)(strtol(hex, NULL, 16) & 0xff);
}
}
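// Usage sketch: convert_hexstr_to_statearray(1, "0200000002000200", dp)
// fills dp with {0x02, 0x00, 0x00, 0x00, 0x02, 0x00, 0x02, 0x00}, the
// 8-byte block difference for Skinny-64-128 (ver = 1).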
int main()
{
// //test all versions of Skinny
// for (i = 0; i < (sizeof(versions) / sizeof(*versions)); i++)
// {
// sprintf(name, "test_vectors_%i_%i.txt", versions[i][0], versions[i][1]);
// fic = fopen(name, "w");
// fprintf(fic, "\n\nSkinny-%i/%i: \n", versions[i][0], versions[i][1]);
// TestVectors(i);
// fclose(fic);
// printf("Generating test vectors for Skinny-%i/%i - saved in file test_vectors_%i_%i.txt \n", versions[i][0], versions[i][1], versions[i][0], versions[i][1]);
// }
    srand((unsigned)time(NULL)); // seed the PRNG; should only be called once
unsigned char dp[16];
unsigned char dc[16];
unsigned char dk1[48];
unsigned char dk2[48];
// #######################################################################################################
// #######################################################################################################
// ############################## User must change only the following lines ##############################
    int n = 10; // Number of independent experiments
int R = 3; // Number of rounds
int ver = 1; // Determine the version:
// [0 = Skinny-64-64]
// [1 = Skinny-64-128]
// [2 = Skinny-64-192]
// [3 = Skinny-128-128]
// [4 = Skinny-128-256]
// [5 = Skinny-128-384]
char dp_str[] = "0200000002000200";
char dc_str[] = "0000000000000500";
char dk1_str[] = "00000C00000000000000010000000000";
char dk2_str[] = "00000004000000000000000800000000";
// #######################################################################################################
// #######################################################################################################
convert_hexstr_to_statearray(ver, dp_str, dp);
convert_hexstr_to_statearray(ver, dc_str, dc);
convert_hexstr_to_tweakarray(ver, dk1_str, dk1);
convert_hexstr_to_tweakarray(ver, dk2_str, dk2);
//########################## Number of queries #########################
    int N1 = Nthreads; // Number of parallel threads : N1
int deg = 10;
int N2 = 1 << deg; // Number of bunches per threads : N2 = 2^(deg)
int N3 = 1 << 10; // Number of queries per bunches : N3
//################### Number of total queries : N1*N2*N3 ###############
double sum = 0;
for (int i = 0; i < n; i++)
{
sum += send_boomerangs(R, ver, N1, N2, N3, dp, dc, dk1, dk2);
}
    sum = ((double)n * N1 * N2 * N3) / sum;
printf("\nAverage = 2^(-%0.2f)\n", log(sum) / log(2));
return 0;
}
|
GB_unop__identity_int64_uint32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_int64_uint32)
// op(A') function: GB (_unop_tran__identity_int64_uint32)
// C type: int64_t
// A type: uint32_t
// cast: int64_t cij = (int64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
int64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int64_t z = (int64_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int64_t z = (int64_t) aij ; \
Cx [pC] = z ; \
}
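// Expansion sketch: GB_CAST_OP(pC,pA) reads Ax [pA] as uint32_t, widens
// it to int64_t, and stores it in Cx [pC], exactly as the loops below do.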
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_int64_uint32)
(
int64_t *Cx, // Cx and Ax may be aliased
const uint32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint32_t aij = Ax [p] ;
int64_t z = (int64_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_int64_uint32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
dnnl_requantize-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/* \file dnnl_requantize-inl.h
 * \brief oneDNN (DNNL) implementation of the requantize operator
* \author Jin Huang, Xinyu Chen
*/
#ifndef MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_REQUANTIZE_INL_H_
#define MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_REQUANTIZE_INL_H_
#if MXNET_USE_ONEDNN == 1
#include <algorithm>
#include <string>
#include <vector>
#include "operator/nn/dnnl/dnnl_base-inl.h"
#include "operator/quantization/requantize-inl.h"
namespace mxnet {
namespace op {
template <typename DstType>
static void DNNLRequantizeForwardKer(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs,
const float real_range) {
using namespace mshadow;
using namespace mxnet_op;
using red::limits::MaxValue;
using red::limits::MinValue;
typedef int32_t SrcDType;
// check shapes
size_t i_dim = inputs[0].shape().ndim();
size_t o_dim = outputs[0].shape().ndim();
CHECK_EQ(i_dim, o_dim);
float first_quantized_range = MinAbs(MinValue<SrcDType>(), MaxValue<SrcDType>());
float first_real_range = MaxAbs(*inputs[1].data().dptr<float>(), *inputs[2].data().dptr<float>());
float first_scale = first_real_range / first_quantized_range;
float second_real_range = real_range;
float second_quantized_range = 0.f;
if (std::is_same<DstType, int8_t>::value) {
second_quantized_range = MinAbs(MaxValue<DstType>(), MinValue<DstType>());
*outputs[1].data().dptr<float>() = -second_real_range;
*outputs[2].data().dptr<float>() = second_real_range;
} else if (std::is_same<DstType, uint8_t>::value) {
second_quantized_range = MaxValue<DstType>();
*outputs[1].data().dptr<float>() = 0.f;
*outputs[2].data().dptr<float>() = second_real_range;
} else {
LOG(FATAL) << "Unsupported requantize output type";
}
float second_scale = second_quantized_range / second_real_range;
float scale = first_scale * second_scale;
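  // Worked example (illustrative numbers): for an int32 input with a
  // calibrated real range of 10.0f and an int8 output with real_range
  // 5.0f, first_scale = 10 / 2147483647 and second_scale = 127 / 5, so
  // scale ~= 1.18e-7 maps int32 magnitudes into the int8 range.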
dnnl::primitive_attr attr;
const int mask = 0;
std::vector<float> scales = {scale};
attr.set_output_scales(mask, scales);
dnnl::engine cpu_engine = mxnet::CpuEngine::Get()->get_engine();
NDArray in_buffer = inputs[0];
if (inputs[0].IsView() && inputs[0].IsDNNLData())
in_buffer = inputs[0].Reorder2Default();
auto i_mem = in_buffer.GetDNNLData();
auto i_desc = i_mem->get_desc();
auto o_desc = i_desc;
o_desc.data.data_type = get_dnnl_type_t<DstType>();
auto reorder_pd = dnnl::reorder::primitive_desc(cpu_engine, i_desc, cpu_engine, o_desc, attr);
auto o_mem = CreateDNNLMem(outputs[0], o_desc, req[0]);
DNNLStream::Get()->RegisterPrimArgs(dnnl::reorder(reorder_pd),
{{DNNL_ARG_FROM, *i_mem}, {DNNL_ARG_TO, *o_mem.second}});
CommitOutput(outputs[0], o_mem);
DNNLStream::Get()->Submit();
}
static void DNNLRequantizeForward(const nnvm::NodeAttrs& attrs,
const OpContext& ctx,
const std::vector<NDArray>& inputs,
const std::vector<OpReqType>& req,
const std::vector<NDArray>& outputs) {
using namespace mshadow;
using namespace mxnet_op;
using red::limits::MaxValue;
using red::limits::MinValue;
typedef int32_t SrcDType;
typedef int8_t DstDType;
const RequantizeParam& param = nnvm::get<RequantizeParam>(attrs.parsed);
float real_range;
// Model is calibrated
if (param.min_calib_range.has_value() && param.max_calib_range.has_value()) {
real_range = MaxAbs(param.min_calib_range.value(), param.max_calib_range.value());
// Model is not calibrated
} else {
NDArray in_buffer = inputs[0].Reorder2Default();
auto in_ptr = in_buffer.data().dptr<SrcDType>();
auto nthreads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
SrcDType data_min = MaxValue<SrcDType>();
SrcDType data_max = MinValue<SrcDType>();
std::vector<SrcDType> data_maxs(nthreads, data_max);
std::vector<SrcDType> data_mins(nthreads, data_min);
#pragma omp parallel for num_threads(nthreads)
for (index_t i = 0; i < static_cast<index_t>(in_buffer.shape().Size()); i++) {
int tid = omp_get_thread_num();
if (in_ptr[i] > data_maxs[tid])
data_maxs[tid] = in_ptr[i];
if (in_ptr[i] < data_mins[tid])
data_mins[tid] = in_ptr[i];
}
for (index_t i = 0; i < nthreads; i++) {
if (data_maxs[i] > data_max)
data_max = data_maxs[i];
if (data_mins[i] < data_min)
data_min = data_mins[i];
}
float src_range = MinAbs(MinValue<SrcDType>(), MaxValue<SrcDType>());
SrcDType data_range = MaxAbs(data_min, data_max);
float data_scale = MaxAbs(*inputs[1].data().dptr<float>(), *inputs[2].data().dptr<float>());
real_range = data_range * data_scale / src_range;
}
auto out_type = GetQuantizeOutputType(param);
if (out_type == mshadow::kUint8) {
DNNLRequantizeForwardKer<uint8_t>(attrs, ctx, inputs, req, outputs, real_range);
} else if (out_type == mshadow::kInt8) {
DNNLRequantizeForwardKer<int8_t>(attrs, ctx, inputs, req, outputs, real_range);
} else {
LOG(FATAL) << "oneDNN requantize op only supports int8 and uint8 as output type";
}
}
} // namespace op
} // namespace mxnet
#endif // MXNET_USE_ONEDNN == 1
#endif // MXNET_OPERATOR_QUANTIZATION_DNNL_DNNL_REQUANTIZE_INL_H_
|
light.h | /* light.h */
#include "mytypes.h"
/******************************************************************************/
/* Access macros */
/* light */
#define t0(a,b,c) t0[CELTNDX(a,b,c)]
#define t0_sav(a,b,c) t0_sav[CELTNDX(a,b,c)]
#define t2(a,b,c) t2[CELTNDX(a,b,c)]
/* t0_big has NO guard zones and is nxfull-by-nyfull zones in xy */
#define t0_big(i,j,k) t0_big[(((k)*nyfull + (j))*nxfull + (i))]
/* tN_new is nxl-by-nyl-by-nzl (no guard zones) */
#define tN_new(a,b,c) tN_new[CELTNDX3(a,b,c)]
/******************************************************************************/
/* Light Functions */
void reset_tvar(rcomplex * restrict tvar, rcomplex * restrict tvar_sav);
void reset_tvar_omp45(rcomplex * restrict tvar);
void get_tvar_omp45(rcomplex * restrict tvar);
void reset_t0_big(rcomplex * restrict tbig, rcomplex * restrict tbig_sav);
/* perform rotation using Buneman's method */
void rotth_z_merge(rcomplex * restrict ct0wk, real * restrict thetb, int izlo, int izhi);
void rotth_z_merge3(rcomplex * restrict ct0wk, real * restrict thetb, int izlo, int izhi);
#ifdef _OPENMP
#pragma omp declare target
#endif
void rotth_omp45(rcomplex * restrict tvar, real * restrict thetb, int iz);
void rotth_omp45_pre3D(int nzl, rcomplex * restrict tvar, real * restrict thetb,
int izlo, int izhi);
#ifdef _OPENMP
#pragma omp end declare target
#endif
void rotth_mult_omp45(int nzl, rcomplex * restrict tvar, real * restrict thetb);
void rotth_mult_omp45_pre(int nzl, rcomplex * restrict tvar,
real * restrict thetb);
void rotth_premap(rcomplex * restrict tvar, real * restrict thetb);
void rotth_unmap(rcomplex * restrict tvar, real * restrict thetb);
void couple_z(rcomplex * restrict t0, rcomplex * restrict t2,
rcomplex * restrict denp);
void couple_z_merge3(rcomplex * restrict t0, rcomplex * restrict t2,
rcomplex * restrict denp);
void couple_omp45(rcomplex * restrict t0, rcomplex * restrict t2,
rcomplex * restrict denp);
void couple_premap(rcomplex * restrict t0, rcomplex * restrict t2,
rcomplex * restrict denp);
void couple_unmap(rcomplex * restrict t0, rcomplex * restrict t2,
rcomplex * restrict denp);
void couple_omp45_pre(rcomplex * restrict t0, rcomplex * restrict t2,
rcomplex * restrict denp);
void couple_omp45_pre_simd(rcomplex * restrict t0, rcomplex * restrict t2,
rcomplex * restrict denp);
|
NAS_BT.c | //---------------------------------------------------------------------
// program BT
//---------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>
#include <sys/time.h>
#if !defined(CLASS_W) && !defined(CLASS_S) && !defined(CLASS_A) && !defined(CLASS_B) && !defined(CLASS_C) && !defined(CLASS_D) && !defined(CLASS_E)
# define CLASS_W
#endif
//----------
// Class S:
//----------
#ifdef CLASS_S
# define PROBLEM_SIZE 12
# define NITER_DEFAULT 60
# define DT_DEFAULT 0.010
#endif
//----------
// Class W:
//----------
#ifdef CLASS_W
# define PROBLEM_SIZE 24
# define NITER_DEFAULT 200
# define DT_DEFAULT 0.0008
#endif
//----------
// Class A:
//----------
#ifdef CLASS_A
# define PROBLEM_SIZE 64
# define NITER_DEFAULT 200
# define DT_DEFAULT 0.0008
#endif
//----------
// Class B:
//----------
#ifdef CLASS_B
# define PROBLEM_SIZE 102
# define NITER_DEFAULT 200
# define DT_DEFAULT 0.0003
#endif
//----------
// Class C:
//----------
#ifdef CLASS_C
# define PROBLEM_SIZE 162
# define NITER_DEFAULT 200
# define DT_DEFAULT 0.0001
#endif
//----------
// Class D:
//----------
#ifdef CLASS_D
# define PROBLEM_SIZE 408
# define NITER_DEFAULT 250
# define DT_DEFAULT 0.00002
#endif
//----------
// Class E:
//----------
#ifdef CLASS_E
# define PROBLEM_SIZE 1020
# define NITER_DEFAULT 250
# define DT_DEFAULT 0.4e-5
#endif
#define AA 0
#define BB 1
#define CC 2
#define BLOCK_SIZE 5
#define IMAX PROBLEM_SIZE
#define JMAX PROBLEM_SIZE
#define KMAX PROBLEM_SIZE
#define IMAXP IMAX/2*2
#define JMAXP JMAX/2*2
typedef struct
{
double real;
double imag;
} dcomplex;
#define min(x,y) ((x) < (y) ? (x) : (y))
#define max(x,y) ((x) > (y) ? (x) : (y))
/* common /global/ */
int grid_points[3];
/* common /constants/ */
double tx1, tx2, tx3, ty1, ty2, ty3, tz1, tz2, tz3,
dx1, dx2, dx3, dx4, dx5, dy1, dy2, dy3, dy4,
dy5, dz1, dz2, dz3, dz4, dz5, dssp, dt,
ce[5][13], dxmax, dymax, dzmax, xxcon1, xxcon2,
xxcon3, xxcon4, xxcon5, dx1tx1, dx2tx1, dx3tx1,
dx4tx1, dx5tx1, yycon1, yycon2, yycon3, yycon4,
yycon5, dy1ty1, dy2ty1, dy3ty1, dy4ty1, dy5ty1,
zzcon1, zzcon2, zzcon3, zzcon4, zzcon5, dz1tz1,
dz2tz1, dz3tz1, dz4tz1, dz5tz1, dnxm1, dnym1,
dnzm1, c1c2, c1c5, c3c4, c1345, conz1, c1, c2,
c3, c4, c5, c4dssp, c5dssp, dtdssp, dttx1,
dttx2, dtty1, dtty2, dttz1, dttz2, c2dttx1,
c2dtty1, c2dttz1, comz1, comz4, comz5, comz6,
c3c4tx3, c3c4ty3, c3c4tz3, c2iv, con43, con16;
// to improve cache performance, the first two grid dimensions are padded
// by 1 for even-number sizes only.
/* common /fields/ */
double us [KMAX][JMAXP + 1][IMAXP + 1];
double vs [KMAX][JMAXP + 1][IMAXP + 1];
double ws [KMAX][JMAXP + 1][IMAXP + 1];
double qs [KMAX][JMAXP + 1][IMAXP + 1];
double rho_i [KMAX][JMAXP + 1][IMAXP + 1];
double square [KMAX][JMAXP + 1][IMAXP + 1];
double forcing[KMAX][JMAXP + 1][IMAXP + 1][5];
double u [KMAX][JMAXP + 1][IMAXP + 1][5];
double rhs [KMAX][JMAXP + 1][IMAXP + 1][5];
//-----------------------------------------------------------------------
// Timer constants
//-----------------------------------------------------------------------
#define t_total 1
#define t_rhsx 2
#define t_rhsy 3
#define t_rhsz 4
#define t_rhs 5
#define t_xsolve 6
#define t_ysolve 7
#define t_zsolve 8
#define t_rdis1 9
#define t_rdis2 10
#define t_add 11
#define t_last 11
void initialize();
void lhsinit(double lhs[][3][5][5], int size);
void exact_solution(double xi, double eta, double zeta, double dtemp[5]);
void exact_rhs();
void set_constants();
void adi();
void compute_rhs();
void x_solve();
void y_solve();
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5]);
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5]);
void binvcrhs(double lhs[5][5], double c[5][5], double r[5]);
void binvrhs(double lhs[5][5], double r[5]);
void z_solve();
void add();
void error_norm(double rms[5]);
void rhs_norm(double rms[5]);
void verify(int no_time_steps, char *class, int *verified);
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
double t, double mops, char *optype, int verified);
double start[64], elapsed[64];
double elapsed_time( void );
void timer_clear( int n );
void timer_start( int n );
void timer_stop( int n );
double timer_read( int n );
void wtime(double *t);
int main(int argc, char *argv[])
{
int i, niter, step;
double navg, mflops, n3;
double tmax, t, trecs[t_last + 1];
int verified;
char Class;
char *t_names[t_last + 1];
printf("\n\n NAS Parallel Benchmarks (NPB3.3-SER-C) - BT Benchmark\n\n");
niter = NITER_DEFAULT;
dt = DT_DEFAULT;
grid_points[0] = PROBLEM_SIZE;
grid_points[1] = PROBLEM_SIZE;
grid_points[2] = PROBLEM_SIZE;
printf(" Size: %4dx%4dx%4d\n",
grid_points[0], grid_points[1], grid_points[2]);
printf(" Iterations: %4d dt: %10.6f\n", niter, dt);
printf("\n");
if ( (grid_points[0] > IMAX) ||
(grid_points[1] > JMAX) ||
(grid_points[2] > KMAX) )
{
printf(" %d, %d, %d\n", grid_points[0], grid_points[1], grid_points[2]);
printf(" Problem size too big for compiled array sizes\n");
return 0;
}
set_constants();
for (i = 1; i <= t_last; i++)
{
timer_clear(i);
}
initialize();
exact_rhs();
//---------------------------------------------------------------------
// do one time step to touch all code, and reinitialize
//---------------------------------------------------------------------
adi();
initialize();
for (i = 1; i <= t_last; i++)
{
timer_clear(i);
}
timer_start(1);
for (step = 1; step <= niter; step++)
{
if ((step % 20) == 0 || step == 1)
{
printf(" Time step %4d\n", step);
}
adi();
}
timer_stop(1);
tmax = timer_read(1);
verify(niter, &Class, &verified);
n3 = 1.0 * grid_points[0] * grid_points[1] * grid_points[2];
navg = (grid_points[0] + grid_points[1] + grid_points[2]) / 3.0;
if (tmax != 0.0)
{
mflops = 1.0e-6 * (double)niter *
(3478.8 * n3 - 17655.7 * (navg * navg) + 28023.7 * navg)
/ tmax;
}
else
{
mflops = 0.0;
}
print_results("BT", Class, grid_points[0],
grid_points[1], grid_points[2], niter,
tmax, mflops, " floating point",
verified);
int exitValue = verified ? 0 : 1;
return exitValue;
}
void adi()
{
compute_rhs();
x_solve();
y_solve();
z_solve();
add();
}
//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
void add()
{
int i, j, k, m;
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points, rhs)
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];
}
}
}
}
}
//---------------------------------------------------------------------
// this function computes the norm of the difference between the
// computed solution and the exact solution
//---------------------------------------------------------------------
void error_norm(double rms[5])
{
int i, j, k, m, d;
double xi, eta, zeta, u_exact[5], add;
for (m = 0; m < 5; m++)
{
rms[m] = 0.0;
}
#pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, add) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, u, u_exact) reduction(+ : rms[:5])
for (k = 0; k <= grid_points[2] - 1; k++)
{
zeta = (double)(k) * dnzm1;
for (j = 0; j <= grid_points[1] - 1; j++)
{
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0] - 1; i++)
{
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, u_exact);
for (m = 0; m < 5; m++)
{
add = u[k][j][i][m] - u_exact[m];
rms[m] = rms[m] + add * add;
}
}
}
}
for (m = 0; m < 5; m++)
{
for (d = 0; d < 3; d++)
{
rms[m] = rms[m] / (double)(grid_points[d] - 2);
}
rms[m] = sqrt(rms[m]);
}
}
void rhs_norm(double rms[5])
{
int i, j, k, d, m;
double add;
for (m = 0; m < 5; m++)
{
rms[m] = 0.0;
}
#pragma omp parallel for default(shared) private(k, j, i, m, add) firstprivate(grid_points, rhs) reduction(+ : rms[:5])
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
add = rhs[k][j][i][m];
rms[m] = rms[m] + add * add;
}
}
}
}
for (m = 0; m < 5; m++)
{
for (d = 0; d < 3; d++)
{
rms[m] = rms[m] / (double)(grid_points[d] - 2);
}
rms[m] = sqrt(rms[m]);
}
}
//---------------------------------------------------------------------
// compute the right hand side based on exact solution
//---------------------------------------------------------------------
void exact_rhs()
{
double dtemp[5], xi, eta, zeta, dtpp;
int m, i, j, k, ip1, im1, jp1, jm1, km1, kp1;
double cuf[PROBLEM_SIZE + 1];
double q [PROBLEM_SIZE + 1];
double ue [PROBLEM_SIZE + 1][5];
double buf[PROBLEM_SIZE + 1][5];
//---------------------------------------------------------------------
// initialize
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for (k = 0; k <= grid_points[2] - 1; k++)
{
for (j = 0; j <= grid_points[1] - 1; j++)
{
for (i = 0; i <= grid_points[0] - 1; i++)
{
for (m = 0; m < 5; m++)
{
forcing[k][j][i][m] = 0.0;
}
}
}
}
//---------------------------------------------------------------------
// xi-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, zeta, eta, xi, dtpp, im1, ip1) firstprivate(dnzm1, dnym1, dnxm1, tx2, dx1tx1, c2, xxcon1, dx2tx1, xxcon2, dx3tx1, dx4tx1, c1, xxcon3, xxcon4, xxcon5, dx5tx1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for (k = 1; k <= grid_points[2] - 2; k++)
{
zeta = (double)(k) * dnzm1;
for (j = 1; j <= grid_points[1] - 2; j++)
{
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0] - 1; i++)
{
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++)
{
ue[i][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m < 5; m++)
{
buf[i][m] = dtpp * dtemp[m];
}
cuf[i] = buf[i][1] * buf[i][1];
buf[i][0] = cuf[i] + buf[i][2] * buf[i][2] + buf[i][3] * buf[i][3];
q[i] = 0.5 * (buf[i][1] * ue[i][1] + buf[i][2] * ue[i][2] +
buf[i][3] * ue[i][3]);
}
for (i = 1; i <= grid_points[0] - 2; i++)
{
im1 = i - 1;
ip1 = i + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] -
tx2 * ( ue[ip1][1] - ue[im1][1] ) +
dx1tx1 * (ue[ip1][0] - 2.0 * ue[i][0] + ue[im1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - tx2 * (
(ue[ip1][1] * buf[ip1][1] + c2 * (ue[ip1][4] - q[ip1])) -
(ue[im1][1] * buf[im1][1] + c2 * (ue[im1][4] - q[im1]))) +
xxcon1 * (buf[ip1][1] - 2.0 * buf[i][1] + buf[im1][1]) +
dx2tx1 * ( ue[ip1][1] - 2.0 * ue[i][1] + ue[im1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - tx2 * (
ue[ip1][2] * buf[ip1][1] - ue[im1][2] * buf[im1][1]) +
xxcon2 * (buf[ip1][2] - 2.0 * buf[i][2] + buf[im1][2]) +
dx3tx1 * ( ue[ip1][2] - 2.0 * ue[i][2] + ue[im1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - tx2 * (
ue[ip1][3] * buf[ip1][1] - ue[im1][3] * buf[im1][1]) +
xxcon2 * (buf[ip1][3] - 2.0 * buf[i][3] + buf[im1][3]) +
dx4tx1 * ( ue[ip1][3] - 2.0 * ue[i][3] + ue[im1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - tx2 * (
buf[ip1][1] * (c1 * ue[ip1][4] - c2 * q[ip1]) -
buf[im1][1] * (c1 * ue[im1][4] - c2 * q[im1])) +
0.5 * xxcon3 * (buf[ip1][0] - 2.0 * buf[i][0] +
buf[im1][0]) +
xxcon4 * (cuf[ip1] - 2.0 * cuf[i] + cuf[im1]) +
xxcon5 * (buf[ip1][4] - 2.0 * buf[i][4] + buf[im1][4]) +
dx5tx1 * ( ue[ip1][4] - 2.0 * ue[i][4] + ue[im1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
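// the interior dissipation stencil is (1, -4, 6, -4, 1); at the first
// and last two interior points it is replaced by the shortened forms
// (5, -4, 1) and (-4, 6, -4, 1) applied below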
for (m = 0; m < 5; m++)
{
i = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(5.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
i = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(-4.0 * ue[i - 1][m] + 6.0 * ue[i][m] -
4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
for (i = 3; i <= grid_points[0] - 4; i++)
{
for (m = 0; m < 5; m++)
{
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(ue[i - 2][m] - 4.0 * ue[i - 1][m] +
6.0 * ue[i][m] - 4.0 * ue[i + 1][m] + ue[i + 2][m]);
}
}
for (m = 0; m < 5; m++)
{
i = grid_points[0] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(ue[i - 2][m] - 4.0 * ue[i - 1][m] +
6.0 * ue[i][m] - 4.0 * ue[i + 1][m]);
i = grid_points[0] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(ue[i - 2][m] - 4.0 * ue[i - 1][m] + 5.0 * ue[i][m]);
}
}
}
//---------------------------------------------------------------------
// eta-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, i, j, m, zeta, xi, eta, dtpp, jm1, jp1) firstprivate(dnzm1, dnxm1, dnym1, ty2, dy1ty1, yycon2, dy2ty1, c2, yycon1, dy3ty1, dy4ty1, c1, yycon3, yycon4, yycon5, dy5ty1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for (k = 1; k <= grid_points[2] - 2; k++)
{
zeta = (double)(k) * dnzm1;
for (i = 1; i <= grid_points[0] - 2; i++)
{
xi = (double)(i) * dnxm1;
for (j = 0; j <= grid_points[1] - 1; j++)
{
eta = (double)(j) * dnym1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++)
{
ue[j][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m < 5; m++)
{
buf[j][m] = dtpp * dtemp[m];
}
cuf[j] = buf[j][2] * buf[j][2];
buf[j][0] = cuf[j] + buf[j][1] * buf[j][1] + buf[j][3] * buf[j][3];
q[j] = 0.5 * (buf[j][1] * ue[j][1] + buf[j][2] * ue[j][2] +
buf[j][3] * ue[j][3]);
}
for (j = 1; j <= grid_points[1] - 2; j++)
{
jm1 = j - 1;
jp1 = j + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] -
ty2 * ( ue[jp1][2] - ue[jm1][2] ) +
dy1ty1 * (ue[jp1][0] - 2.0 * ue[j][0] + ue[jm1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - ty2 * (
ue[jp1][1] * buf[jp1][2] - ue[jm1][1] * buf[jm1][2]) +
yycon2 * (buf[jp1][1] - 2.0 * buf[j][1] + buf[jm1][1]) +
dy2ty1 * ( ue[jp1][1] - 2.0 * ue[j][1] + ue[jm1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - ty2 * (
(ue[jp1][2] * buf[jp1][2] + c2 * (ue[jp1][4] - q[jp1])) -
(ue[jm1][2] * buf[jm1][2] + c2 * (ue[jm1][4] - q[jm1]))) +
yycon1 * (buf[jp1][2] - 2.0 * buf[j][2] + buf[jm1][2]) +
dy3ty1 * ( ue[jp1][2] - 2.0 * ue[j][2] + ue[jm1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - ty2 * (
ue[jp1][3] * buf[jp1][2] - ue[jm1][3] * buf[jm1][2]) +
yycon2 * (buf[jp1][3] - 2.0 * buf[j][3] + buf[jm1][3]) +
dy4ty1 * ( ue[jp1][3] - 2.0 * ue[j][3] + ue[jm1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - ty2 * (
buf[jp1][2] * (c1 * ue[jp1][4] - c2 * q[jp1]) -
buf[jm1][2] * (c1 * ue[jm1][4] - c2 * q[jm1])) +
0.5 * yycon3 * (buf[jp1][0] - 2.0 * buf[j][0] +
buf[jm1][0]) +
yycon4 * (cuf[jp1] - 2.0 * cuf[j] + cuf[jm1]) +
yycon5 * (buf[jp1][4] - 2.0 * buf[j][4] + buf[jm1][4]) +
dy5ty1 * (ue[jp1][4] - 2.0 * ue[j][4] + ue[jm1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
for (m = 0; m < 5; m++)
{
j = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(5.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
j = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(-4.0 * ue[j - 1][m] + 6.0 * ue[j][m] -
4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
for (j = 3; j <= grid_points[1] - 4; j++)
{
for (m = 0; m < 5; m++)
{
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(ue[j - 2][m] - 4.0 * ue[j - 1][m] +
6.0 * ue[j][m] - 4.0 * ue[j + 1][m] + ue[j + 2][m]);
}
}
for (m = 0; m < 5; m++)
{
j = grid_points[1] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(ue[j - 2][m] - 4.0 * ue[j - 1][m] +
6.0 * ue[j][m] - 4.0 * ue[j + 1][m]);
j = grid_points[1] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(ue[j - 2][m] - 4.0 * ue[j - 1][m] + 5.0 * ue[j][m]);
}
}
}
//---------------------------------------------------------------------
// zeta-direction flux differences
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(j, i, k, m, eta, xi, zeta, dtpp, km1, kp1) firstprivate(dnym1, dnxm1, dnzm1, tz2, dz1tz1, zzcon2, dz2tz1, dz3tz1, c2, zzcon1, dz4tz1, c1, zzcon3, zzcon4, zzcon5, dz5tz1, dssp, grid_points, ce, dtemp, ue, buf, cuf, q)
for (j = 1; j <= grid_points[1] - 2; j++)
{
eta = (double)(j) * dnym1;
for (i = 1; i <= grid_points[0] - 2; i++)
{
xi = (double)(i) * dnxm1;
for (k = 0; k <= grid_points[2] - 1; k++)
{
zeta = (double)(k) * dnzm1;
exact_solution(xi, eta, zeta, dtemp);
for (m = 0; m < 5; m++)
{
ue[k][m] = dtemp[m];
}
dtpp = 1.0 / dtemp[0];
for (m = 1; m < 5; m++)
{
buf[k][m] = dtpp * dtemp[m];
}
cuf[k] = buf[k][3] * buf[k][3];
buf[k][0] = cuf[k] + buf[k][1] * buf[k][1] + buf[k][2] * buf[k][2];
q[k] = 0.5 * (buf[k][1] * ue[k][1] + buf[k][2] * ue[k][2] +
buf[k][3] * ue[k][3]);
}
for (k = 1; k <= grid_points[2] - 2; k++)
{
km1 = k - 1;
kp1 = k + 1;
forcing[k][j][i][0] = forcing[k][j][i][0] -
tz2 * ( ue[kp1][3] - ue[km1][3] ) +
dz1tz1 * (ue[kp1][0] - 2.0 * ue[k][0] + ue[km1][0]);
forcing[k][j][i][1] = forcing[k][j][i][1] - tz2 * (
ue[kp1][1] * buf[kp1][3] - ue[km1][1] * buf[km1][3]) +
zzcon2 * (buf[kp1][1] - 2.0 * buf[k][1] + buf[km1][1]) +
dz2tz1 * ( ue[kp1][1] - 2.0 * ue[k][1] + ue[km1][1]);
forcing[k][j][i][2] = forcing[k][j][i][2] - tz2 * (
ue[kp1][2] * buf[kp1][3] - ue[km1][2] * buf[km1][3]) +
zzcon2 * (buf[kp1][2] - 2.0 * buf[k][2] + buf[km1][2]) +
dz3tz1 * (ue[kp1][2] - 2.0 * ue[k][2] + ue[km1][2]);
forcing[k][j][i][3] = forcing[k][j][i][3] - tz2 * (
(ue[kp1][3] * buf[kp1][3] + c2 * (ue[kp1][4] - q[kp1])) -
(ue[km1][3] * buf[km1][3] + c2 * (ue[km1][4] - q[km1]))) +
zzcon1 * (buf[kp1][3] - 2.0 * buf[k][3] + buf[km1][3]) +
dz4tz1 * ( ue[kp1][3] - 2.0 * ue[k][3] + ue[km1][3]);
forcing[k][j][i][4] = forcing[k][j][i][4] - tz2 * (
buf[kp1][3] * (c1 * ue[kp1][4] - c2 * q[kp1]) -
buf[km1][3] * (c1 * ue[km1][4] - c2 * q[km1])) +
0.5 * zzcon3 * (buf[kp1][0] - 2.0 * buf[k][0]
+ buf[km1][0]) +
zzcon4 * (cuf[kp1] - 2.0 * cuf[k] + cuf[km1]) +
zzcon5 * (buf[kp1][4] - 2.0 * buf[k][4] + buf[km1][4]) +
dz5tz1 * ( ue[kp1][4] - 2.0 * ue[k][4] + ue[km1][4]);
}
//---------------------------------------------------------------------
// Fourth-order dissipation
//---------------------------------------------------------------------
for (m = 0; m < 5; m++)
{
k = 1;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(5.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
k = 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(-4.0 * ue[k - 1][m] + 6.0 * ue[k][m] -
4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
for (k = 3; k <= grid_points[2] - 4; k++)
{
for (m = 0; m < 5; m++)
{
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(ue[k - 2][m] - 4.0 * ue[k - 1][m] +
6.0 * ue[k][m] - 4.0 * ue[k + 1][m] + ue[k + 2][m]);
}
}
for (m = 0; m < 5; m++)
{
k = grid_points[2] - 3;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(ue[k - 2][m] - 4.0 * ue[k - 1][m] +
6.0 * ue[k][m] - 4.0 * ue[k + 1][m]);
k = grid_points[2] - 2;
forcing[k][j][i][m] = forcing[k][j][i][m] - dssp *
(ue[k - 2][m] - 4.0 * ue[k - 1][m] + 5.0 * ue[k][m]);
}
}
}
//---------------------------------------------------------------------
// now change the sign of the forcing function
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
forcing[k][j][i][m] = -1.0 * forcing[k][j][i][m];
}
}
}
}
}
//---------------------------------------------------------------------
// this function returns the exact solution at point xi, eta, zeta
//---------------------------------------------------------------------
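// (a quartic polynomial in each of xi, eta and zeta, with the
// coefficient table ce filled in by set_constants)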
void exact_solution(double xi, double eta, double zeta, double dtemp[5])
{
int m;
for (m = 0; m < 5; m++)
{
dtemp[m] = ce[m][0] +
xi * (ce[m][1] + xi * (ce[m][4] + xi * (ce[m][7] + xi * ce[m][10]))) +
eta * (ce[m][2] + eta * (ce[m][5] + eta * (ce[m][8] + eta * ce[m][11]))) +
zeta * (ce[m][3] + zeta * (ce[m][6] + zeta * (ce[m][9] +
zeta * ce[m][12])));
}
}
//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values
//---------------------------------------------------------------------
void initialize()
{
int i, j, k, m, ix, iy, iz;
double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];
//---------------------------------------------------------------------
// Later (in compute_rhs) we compute 1/u for every element. A few of
// the corner elements are not used, but it is convenient (and faster)
// to compute the whole thing with a simple loop. Make sure those
// values are nonzero by initializing the whole thing here.
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points)
for (k = 0; k <= grid_points[2] - 1; k++)
{
for (j = 0; j <= grid_points[1] - 1; j++)
{
for (i = 0; i <= grid_points[0] - 1; i++)
{
for (m = 0; m < 5; m++)
{
u[k][j][i][m] = 1.0;
}
}
}
}
//---------------------------------------------------------------------
// first store the "interpolated" values everywhere on the grid
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, ix, iy, iz, m, zeta, eta, xi, Pxi, Peta, Pzeta) firstprivate(dnzm1, dnym1, dnxm1, grid_points, ce, Pface)
for (k = 0; k <= grid_points[2] - 1; k++)
{
zeta = (double)(k) * dnzm1;
for (j = 0; j <= grid_points[1] - 1; j++)
{
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0] - 1; i++)
{
xi = (double)(i) * dnxm1;
for (ix = 0; ix < 2; ix++)
{
exact_solution((double)ix, eta, zeta, Pface[ix][0]);
}
for (iy = 0; iy < 2; iy++)
{
exact_solution(xi, (double)iy, zeta, Pface[iy][1]);
}
for (iz = 0; iz < 2; iz++)
{
exact_solution(xi, eta, (double)iz, Pface[iz][2]);
}
for (m = 0; m < 5; m++)
{
Pxi = xi * Pface[1][0][m] + (1.0 - xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] + (1.0 - eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] + (1.0 - zeta) * Pface[0][2][m];
u[k][j][i][m] = Pxi + Peta + Pzeta -
Pxi * Peta - Pxi * Pzeta - Peta * Pzeta +
Pxi * Peta * Pzeta;
}
}
}
}
//---------------------------------------------------------------------
// now store the exact values on the boundaries
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// west face
//---------------------------------------------------------------------
i = 0;
xi = 0.0;
#pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
for (k = 0; k <= grid_points[2] - 1; k++)
{
zeta = (double)(k) * dnzm1;
for (j = 0; j <= grid_points[1] - 1; j++)
{
eta = (double)(j) * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++)
{
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// east face
//---------------------------------------------------------------------
i = grid_points[0] - 1;
xi = 1.0;
#pragma omp parallel for default(shared) private(k, j, m, zeta, eta) firstprivate(dnzm1, dnym1, xi, i, grid_points, ce, temp)
for (k = 0; k <= grid_points[2] - 1; k++)
{
zeta = (double)(k) * dnzm1;
for (j = 0; j <= grid_points[1] - 1; j++)
{
eta = (double)(j) * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++)
{
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// south face
//---------------------------------------------------------------------
j = 0;
eta = 0.0;
#pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
for (k = 0; k <= grid_points[2] - 1; k++)
{
zeta = (double)(k) * dnzm1;
for (i = 0; i <= grid_points[0] - 1; i++)
{
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++)
{
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// north face
//---------------------------------------------------------------------
j = grid_points[1] - 1;
eta = 1.0;
#pragma omp parallel for default(shared) private(k, i, m, zeta, xi) firstprivate(dnzm1, dnxm1, eta, j, grid_points, ce, temp)
for (k = 0; k <= grid_points[2] - 1; k++)
{
zeta = (double)(k) * dnzm1;
for (i = 0; i <= grid_points[0] - 1; i++)
{
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++)
{
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// bottom face
//---------------------------------------------------------------------
k = 0;
zeta = 0.0;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
for (j = 0; j <= grid_points[1] - 1; j++)
{
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0] - 1; i++)
{
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++)
{
u[k][j][i][m] = temp[m];
}
}
}
//---------------------------------------------------------------------
// top face
//---------------------------------------------------------------------
k = grid_points[2] - 1;
zeta = 1.0;
#pragma omp parallel for default(shared) private(j, i, m, eta, xi) firstprivate(dnym1, dnxm1, zeta, k, grid_points, ce, temp)
for (j = 0; j <= grid_points[1] - 1; j++)
{
eta = (double)(j) * dnym1;
for (i = 0; i <= grid_points[0] - 1; i++)
{
xi = (double)(i) * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++)
{
u[k][j][i][m] = temp[m];
}
}
}
}
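//---------------------------------------------------------------------
// zero the three 5x5 blocks of the left hand side at the first and
// last grid points, then set the diagonal (middle) block of each to
// the identity, so the boundary rows of the block tridiagonal system
// reduce to trivial equations
//---------------------------------------------------------------------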
void lhsinit(double lhs[][3][5][5], int size)
{
int i, m, n;
i = size;
//---------------------------------------------------------------------
// zero the whole left hand side for starters
//---------------------------------------------------------------------
for (n = 0; n < 5; n++)
{
for (m = 0; m < 5; m++)
{
lhs[0][0][n][m] = 0.0;
lhs[0][1][n][m] = 0.0;
lhs[0][2][n][m] = 0.0;
lhs[i][0][n][m] = 0.0;
lhs[i][1][n][m] = 0.0;
lhs[i][2][n][m] = 0.0;
}
}
//---------------------------------------------------------------------
// next, set all diagonal values to 1. This is overkill, but convenient
//---------------------------------------------------------------------
for (m = 0; m < 5; m++)
{
lhs[0][1][m][m] = 1.0;
lhs[i][1][m][m] = 1.0;
}
}
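//---------------------------------------------------------------------
// compute the right hand side vector: precompute the reciprocal
// density and velocity/energy quantities, start from the forcing
// term, add the xi-, eta- and zeta-direction flux differences and
// fourth-order dissipation, and finally scale by the time step dt
//---------------------------------------------------------------------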
void compute_rhs()
{
int i, j, k, m;
double rho_inv, uijk, up1, um1, vijk, vp1, vm1, wijk, wp1, wm1;
//---------------------------------------------------------------------
// compute the reciprocal of density, and the kinetic energy,
// and the speed of sound.
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, rho_inv) firstprivate(grid_points, u)
for (k = 0; k <= grid_points[2] - 1; k++)
{
for (j = 0; j <= grid_points[1] - 1; j++)
{
for (i = 0; i <= grid_points[0] - 1; i++)
{
rho_inv = 1.0 / u[k][j][i][0];
rho_i[k][j][i] = rho_inv;
us[k][j][i] = u[k][j][i][1] * rho_inv;
vs[k][j][i] = u[k][j][i][2] * rho_inv;
ws[k][j][i] = u[k][j][i][3] * rho_inv;
square[k][j][i] = 0.5 * (
u[k][j][i][1] * u[k][j][i][1] +
u[k][j][i][2] * u[k][j][i][2] +
u[k][j][i][3] * u[k][j][i][3] ) * rho_inv;
qs[k][j][i] = square[k][j][i] * rho_inv;
}
}
}
//---------------------------------------------------------------------
// copy the exact forcing term to the right hand side; because
// this forcing term is known, we can store it on the whole grid
// including the boundary
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(grid_points, forcing)
for (k = 0; k <= grid_points[2] - 1; k++)
{
for (j = 0; j <= grid_points[1] - 1; j++)
{
for (i = 0; i <= grid_points[0] - 1; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = forcing[k][j][i][m];
}
}
}
}
//---------------------------------------------------------------------
// compute xi-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, uijk, up1, um1) firstprivate(dx1tx1, tx2, c2, dx2tx1, xxcon2, con43, dx3tx1, dx4tx1, c1, dx5tx1, xxcon3, xxcon4, xxcon5, dssp, grid_points, us, u, square, vs, ws, qs, rho_i)
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
uijk = us[k][j][i];
up1 = us[k][j][i + 1];
um1 = us[k][j][i - 1];
rhs[k][j][i][0] = rhs[k][j][i][0] + dx1tx1 *
(u[k][j][i + 1][0] - 2.0 * u[k][j][i][0] +
u[k][j][i - 1][0]) -
tx2 * (u[k][j][i + 1][1] - u[k][j][i - 1][1]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dx2tx1 *
(u[k][j][i + 1][1] - 2.0 * u[k][j][i][1] +
u[k][j][i - 1][1]) +
xxcon2 * con43 * (up1 - 2.0 * uijk + um1) -
tx2 * (u[k][j][i + 1][1] * up1 -
u[k][j][i - 1][1] * um1 +
(u[k][j][i + 1][4] - square[k][j][i + 1] -
u[k][j][i - 1][4] + square[k][j][i - 1]) *
c2);
rhs[k][j][i][2] = rhs[k][j][i][2] + dx3tx1 *
(u[k][j][i + 1][2] - 2.0 * u[k][j][i][2] +
u[k][j][i - 1][2]) +
xxcon2 * (vs[k][j][i + 1] - 2.0 * vs[k][j][i] +
vs[k][j][i - 1]) -
tx2 * (u[k][j][i + 1][2] * up1 -
u[k][j][i - 1][2] * um1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dx4tx1 *
(u[k][j][i + 1][3] - 2.0 * u[k][j][i][3] +
u[k][j][i - 1][3]) +
xxcon2 * (ws[k][j][i + 1] - 2.0 * ws[k][j][i] +
ws[k][j][i - 1]) -
tx2 * (u[k][j][i + 1][3] * up1 -
u[k][j][i - 1][3] * um1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dx5tx1 *
(u[k][j][i + 1][4] - 2.0 * u[k][j][i][4] +
u[k][j][i - 1][4]) +
xxcon3 * (qs[k][j][i + 1] - 2.0 * qs[k][j][i] +
qs[k][j][i - 1]) +
xxcon4 * (up1 * up1 - 2.0 * uijk * uijk +
um1 * um1) +
xxcon5 * (u[k][j][i + 1][4] * rho_i[k][j][i + 1] -
2.0 * u[k][j][i][4] * rho_i[k][j][i] +
u[k][j][i - 1][4] * rho_i[k][j][i - 1]) -
tx2 * ( (c1 * u[k][j][i + 1][4] -
c2 * square[k][j][i + 1]) * up1 -
(c1 * u[k][j][i - 1][4] -
c2 * square[k][j][i - 1]) * um1 );
}
}
//---------------------------------------------------------------------
// add fourth order xi-direction dissipation
//---------------------------------------------------------------------
for (j = 1; j <= grid_points[1] - 2; j++)
{
i = 1;
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( 5.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] +
u[k][j][i + 2][m]);
}
i = 2;
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
(-4.0 * u[k][j][i - 1][m] + 6.0 * u[k][j][i][m] -
4.0 * u[k][j][i + 1][m] + u[k][j][i + 2][m]);
}
}
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 3; i <= grid_points[0] - 4; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] +
6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] +
u[k][j][i + 2][m] );
}
}
}
for (j = 1; j <= grid_points[1] - 2; j++)
{
i = grid_points[0] - 3;
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] +
6.0 * u[k][j][i][m] - 4.0 * u[k][j][i + 1][m] );
}
i = grid_points[0] - 2;
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j][i - 2][m] - 4.0 * u[k][j][i - 1][m] +
5.0 * u[k][j][i][m] );
}
}
}
//---------------------------------------------------------------------
// compute eta-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, m, vijk, vp1, vm1) firstprivate(dy1ty1, ty2, dy2ty1, yycon2, c2, dy3ty1, con43, dy4ty1, c1, dy5ty1, yycon3, yycon4, yycon5, dssp, grid_points, vs, u, us, square, ws, qs, rho_i)
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
vijk = vs[k][j][i];
vp1 = vs[k][j + 1][i];
vm1 = vs[k][j - 1][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dy1ty1 *
(u[k][j + 1][i][0] - 2.0 * u[k][j][i][0] +
u[k][j - 1][i][0]) -
ty2 * (u[k][j + 1][i][2] - u[k][j - 1][i][2]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dy2ty1 *
(u[k][j + 1][i][1] - 2.0 * u[k][j][i][1] +
u[k][j - 1][i][1]) +
yycon2 * (us[k][j + 1][i] - 2.0 * us[k][j][i] +
us[k][j - 1][i]) -
ty2 * (u[k][j + 1][i][1] * vp1 -
u[k][j - 1][i][1] * vm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dy3ty1 *
(u[k][j + 1][i][2] - 2.0 * u[k][j][i][2] +
u[k][j - 1][i][2]) +
yycon2 * con43 * (vp1 - 2.0 * vijk + vm1) -
ty2 * (u[k][j + 1][i][2] * vp1 -
u[k][j - 1][i][2] * vm1 +
(u[k][j + 1][i][4] - square[k][j + 1][i] -
u[k][j - 1][i][4] + square[k][j - 1][i])
* c2);
rhs[k][j][i][3] = rhs[k][j][i][3] + dy4ty1 *
(u[k][j + 1][i][3] - 2.0 * u[k][j][i][3] +
u[k][j - 1][i][3]) +
yycon2 * (ws[k][j + 1][i] - 2.0 * ws[k][j][i] +
ws[k][j - 1][i]) -
ty2 * (u[k][j + 1][i][3] * vp1 -
u[k][j - 1][i][3] * vm1);
rhs[k][j][i][4] = rhs[k][j][i][4] + dy5ty1 *
(u[k][j + 1][i][4] - 2.0 * u[k][j][i][4] +
u[k][j - 1][i][4]) +
yycon3 * (qs[k][j + 1][i] - 2.0 * qs[k][j][i] +
qs[k][j - 1][i]) +
yycon4 * (vp1 * vp1 - 2.0 * vijk * vijk +
vm1 * vm1) +
yycon5 * (u[k][j + 1][i][4] * rho_i[k][j + 1][i] -
2.0 * u[k][j][i][4] * rho_i[k][j][i] +
u[k][j - 1][i][4] * rho_i[k][j - 1][i]) -
ty2 * ((c1 * u[k][j + 1][i][4] -
c2 * square[k][j + 1][i]) * vp1 -
(c1 * u[k][j - 1][i][4] -
c2 * square[k][j - 1][i]) * vm1);
}
}
//---------------------------------------------------------------------
// add fourth order eta-direction dissipation
//---------------------------------------------------------------------
j = 1;
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( 5.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] +
u[k][j + 2][i][m]);
}
}
j = 2;
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
(-4.0 * u[k][j - 1][i][m] + 6.0 * u[k][j][i][m] -
4.0 * u[k][j + 1][i][m] + u[k][j + 2][i][m]);
}
}
for (j = 3; j <= grid_points[1] - 4; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] +
6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] +
u[k][j + 2][i][m] );
}
}
}
j = grid_points[1] - 3;
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] +
6.0 * u[k][j][i][m] - 4.0 * u[k][j + 1][i][m] );
}
}
j = grid_points[1] - 2;
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k][j - 2][i][m] - 4.0 * u[k][j - 1][i][m] +
5.0 * u[k][j][i][m] );
}
}
}
//---------------------------------------------------------------------
// compute zeta-direction fluxes
//---------------------------------------------------------------------
#pragma omp parallel for default(shared) private(k, j, i, wijk, wp1, wm1) firstprivate(dz1tz1, tz2, dz2tz1, zzcon2, dz3tz1, c2, dz4tz1, con43, c1, dz5tz1, zzcon3, zzcon4, zzcon5, grid_points, ws, u, us, vs, square, qs, rho_i)
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
wijk = ws[k][j][i];
wp1 = ws[k + 1][j][i];
wm1 = ws[k - 1][j][i];
rhs[k][j][i][0] = rhs[k][j][i][0] + dz1tz1 *
(u[k + 1][j][i][0] - 2.0 * u[k][j][i][0] +
u[k - 1][j][i][0]) -
tz2 * (u[k + 1][j][i][3] - u[k - 1][j][i][3]);
rhs[k][j][i][1] = rhs[k][j][i][1] + dz2tz1 *
(u[k + 1][j][i][1] - 2.0 * u[k][j][i][1] +
u[k - 1][j][i][1]) +
zzcon2 * (us[k + 1][j][i] - 2.0 * us[k][j][i] +
us[k - 1][j][i]) -
tz2 * (u[k + 1][j][i][1] * wp1 -
u[k - 1][j][i][1] * wm1);
rhs[k][j][i][2] = rhs[k][j][i][2] + dz3tz1 *
(u[k + 1][j][i][2] - 2.0 * u[k][j][i][2] +
u[k - 1][j][i][2]) +
zzcon2 * (vs[k + 1][j][i] - 2.0 * vs[k][j][i] +
vs[k - 1][j][i]) -
tz2 * (u[k + 1][j][i][2] * wp1 -
u[k - 1][j][i][2] * wm1);
rhs[k][j][i][3] = rhs[k][j][i][3] + dz4tz1 *
(u[k + 1][j][i][3] - 2.0 * u[k][j][i][3] +
u[k - 1][j][i][3]) +
zzcon2 * con43 * (wp1 - 2.0 * wijk + wm1) -
tz2 * (u[k + 1][j][i][3] * wp1 -
u[k - 1][j][i][3] * wm1 +
(u[k + 1][j][i][4] - square[k + 1][j][i] -
u[k - 1][j][i][4] + square[k - 1][j][i])
* c2);
rhs[k][j][i][4] = rhs[k][j][i][4] + dz5tz1 *
(u[k + 1][j][i][4] - 2.0 * u[k][j][i][4] +
u[k - 1][j][i][4]) +
zzcon3 * (qs[k + 1][j][i] - 2.0 * qs[k][j][i] +
qs[k - 1][j][i]) +
zzcon4 * (wp1 * wp1 - 2.0 * wijk * wijk +
wm1 * wm1) +
zzcon5 * (u[k + 1][j][i][4] * rho_i[k + 1][j][i] -
2.0 * u[k][j][i][4] * rho_i[k][j][i] +
u[k - 1][j][i][4] * rho_i[k - 1][j][i]) -
tz2 * ( (c1 * u[k + 1][j][i][4] -
c2 * square[k + 1][j][i]) * wp1 -
(c1 * u[k - 1][j][i][4] -
c2 * square[k - 1][j][i]) * wm1);
}
}
}
//---------------------------------------------------------------------
// add fourth order zeta-direction dissipation
//---------------------------------------------------------------------
k = 1;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( 5.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] +
u[k + 2][j][i][m]);
}
}
}
k = 2;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
(-4.0 * u[k - 1][j][i][m] + 6.0 * u[k][j][i][m] -
4.0 * u[k + 1][j][i][m] + u[k + 2][j][i][m]);
}
}
}
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(dssp, grid_points, u)
for (k = 3; k <= grid_points[2] - 4; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] +
6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] +
u[k + 2][j][i][m] );
}
}
}
}
k = grid_points[2] - 3;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] +
6.0 * u[k][j][i][m] - 4.0 * u[k + 1][j][i][m] );
}
}
}
k = grid_points[2] - 2;
#pragma omp parallel for default(shared) private(j, i, m) firstprivate(k, dssp, grid_points, u)
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] - dssp *
( u[k - 2][j][i][m] - 4.0 * u[k - 1][j][i][m] +
5.0 * u[k][j][i][m] );
}
}
}
#pragma omp parallel for default(shared) private(k, j, i, m) firstprivate(dt, grid_points)
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
for (m = 0; m < 5; m++)
{
rhs[k][j][i][m] = rhs[k][j][i][m] * dt;
}
}
}
}
}
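//---------------------------------------------------------------------
// fill in the coefficient table ce of the exact solution and derive
// all grid- and dt-dependent constants used by the flux, dissipation
// and solver routines
//---------------------------------------------------------------------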
void set_constants()
{
ce[0][0] = 2.0;
ce[0][1] = 0.0;
ce[0][2] = 0.0;
ce[0][3] = 4.0;
ce[0][4] = 5.0;
ce[0][5] = 3.0;
ce[0][6] = 0.5;
ce[0][7] = 0.02;
ce[0][8] = 0.01;
ce[0][9] = 0.03;
ce[0][10] = 0.5;
ce[0][11] = 0.4;
ce[0][12] = 0.3;
ce[1][0] = 1.0;
ce[1][1] = 0.0;
ce[1][2] = 0.0;
ce[1][3] = 0.0;
ce[1][4] = 1.0;
ce[1][5] = 2.0;
ce[1][6] = 3.0;
ce[1][7] = 0.01;
ce[1][8] = 0.03;
ce[1][9] = 0.02;
ce[1][10] = 0.4;
ce[1][11] = 0.3;
ce[1][12] = 0.5;
ce[2][0] = 2.0;
ce[2][1] = 2.0;
ce[2][2] = 0.0;
ce[2][3] = 0.0;
ce[2][4] = 0.0;
ce[2][5] = 2.0;
ce[2][6] = 3.0;
ce[2][7] = 0.04;
ce[2][8] = 0.03;
ce[2][9] = 0.05;
ce[2][10] = 0.3;
ce[2][11] = 0.5;
ce[2][12] = 0.4;
ce[3][0] = 2.0;
ce[3][1] = 2.0;
ce[3][2] = 0.0;
ce[3][3] = 0.0;
ce[3][4] = 0.0;
ce[3][5] = 2.0;
ce[3][6] = 3.0;
ce[3][7] = 0.03;
ce[3][8] = 0.05;
ce[3][9] = 0.04;
ce[3][10] = 0.2;
ce[3][11] = 0.1;
ce[3][12] = 0.3;
ce[4][0] = 5.0;
ce[4][1] = 4.0;
ce[4][2] = 3.0;
ce[4][3] = 2.0;
ce[4][4] = 0.1;
ce[4][5] = 0.4;
ce[4][6] = 0.3;
ce[4][7] = 0.05;
ce[4][8] = 0.04;
ce[4][9] = 0.03;
ce[4][10] = 0.1;
ce[4][11] = 0.3;
ce[4][12] = 0.2;
c1 = 1.4;
c2 = 0.4;
c3 = 0.1;
c4 = 1.0;
c5 = 1.4;
dnxm1 = 1.0 / (double)(grid_points[0] - 1);
dnym1 = 1.0 / (double)(grid_points[1] - 1);
dnzm1 = 1.0 / (double)(grid_points[2] - 1);
c1c2 = c1 * c2;
c1c5 = c1 * c5;
c3c4 = c3 * c4;
c1345 = c1c5 * c3c4;
conz1 = (1.0 - c1c5);
tx1 = 1.0 / (dnxm1 * dnxm1);
tx2 = 1.0 / (2.0 * dnxm1);
tx3 = 1.0 / dnxm1;
ty1 = 1.0 / (dnym1 * dnym1);
ty2 = 1.0 / (2.0 * dnym1);
ty3 = 1.0 / dnym1;
tz1 = 1.0 / (dnzm1 * dnzm1);
tz2 = 1.0 / (2.0 * dnzm1);
tz3 = 1.0 / dnzm1;
dx1 = 0.75;
dx2 = 0.75;
dx3 = 0.75;
dx4 = 0.75;
dx5 = 0.75;
dy1 = 0.75;
dy2 = 0.75;
dy3 = 0.75;
dy4 = 0.75;
dy5 = 0.75;
dz1 = 1.0;
dz2 = 1.0;
dz3 = 1.0;
dz4 = 1.0;
dz5 = 1.0;
dxmax = max(dx3, dx4);
dymax = max(dy2, dy4);
dzmax = max(dz2, dz3);
dssp = 0.25 * max(dx1, max(dy1, dz1) );
c4dssp = 4.0 * dssp;
c5dssp = 5.0 * dssp;
dttx1 = dt * tx1;
dttx2 = dt * tx2;
dtty1 = dt * ty1;
dtty2 = dt * ty2;
dttz1 = dt * tz1;
dttz2 = dt * tz2;
c2dttx1 = 2.0 * dttx1;
c2dtty1 = 2.0 * dtty1;
c2dttz1 = 2.0 * dttz1;
dtdssp = dt * dssp;
comz1 = dtdssp;
comz4 = 4.0 * dtdssp;
comz5 = 5.0 * dtdssp;
comz6 = 6.0 * dtdssp;
c3c4tx3 = c3c4 * tx3;
c3c4ty3 = c3c4 * ty3;
c3c4tz3 = c3c4 * tz3;
dx1tx1 = dx1 * tx1;
dx2tx1 = dx2 * tx1;
dx3tx1 = dx3 * tx1;
dx4tx1 = dx4 * tx1;
dx5tx1 = dx5 * tx1;
dy1ty1 = dy1 * ty1;
dy2ty1 = dy2 * ty1;
dy3ty1 = dy3 * ty1;
dy4ty1 = dy4 * ty1;
dy5ty1 = dy5 * ty1;
dz1tz1 = dz1 * tz1;
dz2tz1 = dz2 * tz1;
dz3tz1 = dz3 * tz1;
dz4tz1 = dz4 * tz1;
dz5tz1 = dz5 * tz1;
c2iv = 2.5;
con43 = 4.0 / 3.0;
con16 = 1.0 / 6.0;
xxcon1 = c3c4tx3 * con43 * tx3;
xxcon2 = c3c4tx3 * tx3;
xxcon3 = c3c4tx3 * conz1 * tx3;
xxcon4 = c3c4tx3 * con16 * tx3;
xxcon5 = c3c4tx3 * c1c5 * tx3;
yycon1 = c3c4ty3 * con43 * ty3;
yycon2 = c3c4ty3 * ty3;
yycon3 = c3c4ty3 * conz1 * ty3;
yycon4 = c3c4ty3 * con16 * ty3;
yycon5 = c3c4ty3 * c1c5 * ty3;
zzcon1 = c3c4tz3 * con43 * tz3;
zzcon2 = c3c4tz3 * tz3;
zzcon3 = c3c4tz3 * conz1 * tz3;
zzcon4 = c3c4tz3 * con16 * tz3;
zzcon5 = c3c4tz3 * c1c5 * tz3;
}
//---------------------------------------------------------------------
// subtracts bvec=bvec - ablock*avec
//---------------------------------------------------------------------
void matvec_sub(double ablock[5][5], double avec[5], double bvec[5])
{
//---------------------------------------------------------------------
// note the transposed block storage: ablock[col][row] holds the
// matrix element at (row, col), so each statement below forms one
// row of bvec = bvec - ablock*avec
//---------------------------------------------------------------------
bvec[0] = bvec[0] - ablock[0][0] * avec[0]
- ablock[1][0] * avec[1]
- ablock[2][0] * avec[2]
- ablock[3][0] * avec[3]
- ablock[4][0] * avec[4];
bvec[1] = bvec[1] - ablock[0][1] * avec[0]
- ablock[1][1] * avec[1]
- ablock[2][1] * avec[2]
- ablock[3][1] * avec[3]
- ablock[4][1] * avec[4];
bvec[2] = bvec[2] - ablock[0][2] * avec[0]
- ablock[1][2] * avec[1]
- ablock[2][2] * avec[2]
- ablock[3][2] * avec[3]
- ablock[4][2] * avec[4];
bvec[3] = bvec[3] - ablock[0][3] * avec[0]
- ablock[1][3] * avec[1]
- ablock[2][3] * avec[2]
- ablock[3][3] * avec[3]
- ablock[4][3] * avec[4];
bvec[4] = bvec[4] - ablock[0][4] * avec[0]
- ablock[1][4] * avec[1]
- ablock[2][4] * avec[2]
- ablock[3][4] * avec[3]
- ablock[4][4] * avec[4];
}
//---------------------------------------------------------------------
// subtracts the 5x5 block product ablock*bblock from cblock
//---------------------------------------------------------------------
void matmul_sub(double ablock[5][5], double bblock[5][5], double cblock[5][5])
{
cblock[0][0] = cblock[0][0] - ablock[0][0] * bblock[0][0]
- ablock[1][0] * bblock[0][1]
- ablock[2][0] * bblock[0][2]
- ablock[3][0] * bblock[0][3]
- ablock[4][0] * bblock[0][4];
cblock[0][1] = cblock[0][1] - ablock[0][1] * bblock[0][0]
- ablock[1][1] * bblock[0][1]
- ablock[2][1] * bblock[0][2]
- ablock[3][1] * bblock[0][3]
- ablock[4][1] * bblock[0][4];
cblock[0][2] = cblock[0][2] - ablock[0][2] * bblock[0][0]
- ablock[1][2] * bblock[0][1]
- ablock[2][2] * bblock[0][2]
- ablock[3][2] * bblock[0][3]
- ablock[4][2] * bblock[0][4];
cblock[0][3] = cblock[0][3] - ablock[0][3] * bblock[0][0]
- ablock[1][3] * bblock[0][1]
- ablock[2][3] * bblock[0][2]
- ablock[3][3] * bblock[0][3]
- ablock[4][3] * bblock[0][4];
cblock[0][4] = cblock[0][4] - ablock[0][4] * bblock[0][0]
- ablock[1][4] * bblock[0][1]
- ablock[2][4] * bblock[0][2]
- ablock[3][4] * bblock[0][3]
- ablock[4][4] * bblock[0][4];
cblock[1][0] = cblock[1][0] - ablock[0][0] * bblock[1][0]
- ablock[1][0] * bblock[1][1]
- ablock[2][0] * bblock[1][2]
- ablock[3][0] * bblock[1][3]
- ablock[4][0] * bblock[1][4];
cblock[1][1] = cblock[1][1] - ablock[0][1] * bblock[1][0]
- ablock[1][1] * bblock[1][1]
- ablock[2][1] * bblock[1][2]
- ablock[3][1] * bblock[1][3]
- ablock[4][1] * bblock[1][4];
cblock[1][2] = cblock[1][2] - ablock[0][2] * bblock[1][0]
- ablock[1][2] * bblock[1][1]
- ablock[2][2] * bblock[1][2]
- ablock[3][2] * bblock[1][3]
- ablock[4][2] * bblock[1][4];
cblock[1][3] = cblock[1][3] - ablock[0][3] * bblock[1][0]
- ablock[1][3] * bblock[1][1]
- ablock[2][3] * bblock[1][2]
- ablock[3][3] * bblock[1][3]
- ablock[4][3] * bblock[1][4];
cblock[1][4] = cblock[1][4] - ablock[0][4] * bblock[1][0]
- ablock[1][4] * bblock[1][1]
- ablock[2][4] * bblock[1][2]
- ablock[3][4] * bblock[1][3]
- ablock[4][4] * bblock[1][4];
cblock[2][0] = cblock[2][0] - ablock[0][0] * bblock[2][0]
- ablock[1][0] * bblock[2][1]
- ablock[2][0] * bblock[2][2]
- ablock[3][0] * bblock[2][3]
- ablock[4][0] * bblock[2][4];
cblock[2][1] = cblock[2][1] - ablock[0][1] * bblock[2][0]
- ablock[1][1] * bblock[2][1]
- ablock[2][1] * bblock[2][2]
- ablock[3][1] * bblock[2][3]
- ablock[4][1] * bblock[2][4];
cblock[2][2] = cblock[2][2] - ablock[0][2] * bblock[2][0]
- ablock[1][2] * bblock[2][1]
- ablock[2][2] * bblock[2][2]
- ablock[3][2] * bblock[2][3]
- ablock[4][2] * bblock[2][4];
cblock[2][3] = cblock[2][3] - ablock[0][3] * bblock[2][0]
- ablock[1][3] * bblock[2][1]
- ablock[2][3] * bblock[2][2]
- ablock[3][3] * bblock[2][3]
- ablock[4][3] * bblock[2][4];
cblock[2][4] = cblock[2][4] - ablock[0][4] * bblock[2][0]
- ablock[1][4] * bblock[2][1]
- ablock[2][4] * bblock[2][2]
- ablock[3][4] * bblock[2][3]
- ablock[4][4] * bblock[2][4];
cblock[3][0] = cblock[3][0] - ablock[0][0] * bblock[3][0]
- ablock[1][0] * bblock[3][1]
- ablock[2][0] * bblock[3][2]
- ablock[3][0] * bblock[3][3]
- ablock[4][0] * bblock[3][4];
cblock[3][1] = cblock[3][1] - ablock[0][1] * bblock[3][0]
- ablock[1][1] * bblock[3][1]
- ablock[2][1] * bblock[3][2]
- ablock[3][1] * bblock[3][3]
- ablock[4][1] * bblock[3][4];
cblock[3][2] = cblock[3][2] - ablock[0][2] * bblock[3][0]
- ablock[1][2] * bblock[3][1]
- ablock[2][2] * bblock[3][2]
- ablock[3][2] * bblock[3][3]
- ablock[4][2] * bblock[3][4];
cblock[3][3] = cblock[3][3] - ablock[0][3] * bblock[3][0]
- ablock[1][3] * bblock[3][1]
- ablock[2][3] * bblock[3][2]
- ablock[3][3] * bblock[3][3]
- ablock[4][3] * bblock[3][4];
cblock[3][4] = cblock[3][4] - ablock[0][4] * bblock[3][0]
- ablock[1][4] * bblock[3][1]
- ablock[2][4] * bblock[3][2]
- ablock[3][4] * bblock[3][3]
- ablock[4][4] * bblock[3][4];
cblock[4][0] = cblock[4][0] - ablock[0][0] * bblock[4][0]
- ablock[1][0] * bblock[4][1]
- ablock[2][0] * bblock[4][2]
- ablock[3][0] * bblock[4][3]
- ablock[4][0] * bblock[4][4];
cblock[4][1] = cblock[4][1] - ablock[0][1] * bblock[4][0]
- ablock[1][1] * bblock[4][1]
- ablock[2][1] * bblock[4][2]
- ablock[3][1] * bblock[4][3]
- ablock[4][1] * bblock[4][4];
cblock[4][2] = cblock[4][2] - ablock[0][2] * bblock[4][0]
- ablock[1][2] * bblock[4][1]
- ablock[2][2] * bblock[4][2]
- ablock[3][2] * bblock[4][3]
- ablock[4][2] * bblock[4][4];
cblock[4][3] = cblock[4][3] - ablock[0][3] * bblock[4][0]
- ablock[1][3] * bblock[4][1]
- ablock[2][3] * bblock[4][2]
- ablock[3][3] * bblock[4][3]
- ablock[4][3] * bblock[4][4];
cblock[4][4] = cblock[4][4] - ablock[0][4] * bblock[4][0]
- ablock[1][4] * bblock[4][1]
- ablock[2][4] * bblock[4][2]
- ablock[3][4] * bblock[4][3]
- ablock[4][4] * bblock[4][4];
}
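//---------------------------------------------------------------------
// Gauss-Jordan elimination on the 5x5 block lhs (no pivot search;
// the diagonal blocks are assumed safely invertible), applying the
// same row operations to the coupling block c and the right hand
// side r, so that on exit c = lhs^{-1}*c and r = lhs^{-1}*r
//---------------------------------------------------------------------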
void binvcrhs(double lhs[5][5], double c[5][5], double r[5])
{
double pivot, coeff;
pivot = 1.00 / lhs[0][0];
lhs[1][0] = lhs[1][0] * pivot;
lhs[2][0] = lhs[2][0] * pivot;
lhs[3][0] = lhs[3][0] * pivot;
lhs[4][0] = lhs[4][0] * pivot;
c[0][0] = c[0][0] * pivot;
c[1][0] = c[1][0] * pivot;
c[2][0] = c[2][0] * pivot;
c[3][0] = c[3][0] * pivot;
c[4][0] = c[4][0] * pivot;
r[0] = r[0] * pivot;
coeff = lhs[0][1];
lhs[1][1] = lhs[1][1] - coeff * lhs[1][0];
lhs[2][1] = lhs[2][1] - coeff * lhs[2][0];
lhs[3][1] = lhs[3][1] - coeff * lhs[3][0];
lhs[4][1] = lhs[4][1] - coeff * lhs[4][0];
c[0][1] = c[0][1] - coeff * c[0][0];
c[1][1] = c[1][1] - coeff * c[1][0];
c[2][1] = c[2][1] - coeff * c[2][0];
c[3][1] = c[3][1] - coeff * c[3][0];
c[4][1] = c[4][1] - coeff * c[4][0];
r[1] = r[1] - coeff * r[0];
coeff = lhs[0][2];
lhs[1][2] = lhs[1][2] - coeff * lhs[1][0];
lhs[2][2] = lhs[2][2] - coeff * lhs[2][0];
lhs[3][2] = lhs[3][2] - coeff * lhs[3][0];
lhs[4][2] = lhs[4][2] - coeff * lhs[4][0];
c[0][2] = c[0][2] - coeff * c[0][0];
c[1][2] = c[1][2] - coeff * c[1][0];
c[2][2] = c[2][2] - coeff * c[2][0];
c[3][2] = c[3][2] - coeff * c[3][0];
c[4][2] = c[4][2] - coeff * c[4][0];
r[2] = r[2] - coeff * r[0];
coeff = lhs[0][3];
lhs[1][3] = lhs[1][3] - coeff * lhs[1][0];
lhs[2][3] = lhs[2][3] - coeff * lhs[2][0];
lhs[3][3] = lhs[3][3] - coeff * lhs[3][0];
lhs[4][3] = lhs[4][3] - coeff * lhs[4][0];
c[0][3] = c[0][3] - coeff * c[0][0];
c[1][3] = c[1][3] - coeff * c[1][0];
c[2][3] = c[2][3] - coeff * c[2][0];
c[3][3] = c[3][3] - coeff * c[3][0];
c[4][3] = c[4][3] - coeff * c[4][0];
r[3] = r[3] - coeff * r[0];
coeff = lhs[0][4];
lhs[1][4] = lhs[1][4] - coeff * lhs[1][0];
lhs[2][4] = lhs[2][4] - coeff * lhs[2][0];
lhs[3][4] = lhs[3][4] - coeff * lhs[3][0];
lhs[4][4] = lhs[4][4] - coeff * lhs[4][0];
c[0][4] = c[0][4] - coeff * c[0][0];
c[1][4] = c[1][4] - coeff * c[1][0];
c[2][4] = c[2][4] - coeff * c[2][0];
c[3][4] = c[3][4] - coeff * c[3][0];
c[4][4] = c[4][4] - coeff * c[4][0];
r[4] = r[4] - coeff * r[0];
pivot = 1.00 / lhs[1][1];
lhs[2][1] = lhs[2][1] * pivot;
lhs[3][1] = lhs[3][1] * pivot;
lhs[4][1] = lhs[4][1] * pivot;
c[0][1] = c[0][1] * pivot;
c[1][1] = c[1][1] * pivot;
c[2][1] = c[2][1] * pivot;
c[3][1] = c[3][1] * pivot;
c[4][1] = c[4][1] * pivot;
r[1] = r[1] * pivot;
coeff = lhs[1][0];
lhs[2][0] = lhs[2][0] - coeff * lhs[2][1];
lhs[3][0] = lhs[3][0] - coeff * lhs[3][1];
lhs[4][0] = lhs[4][0] - coeff * lhs[4][1];
c[0][0] = c[0][0] - coeff * c[0][1];
c[1][0] = c[1][0] - coeff * c[1][1];
c[2][0] = c[2][0] - coeff * c[2][1];
c[3][0] = c[3][0] - coeff * c[3][1];
c[4][0] = c[4][0] - coeff * c[4][1];
r[0] = r[0] - coeff * r[1];
coeff = lhs[1][2];
lhs[2][2] = lhs[2][2] - coeff * lhs[2][1];
lhs[3][2] = lhs[3][2] - coeff * lhs[3][1];
lhs[4][2] = lhs[4][2] - coeff * lhs[4][1];
c[0][2] = c[0][2] - coeff * c[0][1];
c[1][2] = c[1][2] - coeff * c[1][1];
c[2][2] = c[2][2] - coeff * c[2][1];
c[3][2] = c[3][2] - coeff * c[3][1];
c[4][2] = c[4][2] - coeff * c[4][1];
r[2] = r[2] - coeff * r[1];
coeff = lhs[1][3];
lhs[2][3] = lhs[2][3] - coeff * lhs[2][1];
lhs[3][3] = lhs[3][3] - coeff * lhs[3][1];
lhs[4][3] = lhs[4][3] - coeff * lhs[4][1];
c[0][3] = c[0][3] - coeff * c[0][1];
c[1][3] = c[1][3] - coeff * c[1][1];
c[2][3] = c[2][3] - coeff * c[2][1];
c[3][3] = c[3][3] - coeff * c[3][1];
c[4][3] = c[4][3] - coeff * c[4][1];
r[3] = r[3] - coeff * r[1];
coeff = lhs[1][4];
lhs[2][4] = lhs[2][4] - coeff * lhs[2][1];
lhs[3][4] = lhs[3][4] - coeff * lhs[3][1];
lhs[4][4] = lhs[4][4] - coeff * lhs[4][1];
c[0][4] = c[0][4] - coeff * c[0][1];
c[1][4] = c[1][4] - coeff * c[1][1];
c[2][4] = c[2][4] - coeff * c[2][1];
c[3][4] = c[3][4] - coeff * c[3][1];
c[4][4] = c[4][4] - coeff * c[4][1];
r[4] = r[4] - coeff * r[1];
pivot = 1.00 / lhs[2][2];
lhs[3][2] = lhs[3][2] * pivot;
lhs[4][2] = lhs[4][2] * pivot;
c[0][2] = c[0][2] * pivot;
c[1][2] = c[1][2] * pivot;
c[2][2] = c[2][2] * pivot;
c[3][2] = c[3][2] * pivot;
c[4][2] = c[4][2] * pivot;
r[2] = r[2] * pivot;
coeff = lhs[2][0];
lhs[3][0] = lhs[3][0] - coeff * lhs[3][2];
lhs[4][0] = lhs[4][0] - coeff * lhs[4][2];
c[0][0] = c[0][0] - coeff * c[0][2];
c[1][0] = c[1][0] - coeff * c[1][2];
c[2][0] = c[2][0] - coeff * c[2][2];
c[3][0] = c[3][0] - coeff * c[3][2];
c[4][0] = c[4][0] - coeff * c[4][2];
r[0] = r[0] - coeff * r[2];
coeff = lhs[2][1];
lhs[3][1] = lhs[3][1] - coeff * lhs[3][2];
lhs[4][1] = lhs[4][1] - coeff * lhs[4][2];
c[0][1] = c[0][1] - coeff * c[0][2];
c[1][1] = c[1][1] - coeff * c[1][2];
c[2][1] = c[2][1] - coeff * c[2][2];
c[3][1] = c[3][1] - coeff * c[3][2];
c[4][1] = c[4][1] - coeff * c[4][2];
r[1] = r[1] - coeff * r[2];
coeff = lhs[2][3];
lhs[3][3] = lhs[3][3] - coeff * lhs[3][2];
lhs[4][3] = lhs[4][3] - coeff * lhs[4][2];
c[0][3] = c[0][3] - coeff * c[0][2];
c[1][3] = c[1][3] - coeff * c[1][2];
c[2][3] = c[2][3] - coeff * c[2][2];
c[3][3] = c[3][3] - coeff * c[3][2];
c[4][3] = c[4][3] - coeff * c[4][2];
r[3] = r[3] - coeff * r[2];
coeff = lhs[2][4];
lhs[3][4] = lhs[3][4] - coeff * lhs[3][2];
lhs[4][4] = lhs[4][4] - coeff * lhs[4][2];
c[0][4] = c[0][4] - coeff * c[0][2];
c[1][4] = c[1][4] - coeff * c[1][2];
c[2][4] = c[2][4] - coeff * c[2][2];
c[3][4] = c[3][4] - coeff * c[3][2];
c[4][4] = c[4][4] - coeff * c[4][2];
r[4] = r[4] - coeff * r[2];
pivot = 1.00 / lhs[3][3];
lhs[4][3] = lhs[4][3] * pivot;
c[0][3] = c[0][3] * pivot;
c[1][3] = c[1][3] * pivot;
c[2][3] = c[2][3] * pivot;
c[3][3] = c[3][3] * pivot;
c[4][3] = c[4][3] * pivot;
r[3] = r[3] * pivot;
coeff = lhs[3][0];
lhs[4][0] = lhs[4][0] - coeff * lhs[4][3];
c[0][0] = c[0][0] - coeff * c[0][3];
c[1][0] = c[1][0] - coeff * c[1][3];
c[2][0] = c[2][0] - coeff * c[2][3];
c[3][0] = c[3][0] - coeff * c[3][3];
c[4][0] = c[4][0] - coeff * c[4][3];
r[0] = r[0] - coeff * r[3];
coeff = lhs[3][1];
lhs[4][1] = lhs[4][1] - coeff * lhs[4][3];
c[0][1] = c[0][1] - coeff * c[0][3];
c[1][1] = c[1][1] - coeff * c[1][3];
c[2][1] = c[2][1] - coeff * c[2][3];
c[3][1] = c[3][1] - coeff * c[3][3];
c[4][1] = c[4][1] - coeff * c[4][3];
r[1] = r[1] - coeff * r[3];
coeff = lhs[3][2];
lhs[4][2] = lhs[4][2] - coeff * lhs[4][3];
c[0][2] = c[0][2] - coeff * c[0][3];
c[1][2] = c[1][2] - coeff * c[1][3];
c[2][2] = c[2][2] - coeff * c[2][3];
c[3][2] = c[3][2] - coeff * c[3][3];
c[4][2] = c[4][2] - coeff * c[4][3];
r[2] = r[2] - coeff * r[3];
coeff = lhs[3][4];
lhs[4][4] = lhs[4][4] - coeff * lhs[4][3];
c[0][4] = c[0][4] - coeff * c[0][3];
c[1][4] = c[1][4] - coeff * c[1][3];
c[2][4] = c[2][4] - coeff * c[2][3];
c[3][4] = c[3][4] - coeff * c[3][3];
c[4][4] = c[4][4] - coeff * c[4][3];
r[4] = r[4] - coeff * r[3];
pivot = 1.00 / lhs[4][4];
c[0][4] = c[0][4] * pivot;
c[1][4] = c[1][4] * pivot;
c[2][4] = c[2][4] * pivot;
c[3][4] = c[3][4] * pivot;
c[4][4] = c[4][4] * pivot;
r[4] = r[4] * pivot;
coeff = lhs[4][0];
c[0][0] = c[0][0] - coeff * c[0][4];
c[1][0] = c[1][0] - coeff * c[1][4];
c[2][0] = c[2][0] - coeff * c[2][4];
c[3][0] = c[3][0] - coeff * c[3][4];
c[4][0] = c[4][0] - coeff * c[4][4];
r[0] = r[0] - coeff * r[4];
coeff = lhs[4][1];
c[0][1] = c[0][1] - coeff * c[0][4];
c[1][1] = c[1][1] - coeff * c[1][4];
c[2][1] = c[2][1] - coeff * c[2][4];
c[3][1] = c[3][1] - coeff * c[3][4];
c[4][1] = c[4][1] - coeff * c[4][4];
r[1] = r[1] - coeff * r[4];
coeff = lhs[4][2];
c[0][2] = c[0][2] - coeff * c[0][4];
c[1][2] = c[1][2] - coeff * c[1][4];
c[2][2] = c[2][2] - coeff * c[2][4];
c[3][2] = c[3][2] - coeff * c[3][4];
c[4][2] = c[4][2] - coeff * c[4][4];
r[2] = r[2] - coeff * r[4];
coeff = lhs[4][3];
c[0][3] = c[0][3] - coeff * c[0][4];
c[1][3] = c[1][3] - coeff * c[1][4];
c[2][3] = c[2][3] - coeff * c[2][4];
c[3][3] = c[3][3] - coeff * c[3][4];
c[4][3] = c[4][3] - coeff * c[4][4];
r[3] = r[3] - coeff * r[4];
}
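//---------------------------------------------------------------------
// same elimination as binvcrhs, applied to the right hand side
// vector only: on exit r = lhs^{-1}*r
//---------------------------------------------------------------------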
void binvrhs(double lhs[5][5], double r[5])
{
double pivot, coeff;
pivot = 1.00 / lhs[0][0];
lhs[1][0] = lhs[1][0] * pivot;
lhs[2][0] = lhs[2][0] * pivot;
lhs[3][0] = lhs[3][0] * pivot;
lhs[4][0] = lhs[4][0] * pivot;
r[0] = r[0] * pivot;
coeff = lhs[0][1];
lhs[1][1] = lhs[1][1] - coeff * lhs[1][0];
lhs[2][1] = lhs[2][1] - coeff * lhs[2][0];
lhs[3][1] = lhs[3][1] - coeff * lhs[3][0];
lhs[4][1] = lhs[4][1] - coeff * lhs[4][0];
r[1] = r[1] - coeff * r[0];
coeff = lhs[0][2];
lhs[1][2] = lhs[1][2] - coeff * lhs[1][0];
lhs[2][2] = lhs[2][2] - coeff * lhs[2][0];
lhs[3][2] = lhs[3][2] - coeff * lhs[3][0];
lhs[4][2] = lhs[4][2] - coeff * lhs[4][0];
r[2] = r[2] - coeff * r[0];
coeff = lhs[0][3];
lhs[1][3] = lhs[1][3] - coeff * lhs[1][0];
lhs[2][3] = lhs[2][3] - coeff * lhs[2][0];
lhs[3][3] = lhs[3][3] - coeff * lhs[3][0];
lhs[4][3] = lhs[4][3] - coeff * lhs[4][0];
r[3] = r[3] - coeff * r[0];
coeff = lhs[0][4];
lhs[1][4] = lhs[1][4] - coeff * lhs[1][0];
lhs[2][4] = lhs[2][4] - coeff * lhs[2][0];
lhs[3][4] = lhs[3][4] - coeff * lhs[3][0];
lhs[4][4] = lhs[4][4] - coeff * lhs[4][0];
r[4] = r[4] - coeff * r[0];
pivot = 1.00 / lhs[1][1];
lhs[2][1] = lhs[2][1] * pivot;
lhs[3][1] = lhs[3][1] * pivot;
lhs[4][1] = lhs[4][1] * pivot;
r[1] = r[1] * pivot;
coeff = lhs[1][0];
lhs[2][0] = lhs[2][0] - coeff * lhs[2][1];
lhs[3][0] = lhs[3][0] - coeff * lhs[3][1];
lhs[4][0] = lhs[4][0] - coeff * lhs[4][1];
r[0] = r[0] - coeff * r[1];
coeff = lhs[1][2];
lhs[2][2] = lhs[2][2] - coeff * lhs[2][1];
lhs[3][2] = lhs[3][2] - coeff * lhs[3][1];
lhs[4][2] = lhs[4][2] - coeff * lhs[4][1];
r[2] = r[2] - coeff * r[1];
coeff = lhs[1][3];
lhs[2][3] = lhs[2][3] - coeff * lhs[2][1];
lhs[3][3] = lhs[3][3] - coeff * lhs[3][1];
lhs[4][3] = lhs[4][3] - coeff * lhs[4][1];
r[3] = r[3] - coeff * r[1];
coeff = lhs[1][4];
lhs[2][4] = lhs[2][4] - coeff * lhs[2][1];
lhs[3][4] = lhs[3][4] - coeff * lhs[3][1];
lhs[4][4] = lhs[4][4] - coeff * lhs[4][1];
r[4] = r[4] - coeff * r[1];
pivot = 1.00 / lhs[2][2];
lhs[3][2] = lhs[3][2] * pivot;
lhs[4][2] = lhs[4][2] * pivot;
r[2] = r[2] * pivot;
coeff = lhs[2][0];
lhs[3][0] = lhs[3][0] - coeff * lhs[3][2];
lhs[4][0] = lhs[4][0] - coeff * lhs[4][2];
r[0] = r[0] - coeff * r[2];
coeff = lhs[2][1];
lhs[3][1] = lhs[3][1] - coeff * lhs[3][2];
lhs[4][1] = lhs[4][1] - coeff * lhs[4][2];
r[1] = r[1] - coeff * r[2];
coeff = lhs[2][3];
lhs[3][3] = lhs[3][3] - coeff * lhs[3][2];
lhs[4][3] = lhs[4][3] - coeff * lhs[4][2];
r[3] = r[3] - coeff * r[2];
coeff = lhs[2][4];
lhs[3][4] = lhs[3][4] - coeff * lhs[3][2];
lhs[4][4] = lhs[4][4] - coeff * lhs[4][2];
r[4] = r[4] - coeff * r[2];
pivot = 1.00 / lhs[3][3];
lhs[4][3] = lhs[4][3] * pivot;
r[3] = r[3] * pivot;
coeff = lhs[3][0];
lhs[4][0] = lhs[4][0] - coeff * lhs[4][3];
r[0] = r[0] - coeff * r[3];
coeff = lhs[3][1];
lhs[4][1] = lhs[4][1] - coeff * lhs[4][3];
r[1] = r[1] - coeff * r[3];
coeff = lhs[3][2];
lhs[4][2] = lhs[4][2] - coeff * lhs[4][3];
r[2] = r[2] - coeff * r[3];
coeff = lhs[3][4];
lhs[4][4] = lhs[4][4] - coeff * lhs[4][3];
r[4] = r[4] - coeff * r[3];
pivot = 1.00 / lhs[4][4];
r[4] = r[4] * pivot;
coeff = lhs[4][0];
r[0] = r[0] - coeff * r[4];
coeff = lhs[4][1];
r[1] = r[1] - coeff * r[4];
coeff = lhs[4][2];
r[2] = r[2] - coeff * r[4];
coeff = lhs[4][3];
r[3] = r[3] - coeff * r[4];
}
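//---------------------------------------------------------------------
// A minimal sanity-check sketch for binvrhs (not part of the original
// benchmark; the SELF_TEST guard and the function name are ours). It
// builds a diagonally dominant 5x5 system with a known solution, runs
// the elimination, and prints computed vs. expected values. Note the
// transposed storage convention used throughout: A[col][row].
//---------------------------------------------------------------------
#ifdef SELF_TEST
#include <stdio.h>
static void self_test_binvrhs(void)
{
double A[5][5], r[5], x_true[5];
int row, col;
for (row = 0; row < 5; row++)
{
x_true[row] = (double)(row + 1);
}
for (col = 0; col < 5; col++)
{
for (row = 0; row < 5; row++)
{
// diagonally dominant, so elimination without pivoting is safe
A[col][row] = (row == col) ? 6.0 : 1.0;
}
}
// r = A * x_true, remembering that A[col][row] is element (row, col)
for (row = 0; row < 5; row++)
{
r[row] = 0.0;
for (col = 0; col < 5; col++)
{
r[row] += A[col][row] * x_true[col];
}
}
binvrhs(A, r);
// on exit r should hold x_true
for (row = 0; row < 5; row++)
{
printf(" x[%d] = %20.13E (expected %20.13E)\n", row, r[row], x_true[row]);
}
}
#endif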
//---------------------------------------------------------------------
// verification routine
//---------------------------------------------------------------------
void verify(int no_time_steps, char *Class, int *verified)
{
double xcrref[5], xceref[5], xcrdif[5], xcedif[5];
double epsilon, xce[5], xcr[5], dtref = 0.0;
int m;
//---------------------------------------------------------------------
// tolerance level
//---------------------------------------------------------------------
epsilon = 1.0e-08;
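//---------------------------------------------------------------------
// this tolerance is applied to the relative differences between the
// computed and reference norms below, and also to the absolute
// mismatch between dt and the reference dtref
//---------------------------------------------------------------------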
//---------------------------------------------------------------------
// compute the error norm and the residual norm
//---------------------------------------------------------------------
error_norm(xce);
compute_rhs();
rhs_norm(xcr);
for (m = 0; m < 5; m++)
{
xcr[m] = xcr[m] / dt;
}
*Class = 'U';
*verified = 1;
for (m = 0; m < 5; m++)
{
xcrref[m] = 1.0;
xceref[m] = 1.0;
}
//---------------------------------------------------------------------
// reference data for 12x12x12 grids after 60 time steps, with DT = 1.0e-02
//---------------------------------------------------------------------
if ( (grid_points[0] == 12) && (grid_points[1] == 12) &&
(grid_points[2] == 12) && (no_time_steps == 60))
{
*Class = 'S';
dtref = 1.0e-2;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 1.7034283709541311e-01;
xcrref[1] = 1.2975252070034097e-02;
xcrref[2] = 3.2527926989486055e-02;
xcrref[3] = 2.6436421275166801e-02;
xcrref[4] = 1.9211784131744430e-01;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 4.9976913345811579e-04;
xceref[1] = 4.5195666782961927e-05;
xceref[2] = 7.3973765172921357e-05;
xceref[3] = 7.3821238632439731e-05;
xceref[4] = 8.9269630987491446e-04;
//---------------------------------------------------------------------
// reference data for 24x24x24 grids after 200 time steps,
// with DT = 0.8e-3
//---------------------------------------------------------------------
}
else if ( (grid_points[0] == 24) && (grid_points[1] == 24) &&
(grid_points[2] == 24) && (no_time_steps == 200) )
{
*Class = 'W';
dtref = 0.8e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.1125590409344e+03;
xcrref[1] = 0.1180007595731e+02;
xcrref[2] = 0.2710329767846e+02;
xcrref[3] = 0.2469174937669e+02;
xcrref[4] = 0.2638427874317e+03;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.4419655736008e+01;
xceref[1] = 0.4638531260002e+00;
xceref[2] = 0.1011551749967e+01;
xceref[3] = 0.9235878729944e+00;
xceref[4] = 0.1018045837718e+02;
//---------------------------------------------------------------------
// reference data for 64x64x64 grids after 200 time steps,
// with DT = 0.8e-3
//---------------------------------------------------------------------
}
else if ( (grid_points[0] == 64) && (grid_points[1] == 64) &&
(grid_points[2] == 64) && (no_time_steps == 200) )
{
*Class = 'A';
dtref = 0.8e-3;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 1.0806346714637264e+02;
xcrref[1] = 1.1319730901220813e+01;
xcrref[2] = 2.5974354511582465e+01;
xcrref[3] = 2.3665622544678910e+01;
xcrref[4] = 2.5278963211748344e+02;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 4.2348416040525025e+00;
xceref[1] = 4.4390282496995698e-01;
xceref[2] = 9.6692480136345650e-01;
xceref[3] = 8.8302063039765474e-01;
xceref[4] = 9.7379901770829278e+00;
//---------------------------------------------------------------------
// reference data for 102x102x102 grids after 200 time steps,
// with DT = 3.0e-04
//---------------------------------------------------------------------
}
else if ( (grid_points[0] == 102) && (grid_points[1] == 102) &&
(grid_points[2] == 102) && (no_time_steps == 200) )
{
*Class = 'B';
dtref = 3.0e-4;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 1.4233597229287254e+03;
xcrref[1] = 9.9330522590150238e+01;
xcrref[2] = 3.5646025644535285e+02;
xcrref[3] = 3.2485447959084092e+02;
xcrref[4] = 3.2707541254659363e+03;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 5.2969847140936856e+01;
xceref[1] = 4.4632896115670668e+00;
xceref[2] = 1.3122573342210174e+01;
xceref[3] = 1.2006925323559144e+01;
xceref[4] = 1.2459576151035986e+02;
//---------------------------------------------------------------------
// reference data for 162x162x162 grids after 200 time steps,
// with DT = 1.0e-04
//---------------------------------------------------------------------
}
else if ( (grid_points[0] == 162) && (grid_points[1] == 162) &&
(grid_points[2] == 162) && (no_time_steps == 200) )
{
*Class = 'C';
dtref = 1.0e-4;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.62398116551764615e+04;
xcrref[1] = 0.50793239190423964e+03;
xcrref[2] = 0.15423530093013596e+04;
xcrref[3] = 0.13302387929291190e+04;
xcrref[4] = 0.11604087428436455e+05;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.16462008369091265e+03;
xceref[1] = 0.11497107903824313e+02;
xceref[2] = 0.41207446207461508e+02;
xceref[3] = 0.37087651059694167e+02;
xceref[4] = 0.36211053051841265e+03;
//---------------------------------------------------------------------
// reference data for 408x408x408 grids after 250 time steps,
// with DT = 0.2e-04
//---------------------------------------------------------------------
}
else if ( (grid_points[0] == 408) && (grid_points[1] == 408) &&
(grid_points[2] == 408) && (no_time_steps == 250) )
{
*Class = 'D';
dtref = 0.2e-4;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.2533188551738e+05;
xcrref[1] = 0.2346393716980e+04;
xcrref[2] = 0.6294554366904e+04;
xcrref[3] = 0.5352565376030e+04;
xcrref[4] = 0.3905864038618e+05;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.3100009377557e+03;
xceref[1] = 0.2424086324913e+02;
xceref[2] = 0.7782212022645e+02;
xceref[3] = 0.6835623860116e+02;
xceref[4] = 0.6065737200368e+03;
//---------------------------------------------------------------------
// reference data for 1020x1020x1020 grids after 250 time steps,
// with DT = 0.4e-05
//---------------------------------------------------------------------
}
else if ( (grid_points[0] == 1020) && (grid_points[1] == 1020) &&
(grid_points[2] == 1020) && (no_time_steps == 250) )
{
*Class = 'E';
dtref = 0.4e-5;
//---------------------------------------------------------------------
// Reference values of RMS-norms of residual.
//---------------------------------------------------------------------
xcrref[0] = 0.9795372484517e+05;
xcrref[1] = 0.9739814511521e+04;
xcrref[2] = 0.2467606342965e+05;
xcrref[3] = 0.2092419572860e+05;
xcrref[4] = 0.1392138856939e+06;
//---------------------------------------------------------------------
// Reference values of RMS-norms of solution error.
//---------------------------------------------------------------------
xceref[0] = 0.4327562208414e+03;
xceref[1] = 0.3699051964887e+02;
xceref[2] = 0.1089845040954e+03;
xceref[3] = 0.9462517622043e+02;
xceref[4] = 0.7765512765309e+03;
}
else
{
*verified = 0;
}
//---------------------------------------------------------------------
  // verification test for residuals if grid size is one of
// the defined grid sizes above (*Class != 'U')
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// Compute the difference of solution values and the known reference values.
//---------------------------------------------------------------------
for (m = 0; m < 5; m++)
{
xcrdif[m] = fabs((xcr[m] - xcrref[m]) / xcrref[m]);
xcedif[m] = fabs((xce[m] - xceref[m]) / xceref[m]);
}
//---------------------------------------------------------------------
// Output the comparison of computed results to known cases.
//---------------------------------------------------------------------
if (*Class != 'U')
{
printf(" Verification being performed for class %c\n", *Class);
printf(" accuracy setting for epsilon = %20.13E\n", epsilon);
*verified = (fabs(dt - dtref) <= epsilon);
if (!(*verified))
{
*Class = 'U';
printf(" DT does not match the reference value of %15.8E\n", dtref);
}
}
else
{
printf(" Unknown class\n");
}
if (*Class != 'U')
{
printf(" Comparison of RMS-norms of residual\n");
}
else
{
printf(" RMS-norms of residual\n");
}
for (m = 0; m < 5; m++)
{
if (*Class == 'U')
{
printf(" %2d%20.13E\n", m + 1, xcr[m]);
}
else if (xcrdif[m] <= epsilon)
{
printf(" %2d%20.13E%20.13E%20.13E\n",
m + 1, xcr[m], xcrref[m], xcrdif[m]);
}
else
{
*verified = 0;
printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n",
m + 1, xcr[m], xcrref[m], xcrdif[m]);
}
}
if (*Class != 'U')
{
printf(" Comparison of RMS-norms of solution error\n");
}
else
{
printf(" RMS-norms of solution error\n");
}
for (m = 0; m < 5; m++)
{
if (*Class == 'U')
{
printf(" %2d%20.13E\n", m + 1, xce[m]);
}
else if (xcedif[m] <= epsilon)
{
printf(" %2d%20.13E%20.13E%20.13E\n",
m + 1, xce[m], xceref[m], xcedif[m]);
}
else
{
*verified = 0;
printf(" FAILURE: %2d%20.13E%20.13E%20.13E\n",
m + 1, xce[m], xceref[m], xcedif[m]);
}
}
if (*Class == 'U')
{
printf(" No reference values provided\n");
printf(" No verification performed\n");
}
else if (*verified)
{
printf(" Verification Successful\n");
}
else
{
printf(" Verification failed\n");
}
}
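//---------------------------------------------------------------------
// Editor's note: each check above reduces to a relative-error test
// against the reference norms. A minimal, hypothetical helper (not
// called by the benchmark) capturing the same test used for
// xcrdif/xcedif:
//---------------------------------------------------------------------
static int within_tolerance(double computed, double reference, double epsilon)
{
  // relative difference against the reference norm
  return fabs((computed - reference) / reference) <= epsilon;
}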
//---------------------------------------------------------------------
//
// Performs line solves in X direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//
//---------------------------------------------------------------------
void x_solve()
{
int i, j, k, m, n, isize;
double fjac[PROBLEM_SIZE + 1][5][5];
double njac[PROBLEM_SIZE + 1][5][5];
double lhs [PROBLEM_SIZE + 1][3][5][5];
double tmp1, tmp2, tmp3;
  //---------------------------------------------------------------------
// This function computes the left hand side in the xi-direction
//---------------------------------------------------------------------
isize = grid_points[0] - 1;
//---------------------------------------------------------------------
// determine a (labeled f) and n jacobians
//---------------------------------------------------------------------
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (j = 1; j <= grid_points[1] - 2; j++)
{
#pragma omp parallel for default(shared) private(i, tmp1, tmp2, tmp3) firstprivate(isize, k, j, c2, c1, con43, c3c4, c1345, rho_i, u, qs, square)
for (i = 0; i <= isize; i++)
{
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[i][0][0] = 0.0;
fjac[i][1][0] = 1.0;
fjac[i][2][0] = 0.0;
fjac[i][3][0] = 0.0;
fjac[i][4][0] = 0.0;
fjac[i][0][1] = -(u[k][j][i][1] * tmp2 * u[k][j][i][1])
+ c2 * qs[k][j][i];
fjac[i][1][1] = ( 2.0 - c2 ) * ( u[k][j][i][1] / u[k][j][i][0] );
fjac[i][2][1] = - c2 * ( u[k][j][i][2] * tmp1 );
fjac[i][3][1] = - c2 * ( u[k][j][i][3] * tmp1 );
fjac[i][4][1] = c2;
fjac[i][0][2] = - ( u[k][j][i][1] * u[k][j][i][2] ) * tmp2;
fjac[i][1][2] = u[k][j][i][2] * tmp1;
fjac[i][2][2] = u[k][j][i][1] * tmp1;
fjac[i][3][2] = 0.0;
fjac[i][4][2] = 0.0;
fjac[i][0][3] = - ( u[k][j][i][1] * u[k][j][i][3] ) * tmp2;
fjac[i][1][3] = u[k][j][i][3] * tmp1;
fjac[i][2][3] = 0.0;
fjac[i][3][3] = u[k][j][i][1] * tmp1;
fjac[i][4][3] = 0.0;
fjac[i][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* ( u[k][j][i][1] * tmp2 );
fjac[i][1][4] = c1 * u[k][j][i][4] * tmp1
- c2 * ( u[k][j][i][1] * u[k][j][i][1] * tmp2 + qs[k][j][i] );
fjac[i][2][4] = - c2 * ( u[k][j][i][2] * u[k][j][i][1] ) * tmp2;
fjac[i][3][4] = - c2 * ( u[k][j][i][3] * u[k][j][i][1] ) * tmp2;
fjac[i][4][4] = c1 * ( u[k][j][i][1] * tmp1 );
njac[i][0][0] = 0.0;
njac[i][1][0] = 0.0;
njac[i][2][0] = 0.0;
njac[i][3][0] = 0.0;
njac[i][4][0] = 0.0;
njac[i][0][1] = - con43 * c3c4 * tmp2 * u[k][j][i][1];
njac[i][1][1] = con43 * c3c4 * tmp1;
njac[i][2][1] = 0.0;
njac[i][3][1] = 0.0;
njac[i][4][1] = 0.0;
njac[i][0][2] = - c3c4 * tmp2 * u[k][j][i][2];
njac[i][1][2] = 0.0;
njac[i][2][2] = c3c4 * tmp1;
njac[i][3][2] = 0.0;
njac[i][4][2] = 0.0;
njac[i][0][3] = - c3c4 * tmp2 * u[k][j][i][3];
njac[i][1][3] = 0.0;
njac[i][2][3] = 0.0;
njac[i][3][3] = c3c4 * tmp1;
njac[i][4][3] = 0.0;
njac[i][0][4] = - ( con43 * c3c4
- c1345 ) * tmp3 * (u[k][j][i][1] * u[k][j][i][1])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2] * u[k][j][i][2])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3] * u[k][j][i][3])
- c1345 * tmp2 * u[k][j][i][4];
njac[i][1][4] = ( con43 * c3c4
- c1345 ) * tmp2 * u[k][j][i][1];
njac[i][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
njac[i][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];
njac[i][4][4] = ( c1345 ) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in x direction
//---------------------------------------------------------------------
lhsinit(lhs, isize);
#pragma omp parallel for default(shared) private(i, tmp1, tmp2) firstprivate(isize, dt, tx1, tx2, dx1, dx2, dx3, dx4, dx5, fjac, njac)
for (i = 1; i <= isize - 1; i++)
{
tmp1 = dt * tx1;
tmp2 = dt * tx2;
lhs[i][AA][0][0] = - tmp2 * fjac[i - 1][0][0]
- tmp1 * njac[i - 1][0][0]
- tmp1 * dx1;
lhs[i][AA][1][0] = - tmp2 * fjac[i - 1][1][0]
- tmp1 * njac[i - 1][1][0];
lhs[i][AA][2][0] = - tmp2 * fjac[i - 1][2][0]
- tmp1 * njac[i - 1][2][0];
lhs[i][AA][3][0] = - tmp2 * fjac[i - 1][3][0]
- tmp1 * njac[i - 1][3][0];
lhs[i][AA][4][0] = - tmp2 * fjac[i - 1][4][0]
- tmp1 * njac[i - 1][4][0];
lhs[i][AA][0][1] = - tmp2 * fjac[i - 1][0][1]
- tmp1 * njac[i - 1][0][1];
lhs[i][AA][1][1] = - tmp2 * fjac[i - 1][1][1]
- tmp1 * njac[i - 1][1][1]
- tmp1 * dx2;
lhs[i][AA][2][1] = - tmp2 * fjac[i - 1][2][1]
- tmp1 * njac[i - 1][2][1];
lhs[i][AA][3][1] = - tmp2 * fjac[i - 1][3][1]
- tmp1 * njac[i - 1][3][1];
lhs[i][AA][4][1] = - tmp2 * fjac[i - 1][4][1]
- tmp1 * njac[i - 1][4][1];
lhs[i][AA][0][2] = - tmp2 * fjac[i - 1][0][2]
- tmp1 * njac[i - 1][0][2];
lhs[i][AA][1][2] = - tmp2 * fjac[i - 1][1][2]
- tmp1 * njac[i - 1][1][2];
lhs[i][AA][2][2] = - tmp2 * fjac[i - 1][2][2]
- tmp1 * njac[i - 1][2][2]
- tmp1 * dx3;
lhs[i][AA][3][2] = - tmp2 * fjac[i - 1][3][2]
- tmp1 * njac[i - 1][3][2];
lhs[i][AA][4][2] = - tmp2 * fjac[i - 1][4][2]
- tmp1 * njac[i - 1][4][2];
lhs[i][AA][0][3] = - tmp2 * fjac[i - 1][0][3]
- tmp1 * njac[i - 1][0][3];
lhs[i][AA][1][3] = - tmp2 * fjac[i - 1][1][3]
- tmp1 * njac[i - 1][1][3];
lhs[i][AA][2][3] = - tmp2 * fjac[i - 1][2][3]
- tmp1 * njac[i - 1][2][3];
lhs[i][AA][3][3] = - tmp2 * fjac[i - 1][3][3]
- tmp1 * njac[i - 1][3][3]
- tmp1 * dx4;
lhs[i][AA][4][3] = - tmp2 * fjac[i - 1][4][3]
- tmp1 * njac[i - 1][4][3];
lhs[i][AA][0][4] = - tmp2 * fjac[i - 1][0][4]
- tmp1 * njac[i - 1][0][4];
lhs[i][AA][1][4] = - tmp2 * fjac[i - 1][1][4]
- tmp1 * njac[i - 1][1][4];
lhs[i][AA][2][4] = - tmp2 * fjac[i - 1][2][4]
- tmp1 * njac[i - 1][2][4];
lhs[i][AA][3][4] = - tmp2 * fjac[i - 1][3][4]
- tmp1 * njac[i - 1][3][4];
lhs[i][AA][4][4] = - tmp2 * fjac[i - 1][4][4]
- tmp1 * njac[i - 1][4][4]
- tmp1 * dx5;
lhs[i][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[i][0][0]
+ tmp1 * 2.0 * dx1;
lhs[i][BB][1][0] = tmp1 * 2.0 * njac[i][1][0];
lhs[i][BB][2][0] = tmp1 * 2.0 * njac[i][2][0];
lhs[i][BB][3][0] = tmp1 * 2.0 * njac[i][3][0];
lhs[i][BB][4][0] = tmp1 * 2.0 * njac[i][4][0];
lhs[i][BB][0][1] = tmp1 * 2.0 * njac[i][0][1];
lhs[i][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[i][1][1]
+ tmp1 * 2.0 * dx2;
lhs[i][BB][2][1] = tmp1 * 2.0 * njac[i][2][1];
lhs[i][BB][3][1] = tmp1 * 2.0 * njac[i][3][1];
lhs[i][BB][4][1] = tmp1 * 2.0 * njac[i][4][1];
lhs[i][BB][0][2] = tmp1 * 2.0 * njac[i][0][2];
lhs[i][BB][1][2] = tmp1 * 2.0 * njac[i][1][2];
lhs[i][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[i][2][2]
+ tmp1 * 2.0 * dx3;
lhs[i][BB][3][2] = tmp1 * 2.0 * njac[i][3][2];
lhs[i][BB][4][2] = tmp1 * 2.0 * njac[i][4][2];
lhs[i][BB][0][3] = tmp1 * 2.0 * njac[i][0][3];
lhs[i][BB][1][3] = tmp1 * 2.0 * njac[i][1][3];
lhs[i][BB][2][3] = tmp1 * 2.0 * njac[i][2][3];
lhs[i][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[i][3][3]
+ tmp1 * 2.0 * dx4;
lhs[i][BB][4][3] = tmp1 * 2.0 * njac[i][4][3];
lhs[i][BB][0][4] = tmp1 * 2.0 * njac[i][0][4];
lhs[i][BB][1][4] = tmp1 * 2.0 * njac[i][1][4];
lhs[i][BB][2][4] = tmp1 * 2.0 * njac[i][2][4];
lhs[i][BB][3][4] = tmp1 * 2.0 * njac[i][3][4];
lhs[i][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[i][4][4]
+ tmp1 * 2.0 * dx5;
lhs[i][CC][0][0] = tmp2 * fjac[i + 1][0][0]
- tmp1 * njac[i + 1][0][0]
- tmp1 * dx1;
lhs[i][CC][1][0] = tmp2 * fjac[i + 1][1][0]
- tmp1 * njac[i + 1][1][0];
lhs[i][CC][2][0] = tmp2 * fjac[i + 1][2][0]
- tmp1 * njac[i + 1][2][0];
lhs[i][CC][3][0] = tmp2 * fjac[i + 1][3][0]
- tmp1 * njac[i + 1][3][0];
lhs[i][CC][4][0] = tmp2 * fjac[i + 1][4][0]
- tmp1 * njac[i + 1][4][0];
lhs[i][CC][0][1] = tmp2 * fjac[i + 1][0][1]
- tmp1 * njac[i + 1][0][1];
lhs[i][CC][1][1] = tmp2 * fjac[i + 1][1][1]
- tmp1 * njac[i + 1][1][1]
- tmp1 * dx2;
lhs[i][CC][2][1] = tmp2 * fjac[i + 1][2][1]
- tmp1 * njac[i + 1][2][1];
lhs[i][CC][3][1] = tmp2 * fjac[i + 1][3][1]
- tmp1 * njac[i + 1][3][1];
lhs[i][CC][4][1] = tmp2 * fjac[i + 1][4][1]
- tmp1 * njac[i + 1][4][1];
lhs[i][CC][0][2] = tmp2 * fjac[i + 1][0][2]
- tmp1 * njac[i + 1][0][2];
lhs[i][CC][1][2] = tmp2 * fjac[i + 1][1][2]
- tmp1 * njac[i + 1][1][2];
lhs[i][CC][2][2] = tmp2 * fjac[i + 1][2][2]
- tmp1 * njac[i + 1][2][2]
- tmp1 * dx3;
lhs[i][CC][3][2] = tmp2 * fjac[i + 1][3][2]
- tmp1 * njac[i + 1][3][2];
lhs[i][CC][4][2] = tmp2 * fjac[i + 1][4][2]
- tmp1 * njac[i + 1][4][2];
lhs[i][CC][0][3] = tmp2 * fjac[i + 1][0][3]
- tmp1 * njac[i + 1][0][3];
lhs[i][CC][1][3] = tmp2 * fjac[i + 1][1][3]
- tmp1 * njac[i + 1][1][3];
lhs[i][CC][2][3] = tmp2 * fjac[i + 1][2][3]
- tmp1 * njac[i + 1][2][3];
lhs[i][CC][3][3] = tmp2 * fjac[i + 1][3][3]
- tmp1 * njac[i + 1][3][3]
- tmp1 * dx4;
lhs[i][CC][4][3] = tmp2 * fjac[i + 1][4][3]
- tmp1 * njac[i + 1][4][3];
lhs[i][CC][0][4] = tmp2 * fjac[i + 1][0][4]
- tmp1 * njac[i + 1][0][4];
lhs[i][CC][1][4] = tmp2 * fjac[i + 1][1][4]
- tmp1 * njac[i + 1][1][4];
lhs[i][CC][2][4] = tmp2 * fjac[i + 1][2][4]
- tmp1 * njac[i + 1][2][4];
lhs[i][CC][3][4] = tmp2 * fjac[i + 1][3][4]
- tmp1 * njac[i + 1][3][4];
lhs[i][CC][4][4] = tmp2 * fjac[i + 1][4][4]
- tmp1 * njac[i + 1][4][4]
- tmp1 * dx5;
}
      //---------------------------------------------------------------------
      // performs Gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(IMAX) and rhs'(IMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
      // outermost do loops - sweeping in i direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][j][0] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][j][0] );
//---------------------------------------------------------------------
      // begin innermost do loop
      // do all the elements of the cell except the last
//---------------------------------------------------------------------
for (i = 1; i <= isize - 1; i++)
{
//-------------------------------------------------------------------
// rhs(i) = rhs(i) - A*rhs(i-1)
//-------------------------------------------------------------------
matvec_sub(lhs[i][AA], rhs[k][j][i - 1], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(i) = B(i) - C(i-1)*A(i)
//-------------------------------------------------------------------
matmul_sub(lhs[i][AA], lhs[i - 1][CC], lhs[i][BB]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
        // multiply rhs[k][j][i] by b_inverse and copy to rhs
//-------------------------------------------------------------------
binvcrhs( lhs[i][BB], lhs[i][CC], rhs[k][j][i] );
}
//---------------------------------------------------------------------
// rhs(isize) = rhs(isize) - A*rhs(isize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[isize][AA], rhs[k][j][isize - 1], rhs[k][j][isize]);
//---------------------------------------------------------------------
// B(isize) = B(isize) - C(isize-1)*A(isize)
//---------------------------------------------------------------------
matmul_sub(lhs[isize][AA], lhs[isize - 1][CC], lhs[isize][BB]);
//---------------------------------------------------------------------
      // multiply rhs(isize) by b_inverse(isize) and copy to rhs
//---------------------------------------------------------------------
binvrhs( lhs[isize][BB], rhs[k][j][isize] );
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(isize)=rhs(isize)
      // else assume U(isize) is loaded in unpack backsub_info
      // so just use it
      // afterwards u(istart) will be sent to next cell
//---------------------------------------------------------------------
for (i = isize - 1; i >= 0; i--)
{
for (m = 0; m < BLOCK_SIZE; m++)
{
for (n = 0; n < BLOCK_SIZE; n++)
{
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhs[i][CC][n][m] * rhs[k][j][i + 1][n];
}
}
}
}
}
}
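//---------------------------------------------------------------------
// Editor's note: the sweep above (and the y/z sweeps below) is the
// block generalization of the scalar Thomas algorithm. A minimal scalar
// sketch, a hypothetical helper not called by the benchmark, solving
//   a[i]*x[i-1] + b[i]*x[i] + c[i]*x[i+1] = r[i].
// One common variant is shown; the block version here instead
// multiplies by B inverse at each step via binvcrhs().
//---------------------------------------------------------------------
static void thomas_scalar(int n, const double *a, double *b,
                          const double *c, double *r, double *x)
{
  int i;
  // forward elimination: fold row i-1 into row i (cf. matvec_sub/matmul_sub)
  for (i = 1; i < n; i++)
  {
    double m = a[i] / b[i - 1];
    b[i] -= m * c[i - 1];
    r[i] -= m * r[i - 1];
  }
  // back substitution (cf. the final reverse i-loop in x_solve)
  x[n - 1] = r[n - 1] / b[n - 1];
  for (i = n - 2; i >= 0; i--)
  {
    x[i] = (r[i] - c[i] * x[i + 1]) / b[i];
  }
}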
//---------------------------------------------------------------------
// Performs line solves in Y direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void y_solve()
{
int i, j, k, m, n, jsize;
double fjac[PROBLEM_SIZE + 1][5][5];
double njac[PROBLEM_SIZE + 1][5][5];
double lhs [PROBLEM_SIZE + 1][3][5][5];
double tmp1, tmp2, tmp3;
  //---------------------------------------------------------------------
// This function computes the left hand side for the three y-factors
//---------------------------------------------------------------------
jsize = grid_points[1] - 1;
//---------------------------------------------------------------------
// Compute the indices for storing the tri-diagonal matrix;
// determine a (labeled f) and n jacobians for cell c
//---------------------------------------------------------------------
for (k = 1; k <= grid_points[2] - 2; k++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
#pragma omp parallel for default(shared) private(j, tmp1, tmp2, tmp3) firstprivate(jsize, k, i, c2, c1, c3c4, con43, c1345, rho_i, u, qs, square)
for (j = 0; j <= jsize; j++)
{
tmp1 = rho_i[k][j][i];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[j][0][0] = 0.0;
fjac[j][1][0] = 0.0;
fjac[j][2][0] = 1.0;
fjac[j][3][0] = 0.0;
fjac[j][4][0] = 0.0;
fjac[j][0][1] = - ( u[k][j][i][1] * u[k][j][i][2] ) * tmp2;
fjac[j][1][1] = u[k][j][i][2] * tmp1;
fjac[j][2][1] = u[k][j][i][1] * tmp1;
fjac[j][3][1] = 0.0;
fjac[j][4][1] = 0.0;
fjac[j][0][2] = - ( u[k][j][i][2] * u[k][j][i][2] * tmp2)
+ c2 * qs[k][j][i];
fjac[j][1][2] = - c2 * u[k][j][i][1] * tmp1;
fjac[j][2][2] = ( 2.0 - c2 ) * u[k][j][i][2] * tmp1;
fjac[j][3][2] = - c2 * u[k][j][i][3] * tmp1;
fjac[j][4][2] = c2;
fjac[j][0][3] = - ( u[k][j][i][2] * u[k][j][i][3] ) * tmp2;
fjac[j][1][3] = 0.0;
fjac[j][2][3] = u[k][j][i][3] * tmp1;
fjac[j][3][3] = u[k][j][i][2] * tmp1;
fjac[j][4][3] = 0.0;
fjac[j][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* u[k][j][i][2] * tmp2;
fjac[j][1][4] = - c2 * u[k][j][i][1] * u[k][j][i][2] * tmp2;
fjac[j][2][4] = c1 * u[k][j][i][4] * tmp1
- c2 * ( qs[k][j][i] + u[k][j][i][2] * u[k][j][i][2] * tmp2 );
fjac[j][3][4] = - c2 * ( u[k][j][i][2] * u[k][j][i][3] ) * tmp2;
fjac[j][4][4] = c1 * u[k][j][i][2] * tmp1;
njac[j][0][0] = 0.0;
njac[j][1][0] = 0.0;
njac[j][2][0] = 0.0;
njac[j][3][0] = 0.0;
njac[j][4][0] = 0.0;
njac[j][0][1] = - c3c4 * tmp2 * u[k][j][i][1];
njac[j][1][1] = c3c4 * tmp1;
njac[j][2][1] = 0.0;
njac[j][3][1] = 0.0;
njac[j][4][1] = 0.0;
njac[j][0][2] = - con43 * c3c4 * tmp2 * u[k][j][i][2];
njac[j][1][2] = 0.0;
njac[j][2][2] = con43 * c3c4 * tmp1;
njac[j][3][2] = 0.0;
njac[j][4][2] = 0.0;
njac[j][0][3] = - c3c4 * tmp2 * u[k][j][i][3];
njac[j][1][3] = 0.0;
njac[j][2][3] = 0.0;
njac[j][3][3] = c3c4 * tmp1;
njac[j][4][3] = 0.0;
njac[j][0][4] = - ( c3c4
- c1345 ) * tmp3 * (u[k][j][i][1] * u[k][j][i][1])
- ( con43 * c3c4
- c1345 ) * tmp3 * (u[k][j][i][2] * u[k][j][i][2])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][3] * u[k][j][i][3])
- c1345 * tmp2 * u[k][j][i][4];
njac[j][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];
njac[j][2][4] = ( con43 * c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
njac[j][3][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][3];
njac[j][4][4] = ( c1345 ) * tmp1;
}
//---------------------------------------------------------------------
      // now jacobians set, so form left hand side in y direction
//---------------------------------------------------------------------
lhsinit(lhs, jsize);
#pragma omp parallel for default(shared) private(j, tmp1, tmp2) firstprivate(jsize, dt, ty1, ty2, dy1, dy2, dy3, dy4, dy5, fjac, njac)
for (j = 1; j <= jsize - 1; j++)
{
tmp1 = dt * ty1;
tmp2 = dt * ty2;
lhs[j][AA][0][0] = - tmp2 * fjac[j - 1][0][0]
- tmp1 * njac[j - 1][0][0]
- tmp1 * dy1;
lhs[j][AA][1][0] = - tmp2 * fjac[j - 1][1][0]
- tmp1 * njac[j - 1][1][0];
lhs[j][AA][2][0] = - tmp2 * fjac[j - 1][2][0]
- tmp1 * njac[j - 1][2][0];
lhs[j][AA][3][0] = - tmp2 * fjac[j - 1][3][0]
- tmp1 * njac[j - 1][3][0];
lhs[j][AA][4][0] = - tmp2 * fjac[j - 1][4][0]
- tmp1 * njac[j - 1][4][0];
lhs[j][AA][0][1] = - tmp2 * fjac[j - 1][0][1]
- tmp1 * njac[j - 1][0][1];
lhs[j][AA][1][1] = - tmp2 * fjac[j - 1][1][1]
- tmp1 * njac[j - 1][1][1]
- tmp1 * dy2;
lhs[j][AA][2][1] = - tmp2 * fjac[j - 1][2][1]
- tmp1 * njac[j - 1][2][1];
lhs[j][AA][3][1] = - tmp2 * fjac[j - 1][3][1]
- tmp1 * njac[j - 1][3][1];
lhs[j][AA][4][1] = - tmp2 * fjac[j - 1][4][1]
- tmp1 * njac[j - 1][4][1];
lhs[j][AA][0][2] = - tmp2 * fjac[j - 1][0][2]
- tmp1 * njac[j - 1][0][2];
lhs[j][AA][1][2] = - tmp2 * fjac[j - 1][1][2]
- tmp1 * njac[j - 1][1][2];
lhs[j][AA][2][2] = - tmp2 * fjac[j - 1][2][2]
- tmp1 * njac[j - 1][2][2]
- tmp1 * dy3;
lhs[j][AA][3][2] = - tmp2 * fjac[j - 1][3][2]
- tmp1 * njac[j - 1][3][2];
lhs[j][AA][4][2] = - tmp2 * fjac[j - 1][4][2]
- tmp1 * njac[j - 1][4][2];
lhs[j][AA][0][3] = - tmp2 * fjac[j - 1][0][3]
- tmp1 * njac[j - 1][0][3];
lhs[j][AA][1][3] = - tmp2 * fjac[j - 1][1][3]
- tmp1 * njac[j - 1][1][3];
lhs[j][AA][2][3] = - tmp2 * fjac[j - 1][2][3]
- tmp1 * njac[j - 1][2][3];
lhs[j][AA][3][3] = - tmp2 * fjac[j - 1][3][3]
- tmp1 * njac[j - 1][3][3]
- tmp1 * dy4;
lhs[j][AA][4][3] = - tmp2 * fjac[j - 1][4][3]
- tmp1 * njac[j - 1][4][3];
lhs[j][AA][0][4] = - tmp2 * fjac[j - 1][0][4]
- tmp1 * njac[j - 1][0][4];
lhs[j][AA][1][4] = - tmp2 * fjac[j - 1][1][4]
- tmp1 * njac[j - 1][1][4];
lhs[j][AA][2][4] = - tmp2 * fjac[j - 1][2][4]
- tmp1 * njac[j - 1][2][4];
lhs[j][AA][3][4] = - tmp2 * fjac[j - 1][3][4]
- tmp1 * njac[j - 1][3][4];
lhs[j][AA][4][4] = - tmp2 * fjac[j - 1][4][4]
- tmp1 * njac[j - 1][4][4]
- tmp1 * dy5;
lhs[j][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[j][0][0]
+ tmp1 * 2.0 * dy1;
lhs[j][BB][1][0] = tmp1 * 2.0 * njac[j][1][0];
lhs[j][BB][2][0] = tmp1 * 2.0 * njac[j][2][0];
lhs[j][BB][3][0] = tmp1 * 2.0 * njac[j][3][0];
lhs[j][BB][4][0] = tmp1 * 2.0 * njac[j][4][0];
lhs[j][BB][0][1] = tmp1 * 2.0 * njac[j][0][1];
lhs[j][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[j][1][1]
+ tmp1 * 2.0 * dy2;
lhs[j][BB][2][1] = tmp1 * 2.0 * njac[j][2][1];
lhs[j][BB][3][1] = tmp1 * 2.0 * njac[j][3][1];
lhs[j][BB][4][1] = tmp1 * 2.0 * njac[j][4][1];
lhs[j][BB][0][2] = tmp1 * 2.0 * njac[j][0][2];
lhs[j][BB][1][2] = tmp1 * 2.0 * njac[j][1][2];
lhs[j][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[j][2][2]
+ tmp1 * 2.0 * dy3;
lhs[j][BB][3][2] = tmp1 * 2.0 * njac[j][3][2];
lhs[j][BB][4][2] = tmp1 * 2.0 * njac[j][4][2];
lhs[j][BB][0][3] = tmp1 * 2.0 * njac[j][0][3];
lhs[j][BB][1][3] = tmp1 * 2.0 * njac[j][1][3];
lhs[j][BB][2][3] = tmp1 * 2.0 * njac[j][2][3];
lhs[j][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[j][3][3]
+ tmp1 * 2.0 * dy4;
lhs[j][BB][4][3] = tmp1 * 2.0 * njac[j][4][3];
lhs[j][BB][0][4] = tmp1 * 2.0 * njac[j][0][4];
lhs[j][BB][1][4] = tmp1 * 2.0 * njac[j][1][4];
lhs[j][BB][2][4] = tmp1 * 2.0 * njac[j][2][4];
lhs[j][BB][3][4] = tmp1 * 2.0 * njac[j][3][4];
lhs[j][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[j][4][4]
+ tmp1 * 2.0 * dy5;
lhs[j][CC][0][0] = tmp2 * fjac[j + 1][0][0]
- tmp1 * njac[j + 1][0][0]
- tmp1 * dy1;
lhs[j][CC][1][0] = tmp2 * fjac[j + 1][1][0]
- tmp1 * njac[j + 1][1][0];
lhs[j][CC][2][0] = tmp2 * fjac[j + 1][2][0]
- tmp1 * njac[j + 1][2][0];
lhs[j][CC][3][0] = tmp2 * fjac[j + 1][3][0]
- tmp1 * njac[j + 1][3][0];
lhs[j][CC][4][0] = tmp2 * fjac[j + 1][4][0]
- tmp1 * njac[j + 1][4][0];
lhs[j][CC][0][1] = tmp2 * fjac[j + 1][0][1]
- tmp1 * njac[j + 1][0][1];
lhs[j][CC][1][1] = tmp2 * fjac[j + 1][1][1]
- tmp1 * njac[j + 1][1][1]
- tmp1 * dy2;
lhs[j][CC][2][1] = tmp2 * fjac[j + 1][2][1]
- tmp1 * njac[j + 1][2][1];
lhs[j][CC][3][1] = tmp2 * fjac[j + 1][3][1]
- tmp1 * njac[j + 1][3][1];
lhs[j][CC][4][1] = tmp2 * fjac[j + 1][4][1]
- tmp1 * njac[j + 1][4][1];
lhs[j][CC][0][2] = tmp2 * fjac[j + 1][0][2]
- tmp1 * njac[j + 1][0][2];
lhs[j][CC][1][2] = tmp2 * fjac[j + 1][1][2]
- tmp1 * njac[j + 1][1][2];
lhs[j][CC][2][2] = tmp2 * fjac[j + 1][2][2]
- tmp1 * njac[j + 1][2][2]
- tmp1 * dy3;
lhs[j][CC][3][2] = tmp2 * fjac[j + 1][3][2]
- tmp1 * njac[j + 1][3][2];
lhs[j][CC][4][2] = tmp2 * fjac[j + 1][4][2]
- tmp1 * njac[j + 1][4][2];
lhs[j][CC][0][3] = tmp2 * fjac[j + 1][0][3]
- tmp1 * njac[j + 1][0][3];
lhs[j][CC][1][3] = tmp2 * fjac[j + 1][1][3]
- tmp1 * njac[j + 1][1][3];
lhs[j][CC][2][3] = tmp2 * fjac[j + 1][2][3]
- tmp1 * njac[j + 1][2][3];
lhs[j][CC][3][3] = tmp2 * fjac[j + 1][3][3]
- tmp1 * njac[j + 1][3][3]
- tmp1 * dy4;
lhs[j][CC][4][3] = tmp2 * fjac[j + 1][4][3]
- tmp1 * njac[j + 1][4][3];
lhs[j][CC][0][4] = tmp2 * fjac[j + 1][0][4]
- tmp1 * njac[j + 1][0][4];
lhs[j][CC][1][4] = tmp2 * fjac[j + 1][1][4]
- tmp1 * njac[j + 1][1][4];
lhs[j][CC][2][4] = tmp2 * fjac[j + 1][2][4]
- tmp1 * njac[j + 1][2][4];
lhs[j][CC][3][4] = tmp2 * fjac[j + 1][3][4]
- tmp1 * njac[j + 1][3][4];
lhs[j][CC][4][4] = tmp2 * fjac[j + 1][4][4]
- tmp1 * njac[j + 1][4][4]
- tmp1 * dy5;
}
      //---------------------------------------------------------------------
      // performs Gaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(JMAX) and rhs'(JMAX) will be sent to next cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[k][0][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs( lhs[0][BB], lhs[0][CC], rhs[k][0][i] );
//---------------------------------------------------------------------
      // begin innermost do loop
      // do all the elements of the cell except the last
//---------------------------------------------------------------------
for (j = 1; j <= jsize - 1; j++)
{
//-------------------------------------------------------------------
        // subtract A*rhs_vector(j-1) from rhs_vector(j)
//
// rhs(j) = rhs(j) - A*rhs(j-1)
//-------------------------------------------------------------------
matvec_sub(lhs[j][AA], rhs[k][j - 1][i], rhs[k][j][i]);
//-------------------------------------------------------------------
// B(j) = B(j) - C(j-1)*A(j)
//-------------------------------------------------------------------
matmul_sub(lhs[j][AA], lhs[j - 1][CC], lhs[j][BB]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
        // multiply rhs[k][j][i] by b_inverse and copy to rhs
//-------------------------------------------------------------------
binvcrhs( lhs[j][BB], lhs[j][CC], rhs[k][j][i] );
}
//---------------------------------------------------------------------
// rhs(jsize) = rhs(jsize) - A*rhs(jsize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[jsize][AA], rhs[k][jsize - 1][i], rhs[k][jsize][i]);
//---------------------------------------------------------------------
      // B(jsize) = B(jsize) - C(jsize-1)*A(jsize)
      //---------------------------------------------------------------------
matmul_sub(lhs[jsize][AA], lhs[jsize - 1][CC], lhs[jsize][BB]);
//---------------------------------------------------------------------
// multiply rhs(jsize) by b_inverse(jsize) and copy to rhs
//---------------------------------------------------------------------
binvrhs( lhs[jsize][BB], rhs[k][jsize][i] );
//---------------------------------------------------------------------
// back solve: if last cell, then generate U(jsize)=rhs(jsize)
      // else assume U(jsize) is loaded in unpack backsub_info
      // so just use it
      // afterwards u(jstart) will be sent to next cell
//---------------------------------------------------------------------
for (j = jsize - 1; j >= 0; j--)
{
for (m = 0; m < BLOCK_SIZE; m++)
{
for (n = 0; n < BLOCK_SIZE; n++)
{
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhs[j][CC][n][m] * rhs[k][j + 1][i][n];
}
}
}
}
}
}
//---------------------------------------------------------------------
// Performs line solves in Z direction by first factoring
// the block-tridiagonal matrix into an upper triangular matrix,
// and then performing back substitution to solve for the unknown
// vectors of each line.
//
// Make sure we treat elements zero to cell_size in the direction
// of the sweep.
//---------------------------------------------------------------------
void z_solve()
{
int i, j, k, m, n, ksize;
double fjac[PROBLEM_SIZE + 1][5][5];
double njac[PROBLEM_SIZE + 1][5][5];
double lhs [PROBLEM_SIZE + 1][3][5][5];
double tmp1, tmp2, tmp3;
  //---------------------------------------------------------------------
// This function computes the left hand side for the three z-factors
//---------------------------------------------------------------------
ksize = grid_points[2] - 1;
//---------------------------------------------------------------------
// Compute the indices for storing the block-diagonal matrix;
// determine c (labeled f) and s jacobians
//---------------------------------------------------------------------
for (j = 1; j <= grid_points[1] - 2; j++)
{
for (i = 1; i <= grid_points[0] - 2; i++)
{
#pragma omp parallel for default(shared) private(k, tmp1, tmp2, tmp3) firstprivate(ksize, j, i, c2, c1, c3c4, con43, c3, c4, c1345, u, qs, square)
for (k = 0; k <= ksize; k++)
{
tmp1 = 1.0 / u[k][j][i][0];
tmp2 = tmp1 * tmp1;
tmp3 = tmp1 * tmp2;
fjac[k][0][0] = 0.0;
fjac[k][1][0] = 0.0;
fjac[k][2][0] = 0.0;
fjac[k][3][0] = 1.0;
fjac[k][4][0] = 0.0;
fjac[k][0][1] = - ( u[k][j][i][1] * u[k][j][i][3] ) * tmp2;
fjac[k][1][1] = u[k][j][i][3] * tmp1;
fjac[k][2][1] = 0.0;
fjac[k][3][1] = u[k][j][i][1] * tmp1;
fjac[k][4][1] = 0.0;
fjac[k][0][2] = - ( u[k][j][i][2] * u[k][j][i][3] ) * tmp2;
fjac[k][1][2] = 0.0;
fjac[k][2][2] = u[k][j][i][3] * tmp1;
fjac[k][3][2] = u[k][j][i][2] * tmp1;
fjac[k][4][2] = 0.0;
fjac[k][0][3] = - (u[k][j][i][3] * u[k][j][i][3] * tmp2 )
+ c2 * qs[k][j][i];
fjac[k][1][3] = - c2 * u[k][j][i][1] * tmp1;
fjac[k][2][3] = - c2 * u[k][j][i][2] * tmp1;
fjac[k][3][3] = ( 2.0 - c2 ) * u[k][j][i][3] * tmp1;
fjac[k][4][3] = c2;
fjac[k][0][4] = ( c2 * 2.0 * square[k][j][i] - c1 * u[k][j][i][4] )
* u[k][j][i][3] * tmp2;
fjac[k][1][4] = - c2 * ( u[k][j][i][1] * u[k][j][i][3] ) * tmp2;
fjac[k][2][4] = - c2 * ( u[k][j][i][2] * u[k][j][i][3] ) * tmp2;
fjac[k][3][4] = c1 * ( u[k][j][i][4] * tmp1 )
- c2 * ( qs[k][j][i] + u[k][j][i][3] * u[k][j][i][3] * tmp2 );
fjac[k][4][4] = c1 * u[k][j][i][3] * tmp1;
njac[k][0][0] = 0.0;
njac[k][1][0] = 0.0;
njac[k][2][0] = 0.0;
njac[k][3][0] = 0.0;
njac[k][4][0] = 0.0;
njac[k][0][1] = - c3c4 * tmp2 * u[k][j][i][1];
njac[k][1][1] = c3c4 * tmp1;
njac[k][2][1] = 0.0;
njac[k][3][1] = 0.0;
njac[k][4][1] = 0.0;
njac[k][0][2] = - c3c4 * tmp2 * u[k][j][i][2];
njac[k][1][2] = 0.0;
njac[k][2][2] = c3c4 * tmp1;
njac[k][3][2] = 0.0;
njac[k][4][2] = 0.0;
njac[k][0][3] = - con43 * c3c4 * tmp2 * u[k][j][i][3];
njac[k][1][3] = 0.0;
njac[k][2][3] = 0.0;
        njac[k][3][3] = con43 * c3c4 * tmp1;
njac[k][4][3] = 0.0;
njac[k][0][4] = - ( c3c4
- c1345 ) * tmp3 * (u[k][j][i][1] * u[k][j][i][1])
- ( c3c4 - c1345 ) * tmp3 * (u[k][j][i][2] * u[k][j][i][2])
- ( con43 * c3c4
- c1345 ) * tmp3 * (u[k][j][i][3] * u[k][j][i][3])
- c1345 * tmp2 * u[k][j][i][4];
njac[k][1][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][1];
njac[k][2][4] = ( c3c4 - c1345 ) * tmp2 * u[k][j][i][2];
njac[k][3][4] = ( con43 * c3c4
- c1345 ) * tmp2 * u[k][j][i][3];
njac[k][4][4] = ( c1345 ) * tmp1;
}
//---------------------------------------------------------------------
// now jacobians set, so form left hand side in z direction
//---------------------------------------------------------------------
lhsinit(lhs, ksize);
#pragma omp parallel for default(shared) private(k, tmp1, tmp2) firstprivate(ksize, dt, tz1, tz2, dz1, dz2, dz3, dz4, dz5, fjac, njac)
for (k = 1; k <= ksize - 1; k++)
{
tmp1 = dt * tz1;
tmp2 = dt * tz2;
lhs[k][AA][0][0] = - tmp2 * fjac[k - 1][0][0]
- tmp1 * njac[k - 1][0][0]
- tmp1 * dz1;
lhs[k][AA][1][0] = - tmp2 * fjac[k - 1][1][0]
- tmp1 * njac[k - 1][1][0];
lhs[k][AA][2][0] = - tmp2 * fjac[k - 1][2][0]
- tmp1 * njac[k - 1][2][0];
lhs[k][AA][3][0] = - tmp2 * fjac[k - 1][3][0]
- tmp1 * njac[k - 1][3][0];
lhs[k][AA][4][0] = - tmp2 * fjac[k - 1][4][0]
- tmp1 * njac[k - 1][4][0];
lhs[k][AA][0][1] = - tmp2 * fjac[k - 1][0][1]
- tmp1 * njac[k - 1][0][1];
lhs[k][AA][1][1] = - tmp2 * fjac[k - 1][1][1]
- tmp1 * njac[k - 1][1][1]
- tmp1 * dz2;
lhs[k][AA][2][1] = - tmp2 * fjac[k - 1][2][1]
- tmp1 * njac[k - 1][2][1];
lhs[k][AA][3][1] = - tmp2 * fjac[k - 1][3][1]
- tmp1 * njac[k - 1][3][1];
lhs[k][AA][4][1] = - tmp2 * fjac[k - 1][4][1]
- tmp1 * njac[k - 1][4][1];
lhs[k][AA][0][2] = - tmp2 * fjac[k - 1][0][2]
- tmp1 * njac[k - 1][0][2];
lhs[k][AA][1][2] = - tmp2 * fjac[k - 1][1][2]
- tmp1 * njac[k - 1][1][2];
lhs[k][AA][2][2] = - tmp2 * fjac[k - 1][2][2]
- tmp1 * njac[k - 1][2][2]
- tmp1 * dz3;
lhs[k][AA][3][2] = - tmp2 * fjac[k - 1][3][2]
- tmp1 * njac[k - 1][3][2];
lhs[k][AA][4][2] = - tmp2 * fjac[k - 1][4][2]
- tmp1 * njac[k - 1][4][2];
lhs[k][AA][0][3] = - tmp2 * fjac[k - 1][0][3]
- tmp1 * njac[k - 1][0][3];
lhs[k][AA][1][3] = - tmp2 * fjac[k - 1][1][3]
- tmp1 * njac[k - 1][1][3];
lhs[k][AA][2][3] = - tmp2 * fjac[k - 1][2][3]
- tmp1 * njac[k - 1][2][3];
lhs[k][AA][3][3] = - tmp2 * fjac[k - 1][3][3]
- tmp1 * njac[k - 1][3][3]
- tmp1 * dz4;
lhs[k][AA][4][3] = - tmp2 * fjac[k - 1][4][3]
- tmp1 * njac[k - 1][4][3];
lhs[k][AA][0][4] = - tmp2 * fjac[k - 1][0][4]
- tmp1 * njac[k - 1][0][4];
lhs[k][AA][1][4] = - tmp2 * fjac[k - 1][1][4]
- tmp1 * njac[k - 1][1][4];
lhs[k][AA][2][4] = - tmp2 * fjac[k - 1][2][4]
- tmp1 * njac[k - 1][2][4];
lhs[k][AA][3][4] = - tmp2 * fjac[k - 1][3][4]
- tmp1 * njac[k - 1][3][4];
lhs[k][AA][4][4] = - tmp2 * fjac[k - 1][4][4]
- tmp1 * njac[k - 1][4][4]
- tmp1 * dz5;
lhs[k][BB][0][0] = 1.0
+ tmp1 * 2.0 * njac[k][0][0]
+ tmp1 * 2.0 * dz1;
lhs[k][BB][1][0] = tmp1 * 2.0 * njac[k][1][0];
lhs[k][BB][2][0] = tmp1 * 2.0 * njac[k][2][0];
lhs[k][BB][3][0] = tmp1 * 2.0 * njac[k][3][0];
lhs[k][BB][4][0] = tmp1 * 2.0 * njac[k][4][0];
lhs[k][BB][0][1] = tmp1 * 2.0 * njac[k][0][1];
lhs[k][BB][1][1] = 1.0
+ tmp1 * 2.0 * njac[k][1][1]
+ tmp1 * 2.0 * dz2;
lhs[k][BB][2][1] = tmp1 * 2.0 * njac[k][2][1];
lhs[k][BB][3][1] = tmp1 * 2.0 * njac[k][3][1];
lhs[k][BB][4][1] = tmp1 * 2.0 * njac[k][4][1];
lhs[k][BB][0][2] = tmp1 * 2.0 * njac[k][0][2];
lhs[k][BB][1][2] = tmp1 * 2.0 * njac[k][1][2];
lhs[k][BB][2][2] = 1.0
+ tmp1 * 2.0 * njac[k][2][2]
+ tmp1 * 2.0 * dz3;
lhs[k][BB][3][2] = tmp1 * 2.0 * njac[k][3][2];
lhs[k][BB][4][2] = tmp1 * 2.0 * njac[k][4][2];
lhs[k][BB][0][3] = tmp1 * 2.0 * njac[k][0][3];
lhs[k][BB][1][3] = tmp1 * 2.0 * njac[k][1][3];
lhs[k][BB][2][3] = tmp1 * 2.0 * njac[k][2][3];
lhs[k][BB][3][3] = 1.0
+ tmp1 * 2.0 * njac[k][3][3]
+ tmp1 * 2.0 * dz4;
lhs[k][BB][4][3] = tmp1 * 2.0 * njac[k][4][3];
lhs[k][BB][0][4] = tmp1 * 2.0 * njac[k][0][4];
lhs[k][BB][1][4] = tmp1 * 2.0 * njac[k][1][4];
lhs[k][BB][2][4] = tmp1 * 2.0 * njac[k][2][4];
lhs[k][BB][3][4] = tmp1 * 2.0 * njac[k][3][4];
lhs[k][BB][4][4] = 1.0
+ tmp1 * 2.0 * njac[k][4][4]
+ tmp1 * 2.0 * dz5;
lhs[k][CC][0][0] = tmp2 * fjac[k + 1][0][0]
- tmp1 * njac[k + 1][0][0]
- tmp1 * dz1;
lhs[k][CC][1][0] = tmp2 * fjac[k + 1][1][0]
- tmp1 * njac[k + 1][1][0];
lhs[k][CC][2][0] = tmp2 * fjac[k + 1][2][0]
- tmp1 * njac[k + 1][2][0];
lhs[k][CC][3][0] = tmp2 * fjac[k + 1][3][0]
- tmp1 * njac[k + 1][3][0];
lhs[k][CC][4][0] = tmp2 * fjac[k + 1][4][0]
- tmp1 * njac[k + 1][4][0];
lhs[k][CC][0][1] = tmp2 * fjac[k + 1][0][1]
- tmp1 * njac[k + 1][0][1];
lhs[k][CC][1][1] = tmp2 * fjac[k + 1][1][1]
- tmp1 * njac[k + 1][1][1]
- tmp1 * dz2;
lhs[k][CC][2][1] = tmp2 * fjac[k + 1][2][1]
- tmp1 * njac[k + 1][2][1];
lhs[k][CC][3][1] = tmp2 * fjac[k + 1][3][1]
- tmp1 * njac[k + 1][3][1];
lhs[k][CC][4][1] = tmp2 * fjac[k + 1][4][1]
- tmp1 * njac[k + 1][4][1];
lhs[k][CC][0][2] = tmp2 * fjac[k + 1][0][2]
- tmp1 * njac[k + 1][0][2];
lhs[k][CC][1][2] = tmp2 * fjac[k + 1][1][2]
- tmp1 * njac[k + 1][1][2];
lhs[k][CC][2][2] = tmp2 * fjac[k + 1][2][2]
- tmp1 * njac[k + 1][2][2]
- tmp1 * dz3;
lhs[k][CC][3][2] = tmp2 * fjac[k + 1][3][2]
- tmp1 * njac[k + 1][3][2];
lhs[k][CC][4][2] = tmp2 * fjac[k + 1][4][2]
- tmp1 * njac[k + 1][4][2];
lhs[k][CC][0][3] = tmp2 * fjac[k + 1][0][3]
- tmp1 * njac[k + 1][0][3];
lhs[k][CC][1][3] = tmp2 * fjac[k + 1][1][3]
- tmp1 * njac[k + 1][1][3];
lhs[k][CC][2][3] = tmp2 * fjac[k + 1][2][3]
- tmp1 * njac[k + 1][2][3];
lhs[k][CC][3][3] = tmp2 * fjac[k + 1][3][3]
- tmp1 * njac[k + 1][3][3]
- tmp1 * dz4;
lhs[k][CC][4][3] = tmp2 * fjac[k + 1][4][3]
- tmp1 * njac[k + 1][4][3];
lhs[k][CC][0][4] = tmp2 * fjac[k + 1][0][4]
- tmp1 * njac[k + 1][0][4];
lhs[k][CC][1][4] = tmp2 * fjac[k + 1][1][4]
- tmp1 * njac[k + 1][1][4];
lhs[k][CC][2][4] = tmp2 * fjac[k + 1][2][4]
- tmp1 * njac[k + 1][2][4];
lhs[k][CC][3][4] = tmp2 * fjac[k + 1][3][4]
- tmp1 * njac[k + 1][3][4];
lhs[k][CC][4][4] = tmp2 * fjac[k + 1][4][4]
- tmp1 * njac[k + 1][4][4]
- tmp1 * dz5;
}
//---------------------------------------------------------------------
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// performs guaussian elimination on this cell.
//
// assumes that unpacking routines for non-first cells
// preload C' and rhs' from previous cell.
//
// assumed send happens outside this routine, but that
// c'(KMAX) and rhs'(KMAX) will be sent to next cell.
//---------------------------------------------------------------------
//---------------------------------------------------------------------
      // outermost do loops - sweeping in k direction
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// multiply c[0][j][i] by b_inverse and copy back to c
// multiply rhs(0) by b_inverse(0) and copy to rhs
//---------------------------------------------------------------------
binvcrhs( lhs[0][BB], lhs[0][CC], rhs[0][j][i] );
//---------------------------------------------------------------------
      // begin innermost do loop
      // do all the elements of the cell except the last
//---------------------------------------------------------------------
for (k = 1; k <= ksize - 1; k++)
{
//-------------------------------------------------------------------
        // subtract A*rhs_vector(k-1) from rhs_vector(k)
//
// rhs(k) = rhs(k) - A*rhs(k-1)
//-------------------------------------------------------------------
matvec_sub(lhs[k][AA], rhs[k - 1][j][i], rhs[k][j][i]);
        //-------------------------------------------------------------------
        // B(k) = B(k) - C(k-1)*A(k)
        //-------------------------------------------------------------------
matmul_sub(lhs[k][AA], lhs[k - 1][CC], lhs[k][BB]);
//-------------------------------------------------------------------
// multiply c[k][j][i] by b_inverse and copy back to c
        // multiply rhs[k][j][i] by b_inverse and copy to rhs
//-------------------------------------------------------------------
binvcrhs( lhs[k][BB], lhs[k][CC], rhs[k][j][i] );
}
//---------------------------------------------------------------------
// Now finish up special cases for last cell
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// rhs(ksize) = rhs(ksize) - A*rhs(ksize-1)
//---------------------------------------------------------------------
matvec_sub(lhs[ksize][AA], rhs[ksize - 1][j][i], rhs[ksize][j][i]);
      //---------------------------------------------------------------------
      // B(ksize) = B(ksize) - C(ksize-1)*A(ksize)
      //---------------------------------------------------------------------
matmul_sub(lhs[ksize][AA], lhs[ksize - 1][CC], lhs[ksize][BB]);
//---------------------------------------------------------------------
// multiply rhs(ksize) by b_inverse(ksize) and copy to rhs
//---------------------------------------------------------------------
binvrhs( lhs[ksize][BB], rhs[ksize][j][i] );
      //---------------------------------------------------------------------
      // back solve: if last cell, then generate U(ksize)=rhs(ksize)
      // else assume U(ksize) is loaded in unpack backsub_info
      // so just use it
      // afterwards u(kstart) will be sent to next cell
//---------------------------------------------------------------------
for (k = ksize - 1; k >= 0; k--)
{
for (m = 0; m < BLOCK_SIZE; m++)
{
for (n = 0; n < BLOCK_SIZE; n++)
{
rhs[k][j][i][m] = rhs[k][j][i][m]
- lhs[k][CC][n][m] * rhs[k + 1][j][i][n];
}
}
}
}
}
}
void print_results(char *name, char class, int n1, int n2, int n3, int niter,
double t, double mops, char *optype, int verified)
{
char size[16];
int j;
printf( "\n\n %s Benchmark Completed.\n", name );
printf( " Class = %12c\n", class );
// If this is not a grid-based problem (EP, FT, CG), then
// we only print n1, which contains some measure of the
// problem size. In that case, n2 and n3 are both zero.
// Otherwise, we print the grid size n1xn2xn3
if ( ( n2 == 0 ) && ( n3 == 0 ) )
{
if ( ( name[0] == 'E' ) && ( name[1] == 'P' ) )
{
sprintf( size, "%15.0lf", pow(2.0, n1) );
j = 14;
if ( size[j] == '.' )
{
size[j] = ' ';
j--;
}
size[j + 1] = '\0';
printf( " Size = %15s\n", size );
}
else
{
printf( " Size = %12d\n", n1 );
}
}
else
{
printf( " Size = %4dx%4dx%4d\n", n1, n2, n3 );
}
printf( " Iterations = %12d\n", niter );
printf( " Time in seconds = %12.2lf\n", t );
printf( " Mop/s total = %15.2lf\n", mops );
printf( " Operation type = %24s\n", optype );
if ( verified )
printf( " Verification = %12s\n", "SUCCESSFUL" );
else
printf( " Verification = %12s\n", "UNSUCCESSFUL" );
}
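// Editor's note: wtime() below latches the first call's tv.tv_sec as a
// baseline, so the returned double stays small and keeps full microsecond
// resolution even over long runs.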
void wtime(double *t)
{
static int sec = -1;
struct timeval tv;
gettimeofday(&tv, (void *)0);
if (sec < 0) sec = tv.tv_sec;
*t = (tv.tv_sec - sec) + 1.0e-6 * tv.tv_usec;
}
/*****************************************************************/
/****** E L A P S E D _ T I M E ******/
/*****************************************************************/
double elapsed_time( void )
{
double t;
wtime( &t );
return ( t );
}
/*****************************************************************/
/****** T I M E R _ C L E A R ******/
/*****************************************************************/
void timer_clear( int n )
{
elapsed[n] = 0.0;
}
/*****************************************************************/
/****** T I M E R _ S T A R T ******/
/*****************************************************************/
void timer_start( int n )
{
start[n] = elapsed_time();
}
/*****************************************************************/
/****** T I M E R _ S T O P ******/
/*****************************************************************/
void timer_stop( int n )
{
double t, now;
now = elapsed_time();
t = now - start[n];
elapsed[n] += t;
}
/*****************************************************************/
/****** T I M E R _ R E A D ******/
/*****************************************************************/
double timer_read( int n )
{
return ( elapsed[n] );
}
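/*****************************************************************/
/******    T I M E R   U S A G E   S K E T C H             ******/
/*****************************************************************/
// Editor's example (hypothetical, not part of the benchmark): time one
// region using slot 0 of the start[]/elapsed[] timer arrays above.
static double time_region_example(void (*region)(void))
{
  timer_clear(0);
  timer_start(0);
  region();              // the code being measured
  timer_stop(0);
  return timer_read(0);
}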
|
statistic.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% SSSSS TTTTT AAA TTTTT IIIII SSSSS TTTTT IIIII CCCC %
% SS T A A T I SS T I C %
% SSS T AAAAA T I SSS T I C %
% SS T A A T I SS T I C %
% SSSSS T A A T IIIII SSSSS T IIIII CCCC %
% %
% %
% MagickCore Image Statistical Methods %
% %
% Software Design %
% Cristy %
% July 1992 %
% %
% %
% Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% https://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "MagickCore/studio.h"
#include "MagickCore/accelerate-private.h"
#include "MagickCore/animate.h"
#include "MagickCore/artifact.h"
#include "MagickCore/blob.h"
#include "MagickCore/blob-private.h"
#include "MagickCore/cache.h"
#include "MagickCore/cache-private.h"
#include "MagickCore/cache-view.h"
#include "MagickCore/client.h"
#include "MagickCore/color.h"
#include "MagickCore/color-private.h"
#include "MagickCore/colorspace.h"
#include "MagickCore/colorspace-private.h"
#include "MagickCore/composite.h"
#include "MagickCore/composite-private.h"
#include "MagickCore/compress.h"
#include "MagickCore/constitute.h"
#include "MagickCore/display.h"
#include "MagickCore/draw.h"
#include "MagickCore/enhance.h"
#include "MagickCore/exception.h"
#include "MagickCore/exception-private.h"
#include "MagickCore/gem.h"
#include "MagickCore/gem-private.h"
#include "MagickCore/geometry.h"
#include "MagickCore/list.h"
#include "MagickCore/image-private.h"
#include "MagickCore/magic.h"
#include "MagickCore/magick.h"
#include "MagickCore/memory_.h"
#include "MagickCore/module.h"
#include "MagickCore/monitor.h"
#include "MagickCore/monitor-private.h"
#include "MagickCore/option.h"
#include "MagickCore/paint.h"
#include "MagickCore/pixel-accessor.h"
#include "MagickCore/profile.h"
#include "MagickCore/property.h"
#include "MagickCore/quantize.h"
#include "MagickCore/quantum-private.h"
#include "MagickCore/random_.h"
#include "MagickCore/random-private.h"
#include "MagickCore/resource_.h"
#include "MagickCore/segment.h"
#include "MagickCore/semaphore.h"
#include "MagickCore/signature-private.h"
#include "MagickCore/statistic.h"
#include "MagickCore/string_.h"
#include "MagickCore/thread-private.h"
#include "MagickCore/timer.h"
#include "MagickCore/utility.h"
#include "MagickCore/version.h"
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% E v a l u a t e I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% EvaluateImage() applies a value to an image with an arithmetic, relational,
% or logical operator. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
% The format of the EvaluateImage method is:
%
% MagickBooleanType EvaluateImage(Image *image,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
% MagickBooleanType EvaluateImages(Image *images,
% const MagickEvaluateOperator op,const double value,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
%    o op: a channel operator.
%
%    o value: the value applied with the operator.
%
% o exception: return any errors or warnings in this structure.
%
*/
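/*
  Editor's usage sketch (hypothetical image and value; follows the
  signature documented above):

    ExceptionInfo *exception = AcquireExceptionInfo();
    (void) EvaluateImage(image, MultiplyEvaluateOperator, 0.5, exception);
    exception = DestroyExceptionInfo(exception);
*/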
typedef struct _PixelChannels
{
double
channel[CompositePixelChannel];
} PixelChannels;
static PixelChannels **DestroyPixelThreadSet(PixelChannels **pixels)
{
register ssize_t
i;
assert(pixels != (PixelChannels **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixels[i] != (PixelChannels *) NULL)
pixels[i]=(PixelChannels *) RelinquishMagickMemory(pixels[i]);
pixels=(PixelChannels **) RelinquishMagickMemory(pixels);
return(pixels);
}
static PixelChannels **AcquirePixelThreadSet(const Image *image)
{
PixelChannels
**pixels;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixels=(PixelChannels **) AcquireQuantumMemory(number_threads,
sizeof(*pixels));
if (pixels == (PixelChannels **) NULL)
return((PixelChannels **) NULL);
(void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels));
for (i=0; i < (ssize_t) number_threads; i++)
{
register ssize_t
j;
pixels[i]=(PixelChannels *) AcquireQuantumMemory(image->columns,
sizeof(**pixels));
if (pixels[i] == (PixelChannels *) NULL)
return(DestroyPixelThreadSet(pixels));
for (j=0; j < (ssize_t) image->columns; j++)
{
register ssize_t
k;
for (k=0; k < MaxPixelChannels; k++)
pixels[i][j].channel[k]=0.0;
}
}
return(pixels);
}
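/*
  Editor's note: AcquirePixelThreadSet() allocates one image-row of
  PixelChannels scratch space per OpenMP thread; each worker below indexes
  it with GetOpenMPThreadId(), so rows are never shared between threads.
*/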
static inline double EvaluateMax(const double x,const double y)
{
if (x > y)
return(x);
return(y);
}
#if defined(__cplusplus) || defined(c_plusplus)
extern "C" {
#endif
static int IntensityCompare(const void *x,const void *y)
{
const PixelChannels
*color_1,
*color_2;
double
distance;
register ssize_t
i;
color_1=(const PixelChannels *) x;
color_2=(const PixelChannels *) y;
distance=0.0;
for (i=0; i < MaxPixelChannels; i++)
distance+=color_1->channel[i]-(double) color_2->channel[i];
return(distance < 0 ? -1 : distance > 0 ? 1 : 0);
}
#if defined(__cplusplus) || defined(c_plusplus)
}
#endif
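/*
  Editor's note: IntensityCompare() sums the signed per-channel differences,
  ordering pixels by total intensity; the extern "C" guard keeps the
  comparator's linkage compatible with qsort() when compiled as C++.
*/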
static double ApplyEvaluateOperator(RandomInfo *random_info,const Quantum pixel,
const MagickEvaluateOperator op,const double value)
{
double
result;
result=0.0;
switch (op)
{
case UndefinedEvaluateOperator:
break;
case AbsEvaluateOperator:
{
result=(double) fabs((double) (pixel+value));
break;
}
case AddEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case AddModulusEvaluateOperator:
{
/*
        This returns a 'floored modulus' of the addition, which is always a
        positive result. It differs from % or fmod(), which return a
        'truncated modulus' (floor() replaced by trunc()) that can be
        negative and is then clipped.
*/
result=pixel+value;
result-=(QuantumRange+1.0)*floor((double) result/(QuantumRange+1.0));
break;
}
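    /*
      Editor's worked example (assuming QuantumRange = 255, so the modulus
      base is 256): pixel+value = -10 gives
        floored:   -10 - 256*floor(-10/256.0) = -10 - 256*(-1) = 246
        truncated: fmod(-10, 256) = -10, which would clip to 0.
    */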
case AndEvaluateOperator:
{
result=(double) ((size_t) pixel & (size_t) (value+0.5));
break;
}
case CosineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*cos((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case DivideEvaluateOperator:
{
result=pixel/(value == 0.0 ? 1.0 : value);
break;
}
case ExponentialEvaluateOperator:
{
result=(double) (QuantumRange*exp((double) (value*QuantumScale*pixel)));
break;
}
case GaussianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
GaussianNoise,value);
break;
}
case ImpulseNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,ImpulseNoise,
value);
break;
}
case LaplacianNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
LaplacianNoise,value);
break;
}
case LeftShiftEvaluateOperator:
{
result=(double) ((size_t) pixel << (size_t) (value+0.5));
break;
}
case LogEvaluateOperator:
{
if ((QuantumScale*pixel) >= MagickEpsilon)
result=(double) (QuantumRange*log((double) (QuantumScale*value*pixel+
1.0))/log((double) (value+1.0)));
break;
}
case MaxEvaluateOperator:
{
result=(double) EvaluateMax((double) pixel,value);
break;
}
case MeanEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case MedianEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case MinEvaluateOperator:
{
result=(double) MagickMin((double) pixel,value);
break;
}
case MultiplicativeNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,
MultiplicativeGaussianNoise,value);
break;
}
case MultiplyEvaluateOperator:
{
result=(double) (value*pixel);
break;
}
case OrEvaluateOperator:
{
result=(double) ((size_t) pixel | (size_t) (value+0.5));
break;
}
case PoissonNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,PoissonNoise,
value);
break;
}
case PowEvaluateOperator:
{
result=(double) (QuantumRange*pow((double) (QuantumScale*pixel),(double)
value));
break;
}
case RightShiftEvaluateOperator:
{
result=(double) ((size_t) pixel >> (size_t) (value+0.5));
break;
}
case RootMeanSquareEvaluateOperator:
{
result=(double) (pixel*pixel+value);
break;
}
case SetEvaluateOperator:
{
result=value;
break;
}
case SineEvaluateOperator:
{
result=(double) (QuantumRange*(0.5*sin((double) (2.0*MagickPI*
QuantumScale*pixel*value))+0.5));
break;
}
case SubtractEvaluateOperator:
{
result=(double) (pixel-value);
break;
}
case SumEvaluateOperator:
{
result=(double) (pixel+value);
break;
}
case ThresholdEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : QuantumRange);
break;
}
case ThresholdBlackEvaluateOperator:
{
result=(double) (((double) pixel <= value) ? 0 : pixel);
break;
}
case ThresholdWhiteEvaluateOperator:
{
result=(double) (((double) pixel > value) ? QuantumRange : pixel);
break;
}
case UniformNoiseEvaluateOperator:
{
result=(double) GenerateDifferentialNoise(random_info,pixel,UniformNoise,
value);
break;
}
case XorEvaluateOperator:
{
result=(double) ((size_t) pixel ^ (size_t) (value+0.5));
break;
}
}
return(result);
}
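/*
  Editor's note: the accumulation-style operators above (Mean, Sum, Median,
  RootMeanSquare) only add a contribution per image here; EvaluateImages()
  finishes them afterwards by dividing by the image count, sorting for the
  median, and taking the square root for RMS.
*/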
MagickExport Image *EvaluateImages(const Image *images,
const MagickEvaluateOperator op,ExceptionInfo *exception)
{
#define EvaluateImageTag "Evaluate/Image"
CacheView
*evaluate_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelChannels
**magick_restrict evaluate_pixels;
RandomInfo
**magick_restrict random_info;
size_t
number_images;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=CloneImage(images,images->columns,images->rows,MagickTrue,
exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
evaluate_pixels=AcquirePixelThreadSet(images);
if (evaluate_pixels == (PixelChannels **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Evaluate image pixels.
*/
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
evaluate_view=AcquireAuthenticCacheView(image,exception);
if (op == MedianEvaluateOperator)
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register PixelChannels
*evaluate_pixel;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_pixel=evaluate_pixels[id];
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
j,
k;
for (j=0; j < (ssize_t) number_images; j++)
for (k=0; k < MaxPixelChannels; k++)
evaluate_pixel[j].channel[k]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
register const Quantum
*p;
register ssize_t
i;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,x,y,1,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel);
PixelTrait traits=GetPixelChannelTraits(next,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
evaluate_pixel[j].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(image,channel,p),op,
evaluate_pixel[j].channel[i]);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
qsort((void *) evaluate_pixel,number_images,sizeof(*evaluate_pixel),
IntensityCompare);
/* after the j-loop above, j == number_images, so j/2 indexes the median */
for (k=0; k < (ssize_t) GetPixelChannels(image); k++)
q[k]=ClampToQuantum(evaluate_pixel[j/2].channel[k]);
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
else
{
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,images,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register ssize_t
i,
x;
register PixelChannels
*evaluate_pixel;
register Quantum
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(evaluate_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
evaluate_pixel=evaluate_pixels[id];
for (j=0; j < (ssize_t) image->columns; j++)
for (i=0; i < MaxPixelChannels; i++)
evaluate_pixel[j].channel[i]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
register const Quantum
*p;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,
exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(next,p) == 0)
{
p+=GetPixelChannels(next);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(next,channel);
PixelTrait evaluate_traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(evaluate_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
/* j == 0 seeds the accumulator with an add; later images apply op */
evaluate_pixel[x].channel[i]=ApplyEvaluateOperator(
random_info[id],GetPixelChannel(image,channel,p),j == 0 ?
AddEvaluateOperator : op,evaluate_pixel[x].channel[i]);
}
p+=GetPixelChannels(next);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
switch (op)
{
case MeanEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]/=(double) number_images;
break;
}
case MultiplyEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
register ssize_t
j;
for (j=0; j < (ssize_t) (number_images-1); j++)
evaluate_pixel[x].channel[i]*=QuantumScale;
}
break;
}
case RootMeanSquareEvaluateOperator:
{
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
evaluate_pixel[x].channel[i]=sqrt(evaluate_pixel[x].channel[i]/
number_images);
break;
}
default:
break;
}
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(evaluate_pixel[x].channel[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(evaluate_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImages)
#endif
proceed=SetImageProgress(images,EvaluateImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
}
evaluate_view=DestroyCacheView(evaluate_view);
evaluate_pixels=DestroyPixelThreadSet(evaluate_pixels);
random_info=DestroyRandomInfoThreadSet(random_info);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
MagickExport MagickBooleanType EvaluateImage(Image *image,
const MagickEvaluateOperator op,const double value,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
RandomInfo
**magick_restrict random_info;
ssize_t
y;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
unsigned long
key;
#endif
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
random_info=AcquireRandomInfoThreadSet();
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
key=GetRandomSecretKey(random_info[0]);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,key == ~0UL)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
double
result;
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if (((traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,q) == 0))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
result=ApplyEvaluateOperator(random_info[id],q[i],op,value);
if (op == MeanEvaluateOperator)
result/=2.0;
q[i]=ClampToQuantum(result);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_EvaluateImage)
#endif
proceed=SetImageProgress(image,EvaluateImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
random_info=DestroyRandomInfoThreadSet(random_info);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% F u n c t i o n I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% FunctionImage() applies a function to the image with an arithmetic,
% relational, or logical operator. Use these operations to lighten or darken
% an image, to increase or decrease contrast in an image, or to produce the
% "negative" of an image.
%
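% For example, to apply a sinusoid to each channel (a usage sketch with
% illustrative parameter values; the order frequency, phase, amplitude, bias
% follows the SinusoidFunction case below):
%
% double parameters[4] = { 3.0, 0.0, 0.5, 0.5 };
% (void) FunctionImage(image,SinusoidFunction,4,parameters,exception);
%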
% The format of the FunctionImage method is:
%
% MagickBooleanType FunctionImage(Image *image,
% const MagickFunction function,const ssize_t number_parameters,
% const double *parameters,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o function: A channel function.
%
% o parameters: one or more parameters.
%
% o exception: return any errors or warnings in this structure.
%
*/
static Quantum ApplyFunction(Quantum pixel,const MagickFunction function,
const size_t number_parameters,const double *parameters,
ExceptionInfo *exception)
{
double
result;
register ssize_t
i;
(void) exception;
result=0.0;
switch (function)
{
case PolynomialFunction:
{
/*
Polynomial: polynomial constants, highest to lowest order (e.g. c0*x^3+
c1*x^2+c2*x+c3).
*/
result=0.0;
for (i=0; i < (ssize_t) number_parameters; i++)
result=result*QuantumScale*pixel+parameters[i];
result*=QuantumRange;
break;
}
case SinusoidFunction:
{
double
amplitude,
bias,
frequency,
phase;
/*
Sinusoid: frequency, phase, amplitude, bias.
*/
frequency=(number_parameters >= 1) ? parameters[0] : 1.0;
phase=(number_parameters >= 2) ? parameters[1] : 0.0;
amplitude=(number_parameters >= 3) ? parameters[2] : 0.5;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (QuantumRange*(amplitude*sin((double) (2.0*
MagickPI*(frequency*QuantumScale*pixel+phase/360.0)))+bias));
break;
}
case ArcsinFunction:
{
double
bias,
center,
range,
width;
/*
Arcsin (pegged at range limits for invalid results): width, center,
range, and bias.
*/
width=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=2.0/width*(QuantumScale*pixel-center);
if (result <= -1.0)
result=bias-range/2.0;
else
if (result >= 1.0)
result=bias+range/2.0;
else
result=(double) (range/MagickPI*asin((double) result)+bias);
result*=QuantumRange;
break;
}
case ArctanFunction:
{
double
center,
bias,
range,
slope;
/*
Arctan: slope, center, range, and bias.
*/
slope=(number_parameters >= 1) ? parameters[0] : 1.0;
center=(number_parameters >= 2) ? parameters[1] : 0.5;
range=(number_parameters >= 3) ? parameters[2] : 1.0;
bias=(number_parameters >= 4) ? parameters[3] : 0.5;
result=(double) (MagickPI*slope*(QuantumScale*pixel-center));
result=(double) (QuantumRange*(range/MagickPI*atan((double)
result)+bias));
break;
}
case UndefinedFunction:
break;
}
return(ClampToQuantum(result));
}
MagickExport MagickBooleanType FunctionImage(Image *image,
const MagickFunction function,const size_t number_parameters,
const double *parameters,ExceptionInfo *exception)
{
#define FunctionImageTag "Function/Image "
CacheView
*image_view;
MagickBooleanType
status;
MagickOffsetType
progress;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
if (AccelerateFunctionImage(image,function,number_parameters,parameters,
exception) != MagickFalse)
return(MagickTrue);
#endif
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ApplyFunction(q[i],function,number_parameters,parameters,
exception);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_FunctionImage)
#endif
proceed=SetImageProgress(image,FunctionImageTag,progress++,image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E n t r o p y %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageEntropy() returns the entropy of one or more image channels.
%
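% For example (a usage sketch, assuming image and exception are valid):
%
% double entropy;
% (void) GetImageEntropy(image,&entropy,exception);
%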
% The format of the GetImageEntropy method is:
%
% MagickBooleanType GetImageEntropy(const Image *image,double *entropy,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o entropy: the average entropy of the selected channels.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageEntropy(const Image *image,
double *entropy,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*entropy=channel_statistics[CompositePixelChannel].entropy;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e E x t r e m a %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageExtrema() returns the extrema of one or more image channels.
%
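% For example (a usage sketch, assuming image and exception are valid):
%
% size_t minima, maxima;
% (void) GetImageExtrema(image,&minima,&maxima,exception);
%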
% The format of the GetImageExtrema method is:
%
% MagickBooleanType GetImageExtrema(const Image *image,size_t *minima,
% size_t *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageExtrema(const Image *image,
size_t *minima,size_t *maxima,ExceptionInfo *exception)
{
double
max,
min;
MagickBooleanType
status;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=GetImageRange(image,&min,&max,exception);
*minima=(size_t) ceil(min-0.5);
*maxima=(size_t) floor(max+0.5);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e K u r t o s i s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageKurtosis() returns the kurtosis and skewness of one or more image
% channels.
%
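% For example (a usage sketch, assuming image and exception are valid):
%
% double kurtosis, skewness;
% (void) GetImageKurtosis(image,&kurtosis,&skewness,exception);
%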
% The format of the GetImageKurtosis method is:
%
% MagickBooleanType GetImageKurtosis(const Image *image,double *kurtosis,
% double *skewness,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o kurtosis: the kurtosis of the channel.
%
% o skewness: the skewness of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageKurtosis(const Image *image,
double *kurtosis,double *skewness,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*kurtosis=channel_statistics[CompositePixelChannel].kurtosis;
*skewness=channel_statistics[CompositePixelChannel].skewness;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M e a n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMean() returns the mean and standard deviation of one or more image
% channels.
%
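% For example (a usage sketch, assuming image and exception are valid):
%
% double mean, standard_deviation;
% (void) GetImageMean(image,&mean,&standard_deviation,exception);
%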
% The format of the GetImageMean method is:
%
% MagickBooleanType GetImageMean(const Image *image,double *mean,
% double *standard_deviation,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o mean: the average value in the channel.
%
% o standard_deviation: the standard deviation of the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageMean(const Image *image,double *mean,
double *standard_deviation,ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_statistics=GetImageStatistics(image,exception);
if (channel_statistics == (ChannelStatistics *) NULL)
return(MagickFalse);
*mean=channel_statistics[CompositePixelChannel].mean;
*standard_deviation=
channel_statistics[CompositePixelChannel].standard_deviation;
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e M o m e n t s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageMoments() returns the normalized moments of one or more image
% channels.
%
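% For example, to read the first Hu invariant moment of the red channel (a
% usage sketch; relinquish the returned buffer when you are done with it):
%
% channel_moments=GetImageMoments(image,exception);
% hu1=channel_moments[RedPixelChannel].invariant[0];
%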
% The format of the GetImageMoments method is:
%
% ChannelMoments *GetImageMoments(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static size_t GetImageChannels(const Image *image)
{
register ssize_t
i;
size_t
channels;
channels=0;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
channels++;
}
return((size_t) (channels == 0 ? 1 : channels));
}
MagickExport ChannelMoments *GetImageMoments(const Image *image,
ExceptionInfo *exception)
{
#define MaxNumberImageMoments 8
CacheView
*image_view;
ChannelMoments
*channel_moments;
double
M00[MaxPixelChannels+1],
M01[MaxPixelChannels+1],
M02[MaxPixelChannels+1],
M03[MaxPixelChannels+1],
M10[MaxPixelChannels+1],
M11[MaxPixelChannels+1],
M12[MaxPixelChannels+1],
M20[MaxPixelChannels+1],
M21[MaxPixelChannels+1],
M22[MaxPixelChannels+1],
M30[MaxPixelChannels+1];
PointInfo
centroid[MaxPixelChannels+1];
ssize_t
channel,
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
channel_moments=(ChannelMoments *) AcquireQuantumMemory(MaxPixelChannels+1,
sizeof(*channel_moments));
if (channel_moments == (ChannelMoments *) NULL)
return(channel_moments);
(void) ResetMagickMemory(channel_moments,0,(MaxPixelChannels+1)*
sizeof(*channel_moments));
(void) ResetMagickMemory(centroid,0,sizeof(centroid));
(void) ResetMagickMemory(M00,0,sizeof(M00));
(void) ResetMagickMemory(M01,0,sizeof(M01));
(void) ResetMagickMemory(M02,0,sizeof(M02));
(void) ResetMagickMemory(M03,0,sizeof(M03));
(void) ResetMagickMemory(M10,0,sizeof(M10));
(void) ResetMagickMemory(M11,0,sizeof(M11));
(void) ResetMagickMemory(M12,0,sizeof(M12));
(void) ResetMagickMemory(M20,0,sizeof(M20));
(void) ResetMagickMemory(M21,0,sizeof(M21));
(void) ResetMagickMemory(M22,0,sizeof(M22));
(void) ResetMagickMemory(M30,0,sizeof(M30));
image_view=AcquireVirtualCacheView(image,exception);
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute center of mass (centroid).
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M00[channel]+=QuantumScale*p[i];
M00[MaxPixelChannels]+=QuantumScale*p[i];
M10[channel]+=x*QuantumScale*p[i];
M10[MaxPixelChannels]+=x*QuantumScale*p[i];
M01[channel]+=y*QuantumScale*p[i];
M01[MaxPixelChannels]+=y*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute center of mass (centroid).
*/
if (M00[channel] < MagickEpsilon)
{
M00[channel]+=MagickEpsilon;
centroid[channel].x=(double) image->columns/2.0;
centroid[channel].y=(double) image->rows/2.0;
continue;
}
M00[channel]+=MagickEpsilon;
centroid[channel].x=M10[channel]/M00[channel];
centroid[channel].y=M01[channel]/M00[channel];
}
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute the image moments.
*/
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
M11[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M11[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
QuantumScale*p[i];
M20[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M20[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
QuantumScale*p[i];
M02[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M02[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
QuantumScale*p[i];
M21[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M21[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[channel]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M12[MaxPixelChannels]+=(x-centroid[channel].x)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M22[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M22[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(y-centroid[channel].y)*(y-centroid[channel].y)*QuantumScale*p[i];
M30[channel]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M30[MaxPixelChannels]+=(x-centroid[channel].x)*(x-centroid[channel].x)*
(x-centroid[channel].x)*QuantumScale*p[i];
M03[channel]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
M03[MaxPixelChannels]+=(y-centroid[channel].y)*(y-centroid[channel].y)*
(y-centroid[channel].y)*QuantumScale*p[i];
}
p+=GetPixelChannels(image);
}
}
M00[MaxPixelChannels]/=GetImageChannels(image);
M01[MaxPixelChannels]/=GetImageChannels(image);
M02[MaxPixelChannels]/=GetImageChannels(image);
M03[MaxPixelChannels]/=GetImageChannels(image);
M10[MaxPixelChannels]/=GetImageChannels(image);
M11[MaxPixelChannels]/=GetImageChannels(image);
M12[MaxPixelChannels]/=GetImageChannels(image);
M20[MaxPixelChannels]/=GetImageChannels(image);
M21[MaxPixelChannels]/=GetImageChannels(image);
M22[MaxPixelChannels]/=GetImageChannels(image);
M30[MaxPixelChannels]/=GetImageChannels(image);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute elliptical angle, major and minor axes, eccentricity, & intensity.
*/
channel_moments[channel].centroid=centroid[channel];
channel_moments[channel].ellipse_axis.x=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])+sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_axis.y=sqrt((2.0/M00[channel])*
((M20[channel]+M02[channel])-sqrt(4.0*M11[channel]*M11[channel]+
(M20[channel]-M02[channel])*(M20[channel]-M02[channel]))));
channel_moments[channel].ellipse_angle=RadiansToDegrees(0.5*atan(2.0*
M11[channel]/(M20[channel]-M02[channel]+MagickEpsilon)));
if (fabs(M11[channel]) < MagickEpsilon)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
else
if (M11[channel] < 0.0)
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=180.0;
}
else
{
if (fabs(M20[channel]-M02[channel]) < MagickEpsilon)
channel_moments[channel].ellipse_angle+=0.0;
else
if ((M20[channel]-M02[channel]) < 0.0)
channel_moments[channel].ellipse_angle+=90.0;
else
channel_moments[channel].ellipse_angle+=0.0;
}
channel_moments[channel].ellipse_eccentricity=sqrt(1.0-(
channel_moments[channel].ellipse_axis.y/
(channel_moments[channel].ellipse_axis.x+MagickEpsilon)));
channel_moments[channel].ellipse_intensity=M00[channel]/
(MagickPI*channel_moments[channel].ellipse_axis.x*
channel_moments[channel].ellipse_axis.y+MagickEpsilon);
}
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Normalize image moments.
*/
M10[channel]=0.0;
M01[channel]=0.0;
M11[channel]/=pow(M00[channel],1.0+(1.0+1.0)/2.0);
M20[channel]/=pow(M00[channel],1.0+(2.0+0.0)/2.0);
M02[channel]/=pow(M00[channel],1.0+(0.0+2.0)/2.0);
M21[channel]/=pow(M00[channel],1.0+(2.0+1.0)/2.0);
M12[channel]/=pow(M00[channel],1.0+(1.0+2.0)/2.0);
M22[channel]/=pow(M00[channel],1.0+(2.0+2.0)/2.0);
M30[channel]/=pow(M00[channel],1.0+(3.0+0.0)/2.0);
M03[channel]/=pow(M00[channel],1.0+(0.0+3.0)/2.0);
M00[channel]=1.0;
}
image_view=DestroyCacheView(image_view);
for (channel=0; channel <= MaxPixelChannels; channel++)
{
/*
Compute Hu invariant moments.
*/
channel_moments[channel].invariant[0]=M20[channel]+M02[channel];
channel_moments[channel].invariant[1]=(M20[channel]-M02[channel])*
(M20[channel]-M02[channel])+4.0*M11[channel]*M11[channel];
channel_moments[channel].invariant[2]=(M30[channel]-3.0*M12[channel])*
(M30[channel]-3.0*M12[channel])+(3.0*M21[channel]-M03[channel])*
(3.0*M21[channel]-M03[channel]);
channel_moments[channel].invariant[3]=(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])+(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]);
channel_moments[channel].invariant[4]=(M30[channel]-3.0*M12[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))+(3.0*M21[channel]-M03[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[5]=(M20[channel]-M02[channel])*
((M30[channel]+M12[channel])*(M30[channel]+M12[channel])-
(M21[channel]+M03[channel])*(M21[channel]+M03[channel]))+
4.0*M11[channel]*(M30[channel]+M12[channel])*(M21[channel]+M03[channel]);
channel_moments[channel].invariant[6]=(3.0*M21[channel]-M03[channel])*
(M30[channel]+M12[channel])*((M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-3.0*(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]))-(M30[channel]-3*M12[channel])*
(M21[channel]+M03[channel])*(3.0*(M30[channel]+M12[channel])*
(M30[channel]+M12[channel])-(M21[channel]+M03[channel])*
(M21[channel]+M03[channel]));
channel_moments[channel].invariant[7]=M11[channel]*((M30[channel]+
M12[channel])*(M30[channel]+M12[channel])-(M03[channel]+M21[channel])*
(M03[channel]+M21[channel]))-(M20[channel]-M02[channel])*
(M30[channel]+M12[channel])*(M03[channel]+M21[channel]);
}
if (y < (ssize_t) image->rows)
channel_moments=(ChannelMoments *) RelinquishMagickMemory(channel_moments);
return(channel_moments);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e P e r c e p t u a l H a s h %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImagePerceptualHash() returns the perceptual hash of one or more
% image channels.
%
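% For example (a usage sketch; phash is indexed by colorspace then moment,
% matching the loops below):
%
% perceptual_hash=GetImagePerceptualHash(image,exception);
% srgb_hu1=perceptual_hash[RedPixelChannel].phash[0][0];
%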
% The format of the GetImagePerceptualHash method is:
%
% ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
static inline double MagickLog10(const double x)
{
#define Log10Epsilon (1.0e-11)
if (fabs(x) < Log10Epsilon)
return(log10(Log10Epsilon));
return(log10(fabs(x)));
}
MagickExport ChannelPerceptualHash *GetImagePerceptualHash(const Image *image,
ExceptionInfo *exception)
{
ChannelPerceptualHash
*perceptual_hash;
char
*colorspaces,
*q;
const char
*artifact;
MagickBooleanType
status;
register char
*p;
register ssize_t
i;
perceptual_hash=(ChannelPerceptualHash *) AcquireQuantumMemory(
MaxPixelChannels+1UL,sizeof(*perceptual_hash));
if (perceptual_hash == (ChannelPerceptualHash *) NULL)
return((ChannelPerceptualHash *) NULL);
artifact=GetImageArtifact(image,"phash:colorspaces");
if (artifact != NULL)
colorspaces=AcquireString(artifact);
else
colorspaces=AcquireString("sRGB,HCLp");
perceptual_hash[0].number_colorspaces=0;
perceptual_hash[0].number_channels=0;
q=colorspaces;
for (i=0; (p=StringToken(",",&q)) != (char *) NULL; i++)
{
ChannelMoments
*moments;
Image
*hash_image;
size_t
j;
ssize_t
channel,
colorspace;
if (i >= MaximumNumberOfPerceptualColorspaces)
break;
colorspace=ParseCommandOption(MagickColorspaceOptions,MagickFalse,p);
if (colorspace < 0)
break;
perceptual_hash[0].colorspace[i]=(ColorspaceType) colorspace;
hash_image=BlurImage(image,0.0,1.0,exception);
if (hash_image == (Image *) NULL)
break;
hash_image->depth=8;
status=TransformImageColorspace(hash_image,(ColorspaceType) colorspace,
exception);
if (status == MagickFalse)
break;
moments=GetImageMoments(hash_image,exception);
perceptual_hash[0].number_colorspaces++;
perceptual_hash[0].number_channels+=GetImageChannels(hash_image);
hash_image=DestroyImage(hash_image);
if (moments == (ChannelMoments *) NULL)
break;
for (channel=0; channel <= MaxPixelChannels; channel++)
for (j=0; j < MaximumNumberOfImageMoments; j++)
perceptual_hash[channel].phash[i][j]=
(-MagickLog10(moments[channel].invariant[j]));
moments=(ChannelMoments *) RelinquishMagickMemory(moments);
}
colorspaces=DestroyString(colorspaces);
return(perceptual_hash);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e R a n g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageRange() returns the range of one or more image channels.
%
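% For example (a usage sketch, assuming image and exception are valid):
%
% double minima, maxima;
% (void) GetImageRange(image,&minima,&maxima,exception);
%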
% The format of the GetImageRange method is:
%
% MagickBooleanType GetImageRange(const Image *image,double *minima,
% double *maxima,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o minima: the minimum value in the channel.
%
% o maxima: the maximum value in the channel.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GetImageRange(const Image *image,double *minima,
double *maxima,ExceptionInfo *exception)
{
CacheView
*image_view;
MagickBooleanType
initialize,
status;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
status=MagickTrue;
initialize=MagickTrue;
*maxima=0.0;
*minima=0.0;
image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(status,initialize) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
double
row_maxima = 0.0,
row_minima = 0.0;
MagickBooleanType
row_initialize;
register const Quantum
*magick_restrict p;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
status=MagickFalse;
continue;
}
row_initialize=MagickTrue;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (row_initialize != MagickFalse)
{
row_minima=(double) p[i];
row_maxima=(double) p[i];
row_initialize=MagickFalse;
}
else
{
if ((double) p[i] < row_minima)
row_minima=(double) p[i];
if ((double) p[i] > row_maxima)
row_maxima=(double) p[i];
}
}
p+=GetPixelChannels(image);
}
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageRange)
#endif
{
if (initialize != MagickFalse)
{
*minima=row_minima;
*maxima=row_maxima;
initialize=MagickFalse;
}
else
{
if (row_minima < *minima)
*minima=row_minima;
if (row_maxima > *maxima)
*maxima=row_maxima;
}
}
}
image_view=DestroyCacheView(image_view);
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e S t a t i s t i c s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageStatistics() returns statistics for each channel in the image. The
% statistics include the channel depth, its minima, maxima, mean, standard
% deviation, kurtosis and skewness. You can access the red channel mean, for
% example, like this:
%
% channel_statistics=GetImageStatistics(image,exception);
% red_mean=channel_statistics[RedPixelChannel].mean;
%
% Use MagickRelinquishMemory() to free the statistics buffer.
%
% The format of the GetImageStatistics method is:
%
% ChannelStatistics *GetImageStatistics(const Image *image,
% ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport ChannelStatistics *GetImageStatistics(const Image *image,
ExceptionInfo *exception)
{
ChannelStatistics
*channel_statistics;
double
area,
*histogram,
standard_deviation;
MagickStatusType
status;
QuantumAny
range;
register ssize_t
i;
size_t
depth;
ssize_t
y;
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
sizeof(*histogram));
channel_statistics=(ChannelStatistics *) AcquireQuantumMemory(
MaxPixelChannels+1,sizeof(*channel_statistics));
if ((channel_statistics == (ChannelStatistics *) NULL) ||
(histogram == (double *) NULL))
{
if (histogram != (double *) NULL)
histogram=(double *) RelinquishMagickMemory(histogram);
if (channel_statistics != (ChannelStatistics *) NULL)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
(void) ResetMagickMemory(channel_statistics,0,(MaxPixelChannels+1)*
sizeof(*channel_statistics));
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
channel_statistics[i].depth=1;
channel_statistics[i].maxima=(-MagickMaximumValue);
channel_statistics[i].minima=MagickMaximumValue;
}
(void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
sizeof(*histogram));
for (y=0; y < (ssize_t) image->rows; y++)
{
register const Quantum
*magick_restrict p;
register ssize_t
x;
/*
Compute pixel statistics.
*/
p=GetVirtualPixels(image,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
break;
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,p) == 0)
{
p+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
if (channel_statistics[channel].depth != MAGICKCORE_QUANTUM_DEPTH)
{
depth=channel_statistics[channel].depth;
range=GetQuantumRange(depth);
status=p[i] != ScaleAnyToQuantum(ScaleQuantumToAny(p[i],range),
range) ? MagickTrue : MagickFalse;
if (status != MagickFalse)
{
channel_statistics[channel].depth++;
i--;
continue;
}
}
if ((double) p[i] < channel_statistics[channel].minima)
channel_statistics[channel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[channel].maxima)
channel_statistics[channel].maxima=(double) p[i];
channel_statistics[channel].sum+=p[i];
channel_statistics[channel].sum_squared+=(double) p[i]*p[i];
channel_statistics[channel].sum_cubed+=(double) p[i]*p[i]*p[i];
channel_statistics[channel].sum_fourth_power+=(double) p[i]*p[i]*p[i]*
p[i];
channel_statistics[channel].area++;
if ((double) p[i] < channel_statistics[CompositePixelChannel].minima)
channel_statistics[CompositePixelChannel].minima=(double) p[i];
if ((double) p[i] > channel_statistics[CompositePixelChannel].maxima)
channel_statistics[CompositePixelChannel].maxima=(double) p[i];
histogram[GetPixelChannels(image)*ScaleQuantumToMap(
ClampToQuantum((double) p[i]))+i]++;
channel_statistics[CompositePixelChannel].sum+=(double) p[i];
channel_statistics[CompositePixelChannel].sum_squared+=(double)
p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_cubed+=(double)
p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].sum_fourth_power+=(double)
p[i]*p[i]*p[i]*p[i];
channel_statistics[CompositePixelChannel].area++;
}
p+=GetPixelChannels(image);
}
}
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Normalize pixel statistics.
*/
area=PerceptibleReciprocal(channel_statistics[i].area);
channel_statistics[i].sum*=area;
channel_statistics[i].sum_squared*=area;
channel_statistics[i].sum_cubed*=area;
channel_statistics[i].sum_fourth_power*=area;
channel_statistics[i].mean=channel_statistics[i].sum;
channel_statistics[i].variance=channel_statistics[i].sum_squared;
standard_deviation=sqrt(channel_statistics[i].variance-
(channel_statistics[i].mean*channel_statistics[i].mean));
standard_deviation=sqrt(PerceptibleReciprocal(channel_statistics[i].area-
1.0)*channel_statistics[i].area*standard_deviation*standard_deviation);
channel_statistics[i].standard_deviation=standard_deviation;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
double
number_bins;
register ssize_t
j;
/*
Compute pixel entropy.
*/
PixelChannel channel=GetPixelChannelChannel(image,i);
number_bins=0.0;
for (j=0; j <= (ssize_t) MaxMap; j++)
if (histogram[GetPixelChannels(image)*j+i] > 0.0)
number_bins++;
area=PerceptibleReciprocal(channel_statistics[channel].area);
for (j=0; j <= (ssize_t) MaxMap; j++)
{
double
count;
count=area*histogram[GetPixelChannels(image)*j+i];
if (number_bins > MagickEpsilon)
{
channel_statistics[channel].entropy+=-count*MagickLog10(count)/
MagickLog10(number_bins);
channel_statistics[CompositePixelChannel].entropy+=-count*
MagickLog10(count)/MagickLog10(number_bins)/
GetPixelChannels(image);
}
}
}
histogram=(double *) RelinquishMagickMemory(histogram);
for (i=0; i <= (ssize_t) MaxPixelChannels; i++)
{
/*
Compute kurtosis & skewness statistics.
*/
standard_deviation=PerceptibleReciprocal(
channel_statistics[i].standard_deviation);
channel_statistics[i].skewness=(channel_statistics[i].sum_cubed-3.0*
channel_statistics[i].mean*channel_statistics[i].sum_squared+2.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation);
channel_statistics[i].kurtosis=(channel_statistics[i].sum_fourth_power-4.0*
channel_statistics[i].mean*channel_statistics[i].sum_cubed+6.0*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].sum_squared-3.0*channel_statistics[i].mean*
channel_statistics[i].mean*channel_statistics[i].mean*
channel_statistics[i].mean)*(standard_deviation*standard_deviation*
standard_deviation*standard_deviation)-3.0;
}
if (y < (ssize_t) image->rows)
channel_statistics=(ChannelStatistics *) RelinquishMagickMemory(
channel_statistics);
return(channel_statistics);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% P o l y n o m i a l I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% PolynomialImage() returns a new image where each pixel is the sum of the
% pixels in the image sequence after applying its corresponding terms
% (coefficient and degree pairs).
%
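% For example, to average two images as 0.5*u + 0.5*v (a usage sketch with
% illustrative values; terms holds one coefficient,degree pair per image in
% the sequence):
%
% double terms[4] = { 0.5, 1.0, 0.5, 1.0 };
% polynomial_image=PolynomialImage(images,2,terms,exception);
%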
% The format of the PolynomialImage method is:
%
% Image *PolynomialImage(const Image *images,const size_t number_terms,
% const double *terms,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o images: the image sequence.
%
% o number_terms: the number of terms in the list. The actual list length
% is 2 x number_terms + 1 (the constant).
%
% o terms: the list of polynomial coefficients and degree pairs and a
% constant.
%
% o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *PolynomialImage(const Image *images,
const size_t number_terms,const double *terms,ExceptionInfo *exception)
{
#define PolynomialImageTag "Polynomial/Image"
CacheView
*polynomial_view;
Image
*image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelChannels
**magick_restrict polynomial_pixels;
size_t
number_images;
ssize_t
y;
assert(images != (Image *) NULL);
assert(images->signature == MagickCoreSignature);
if (images->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
image=CloneImage(images,images->columns,images->rows,MagickTrue,
exception);
if (image == (Image *) NULL)
return((Image *) NULL);
if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
{
image=DestroyImage(image);
return((Image *) NULL);
}
number_images=GetImageListLength(images);
polynomial_pixels=AcquirePixelThreadSet(images);
if (polynomial_pixels == (PixelChannels **) NULL)
{
image=DestroyImage(image);
(void) ThrowMagickException(exception,GetMagickModule(),
ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename);
return((Image *) NULL);
}
/*
Apply the polynomial to the image pixels.
*/
status=MagickTrue;
progress=0;
polynomial_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,image,image->rows,1)
#endif
for (y=0; y < (ssize_t) image->rows; y++)
{
CacheView
*image_view;
const Image
*next;
const int
id = GetOpenMPThreadId();
register ssize_t
i,
x;
register PixelChannels
*polynomial_pixel;
register Quantum
*magick_restrict q;
ssize_t
j;
if (status == MagickFalse)
continue;
q=QueueCacheViewAuthenticPixels(polynomial_view,0,y,image->columns,1,
exception);
if (q == (Quantum *) NULL)
{
status=MagickFalse;
continue;
}
polynomial_pixel=polynomial_pixels[id];
for (j=0; j < (ssize_t) image->columns; j++)
for (i=0; i < MaxPixelChannels; i++)
polynomial_pixel[j].channel[i]=0.0;
next=images;
for (j=0; j < (ssize_t) number_images; j++)
{
register const Quantum
*p;
if (j >= (ssize_t) number_terms)
continue;
image_view=AcquireVirtualCacheView(next,exception);
p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
if (p == (const Quantum *) NULL)
{
image_view=DestroyCacheView(image_view);
break;
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(next,p) == 0)
{
p+=GetPixelChannels(next);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(next); i++)
{
MagickRealType
coefficient,
degree;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(next,channel);
PixelTrait polynomial_traits=GetPixelChannelTraits(image,channel);
if ((traits == UndefinedPixelTrait) ||
(polynomial_traits == UndefinedPixelTrait))
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
coefficient=(MagickRealType) terms[2*j];
degree=(MagickRealType) terms[(j << 1)+1];
polynomial_pixel[x].channel[i]+=coefficient*
pow(QuantumScale*GetPixelChannel(image,channel,p),degree);
}
p+=GetPixelChannels(next);
}
image_view=DestroyCacheView(image_view);
next=GetNextImageInList(next);
}
for (x=0; x < (ssize_t) image->columns; x++)
{
register ssize_t
i;
if (GetPixelWriteMask(image,q) == 0)
{
q+=GetPixelChannels(image);
continue;
}
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
if (traits == UndefinedPixelTrait)
continue;
if ((traits & UpdatePixelTrait) == 0)
continue;
q[i]=ClampToQuantum(QuantumRange*polynomial_pixel[x].channel[i]);
}
q+=GetPixelChannels(image);
}
if (SyncCacheViewAuthenticPixels(polynomial_view,exception) == MagickFalse)
status=MagickFalse;
if (images->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_PolynomialImages)
#endif
proceed=SetImageProgress(images,PolynomialImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
polynomial_view=DestroyCacheView(polynomial_view);
polynomial_pixels=DestroyPixelThreadSet(polynomial_pixels);
if (status == MagickFalse)
image=DestroyImage(image);
return(image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S t a t i s t i c I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% StatisticImage() makes each pixel the min / max / median / mode / etc. of
% the neighborhood of the specified width and height.
%
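% For example, to replace each pixel with the median of its 3x3 neighborhood
% (a usage sketch, assuming image and exception are valid):
%
% statistic_image=StatisticImage(image,MedianStatistic,3,3,exception);
%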
% The format of the StatisticImage method is:
%
% Image *StatisticImage(const Image *image,const StatisticType type,
% const size_t width,const size_t height,ExceptionInfo *exception)
%
% A description of each parameter follows:
%
% o image: the image.
%
% o type: the statistic type (median, mode, etc.).
%
% o width: the width of the pixel neighborhood.
%
% o height: the height of the pixel neighborhood.
%
% o exception: return any errors or warnings in this structure.
%
*/
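/*
  A note on the data structure below (added for clarity): the neighborhood
  statistics use a skip-list histogram where nodes[0..65535] are bins over
  16-bit color values, node 65536 is the list header, and each node carries
  up to 9 forward links, so ordered scans (median, mode, nonpeak) stay cheap
  even for large neighborhoods.
*/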
typedef struct _SkipNode
{
size_t
next[9],
count,
signature;
} SkipNode;
typedef struct _SkipList
{
ssize_t
level;
SkipNode
*nodes;
} SkipList;
typedef struct _PixelList
{
size_t
length,
seed;
SkipList
skip_list;
size_t
signature;
} PixelList;
static PixelList *DestroyPixelList(PixelList *pixel_list)
{
if (pixel_list == (PixelList *) NULL)
return((PixelList *) NULL);
if (pixel_list->skip_list.nodes != (SkipNode *) NULL)
pixel_list->skip_list.nodes=(SkipNode *) RelinquishAlignedMemory(
pixel_list->skip_list.nodes);
pixel_list=(PixelList *) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList **DestroyPixelListThreadSet(PixelList **pixel_list)
{
register ssize_t
i;
assert(pixel_list != (PixelList **) NULL);
for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
if (pixel_list[i] != (PixelList *) NULL)
pixel_list[i]=DestroyPixelList(pixel_list[i]);
pixel_list=(PixelList **) RelinquishMagickMemory(pixel_list);
return(pixel_list);
}
static PixelList *AcquirePixelList(const size_t width,const size_t height)
{
PixelList
*pixel_list;
pixel_list=(PixelList *) AcquireMagickMemory(sizeof(*pixel_list));
if (pixel_list == (PixelList *) NULL)
return(pixel_list);
(void) ResetMagickMemory((void *) pixel_list,0,sizeof(*pixel_list));
pixel_list->length=width*height;
pixel_list->skip_list.nodes=(SkipNode *) AcquireAlignedMemory(65537UL,
sizeof(*pixel_list->skip_list.nodes));
if (pixel_list->skip_list.nodes == (SkipNode *) NULL)
return(DestroyPixelList(pixel_list));
(void) ResetMagickMemory(pixel_list->skip_list.nodes,0,65537UL*
sizeof(*pixel_list->skip_list.nodes));
pixel_list->signature=MagickCoreSignature;
return(pixel_list);
}
static PixelList **AcquirePixelListThreadSet(const size_t width,
const size_t height)
{
PixelList
**pixel_list;
register ssize_t
i;
size_t
number_threads;
number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
pixel_list=(PixelList **) AcquireQuantumMemory(number_threads,
sizeof(*pixel_list));
if (pixel_list == (PixelList **) NULL)
return((PixelList **) NULL);
(void) ResetMagickMemory(pixel_list,0,number_threads*sizeof(*pixel_list));
for (i=0; i < (ssize_t) number_threads; i++)
{
pixel_list[i]=AcquirePixelList(width,height);
if (pixel_list[i] == (PixelList *) NULL)
return(DestroyPixelListThreadSet(pixel_list));
}
return(pixel_list);
}
static void AddNodePixelList(PixelList *pixel_list,const size_t color)
{
register SkipList
*p;
register ssize_t
level;
size_t
search,
update[9];
/*
Initialize the node.
*/
p=(&pixel_list->skip_list);
p->nodes[color].signature=pixel_list->signature;
p->nodes[color].count=1;
/*
Determine where it belongs in the list.
*/
search=65536UL;
for (level=p->level; level >= 0; level--)
{
while (p->nodes[search].next[level] < color)
search=p->nodes[search].next[level];
update[level]=search;
}
/*
Generate a pseudo-random level for this node.
*/
for (level=0; ; level++)
{
pixel_list->seed=(pixel_list->seed*42893621L)+1L;
if ((pixel_list->seed & 0x300) != 0x300)
break;
}
if (level > 8)
level=8;
if (level > (p->level+2))
level=p->level+2;
/*
If we're raising the list's level, link back to the root node.
*/
while (level > p->level)
{
p->level++;
update[p->level]=65536UL;
}
/*
Link the node into the skip-list.
*/
do
{
p->nodes[color].next[level]=p->nodes[update[level]].next[level];
p->nodes[update[level]].next[level]=color;
} while (level-- > 0);
}
static inline void GetMaximumPixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color,
maximum;
ssize_t
count;
/*
Find the maximum value for each color.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
maximum=p->nodes[color].next[0];
do
{
color=p->nodes[color].next[0];
if (color > maximum)
maximum=color;
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
*pixel=ScaleShortToQuantum((unsigned short) maximum);
}
static inline void GetMeanPixelList(PixelList *pixel_list,Quantum *pixel)
{
double
sum;
register SkipList
*p;
size_t
color;
ssize_t
count;
/*
Find the mean value for each color.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
sum=0.0;
do
{
color=p->nodes[color].next[0];
sum+=(double) p->nodes[color].count*color;
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
*pixel=ScaleShortToQuantum((unsigned short) sum);
}
static inline void GetMedianPixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color;
ssize_t
count;
/*
Find the median value for each color.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
do
{
color=p->nodes[color].next[0];
count+=p->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
*pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetMinimumPixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color,
minimum;
ssize_t
count;
/*
Find the minimum value for each color.
*/
p=(&pixel_list->skip_list);
count=0;
color=65536UL;
minimum=p->nodes[color].next[0];
do
{
color=p->nodes[color].next[0];
if (color < minimum)
minimum=color;
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
*pixel=ScaleShortToQuantum((unsigned short) minimum);
}
static inline void GetModePixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color,
max_count,
mode;
ssize_t
count;
/*
Make each pixel the 'predominant color' of the specified neighborhood.
*/
p=(&pixel_list->skip_list);
color=65536L;
mode=color;
max_count=p->nodes[mode].count;
count=0;
do
{
color=p->nodes[color].next[0];
if (p->nodes[color].count > max_count)
{
mode=color;
max_count=p->nodes[mode].count;
}
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
*pixel=ScaleShortToQuantum((unsigned short) mode);
}
static inline void GetNonpeakPixelList(PixelList *pixel_list,Quantum *pixel)
{
register SkipList
*p;
size_t
color,
next,
previous;
ssize_t
count;
/*
Find the non-peak value for each color.
*/
p=(&pixel_list->skip_list);
color=65536L;
next=p->nodes[color].next[0];
count=0;
do
{
previous=color;
color=next;
next=p->nodes[color].next[0];
count+=p->nodes[color].count;
} while (count <= (ssize_t) (pixel_list->length >> 1));
if ((previous == 65536UL) && (next != 65536UL))
color=next;
else
if ((previous != 65536UL) && (next == 65536UL))
color=previous;
*pixel=ScaleShortToQuantum((unsigned short) color);
}
static inline void GetRootMeanSquarePixelList(PixelList *pixel_list,
Quantum *pixel)
{
double
sum;
register SkipList
*p;
size_t
color;
ssize_t
count;
/*
Find the root mean square value for each color.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
sum=0.0;
do
{
color=p->nodes[color].next[0];
sum+=(double) (p->nodes[color].count*color*color);
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
*pixel=ScaleShortToQuantum((unsigned short) sqrt(sum));
}
static inline void GetStandardDeviationPixelList(PixelList *pixel_list,
Quantum *pixel)
{
double
sum,
sum_squared;
register SkipList
*p;
size_t
color;
ssize_t
count;
/*
Find the standard-deviation value for each color.
*/
p=(&pixel_list->skip_list);
color=65536L;
count=0;
sum=0.0;
sum_squared=0.0;
do
{
register ssize_t
i;
color=p->nodes[color].next[0];
sum+=(double) p->nodes[color].count*color;
for (i=0; i < (ssize_t) p->nodes[color].count; i++)
sum_squared+=((double) color)*((double) color);
count+=p->nodes[color].count;
} while (count < (ssize_t) pixel_list->length);
sum/=pixel_list->length;
sum_squared/=pixel_list->length;
*pixel=ScaleShortToQuantum((unsigned short) sqrt(sum_squared-(sum*sum)));
}
static inline void InsertPixelList(const Quantum pixel,PixelList *pixel_list)
{
size_t
signature;
unsigned short
index;
index=ScaleQuantumToShort(pixel);
signature=pixel_list->skip_list.nodes[index].signature;
if (signature == pixel_list->signature)
{
pixel_list->skip_list.nodes[index].count++;
return;
}
AddNodePixelList(pixel_list,index);
}
static void ResetPixelList(PixelList *pixel_list)
{
int
level;
register SkipNode
*root;
register SkipList
*p;
/*
Reset the skip-list.
*/
p=(&pixel_list->skip_list);
root=p->nodes+65536UL;
p->level=0;
for (level=0; level < 9; level++)
root->next[level]=65536UL;
pixel_list->seed=pixel_list->signature++;
}
MagickExport Image *StatisticImage(const Image *image,const StatisticType type,
const size_t width,const size_t height,ExceptionInfo *exception)
{
#define StatisticImageTag "Statistic/Image"
CacheView
*image_view,
*statistic_view;
Image
*statistic_image;
MagickBooleanType
status;
MagickOffsetType
progress;
PixelList
**magick_restrict pixel_list;
ssize_t
center,
y;
/*
Initialize statistics image attributes.
*/
assert(image != (Image *) NULL);
assert(image->signature == MagickCoreSignature);
if (image->debug != MagickFalse)
(void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
assert(exception != (ExceptionInfo *) NULL);
assert(exception->signature == MagickCoreSignature);
statistic_image=CloneImage(image,image->columns,image->rows,MagickTrue,
exception);
if (statistic_image == (Image *) NULL)
return((Image *) NULL);
status=SetImageStorageClass(statistic_image,DirectClass,exception);
if (status == MagickFalse)
{
statistic_image=DestroyImage(statistic_image);
return((Image *) NULL);
}
pixel_list=AcquirePixelListThreadSet(MagickMax(width,1),MagickMax(height,1));
if (pixel_list == (PixelList **) NULL)
{
statistic_image=DestroyImage(statistic_image);
ThrowImageException(ResourceLimitError,"MemoryAllocationFailed");
}
/*
Make each pixel the min / max / median / mode / etc. of the neighborhood.
*/
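/*
"center" is the offset of the neighborhood's central pixel within the
region of virtual pixels fetched for each row below.
*/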
center=(ssize_t) GetPixelChannels(image)*(image->columns+MagickMax(width,1))*
(MagickMax(height,1)/2L)+GetPixelChannels(image)*(MagickMax(width,1)/2L);
status=MagickTrue;
progress=0;
image_view=AcquireVirtualCacheView(image,exception);
statistic_view=AcquireAuthenticCacheView(statistic_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(image,statistic_image,statistic_image->rows,1)
#endif
for (y=0; y < (ssize_t) statistic_image->rows; y++)
{
const int
id = GetOpenMPThreadId();
register const Quantum
*magick_restrict p;
register Quantum
*magick_restrict q;
register ssize_t
x;
if (status == MagickFalse)
continue;
p=GetCacheViewVirtualPixels(image_view,-((ssize_t) MagickMax(width,1)/2L),y-
(ssize_t) (MagickMax(height,1)/2L),image->columns+MagickMax(width,1),
MagickMax(height,1),exception);
q=QueueCacheViewAuthenticPixels(statistic_view,0,y,statistic_image->columns,1,exception);
if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
{
status=MagickFalse;
continue;
}
for (x=0; x < (ssize_t) statistic_image->columns; x++)
{
register ssize_t
i;
for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
{
Quantum
pixel;
register const Quantum
*magick_restrict pixels;
register ssize_t
u;
ssize_t
v;
PixelChannel channel=GetPixelChannelChannel(image,i);
PixelTrait traits=GetPixelChannelTraits(image,channel);
PixelTrait statistic_traits=GetPixelChannelTraits(statistic_image,
channel);
if ((traits == UndefinedPixelTrait) ||
(statistic_traits == UndefinedPixelTrait))
continue;
if (((statistic_traits & CopyPixelTrait) != 0) ||
(GetPixelWriteMask(image,p) == 0))
{
SetPixelChannel(statistic_image,channel,p[center+i],q);
continue;
}
if ((statistic_traits & UpdatePixelTrait) == 0)
continue;
pixels=p;
ResetPixelList(pixel_list[id]);
for (v=0; v < (ssize_t) MagickMax(height,1); v++)
{
for (u=0; u < (ssize_t) MagickMax(width,1); u++)
{
InsertPixelList(pixels[i],pixel_list[id]);
pixels+=GetPixelChannels(image);
}
pixels+=GetPixelChannels(image)*image->columns;
}
switch (type)
{
case GradientStatistic:
{
double
maximum,
minimum;
GetMinimumPixelList(pixel_list[id],&pixel);
minimum=(double) pixel;
GetMaximumPixelList(pixel_list[id],&pixel);
maximum=(double) pixel;
pixel=ClampToQuantum(MagickAbsoluteValue(maximum-minimum));
break;
}
case MaximumStatistic:
{
GetMaximumPixelList(pixel_list[id],&pixel);
break;
}
case MeanStatistic:
{
GetMeanPixelList(pixel_list[id],&pixel);
break;
}
case MedianStatistic:
default:
{
GetMedianPixelList(pixel_list[id],&pixel);
break;
}
case MinimumStatistic:
{
GetMinimumPixelList(pixel_list[id],&pixel);
break;
}
case ModeStatistic:
{
GetModePixelList(pixel_list[id],&pixel);
break;
}
case NonpeakStatistic:
{
GetNonpeakPixelList(pixel_list[id],&pixel);
break;
}
case RootMeanSquareStatistic:
{
GetRootMeanSquarePixelList(pixel_list[id],&pixel);
break;
}
case StandardDeviationStatistic:
{
GetStandardDeviationPixelList(pixel_list[id],&pixel);
break;
}
}
SetPixelChannel(statistic_image,channel,pixel,q);
}
p+=GetPixelChannels(image);
q+=GetPixelChannels(statistic_image);
}
if (SyncCacheViewAuthenticPixels(statistic_view,exception) == MagickFalse)
status=MagickFalse;
if (image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_StatisticImage)
#endif
proceed=SetImageProgress(image,StatisticImageTag,progress++,
image->rows);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
statistic_view=DestroyCacheView(statistic_view);
image_view=DestroyCacheView(image_view);
pixel_list=DestroyPixelListThreadSet(pixel_list);
if (status == MagickFalse)
statistic_image=DestroyImage(statistic_image);
return(statistic_image);
}
|
GB_binop__pair_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__pair_uint8)
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A.*B function (eWiseMult): GB ((none))
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__pair_uint8)
// C+=b function (dense accum): GB (_Cdense_accumb__pair_uint8)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__pair_uint8)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))
// C type: uint8_t
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = 1
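// (PAIR ignores both inputs: cij is always 1, so GB_GETA and GB_GETB below expand to no-ops)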
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
uint8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
;
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
;
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = 1 ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_PAIR || GxB_NO_UINT8 || GxB_NO_PAIR_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__pair_uint8)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__pair_uint8)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__pair_uint8)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *restrict Cx = (uint8_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__pair_uint8)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint8_t *Cx = (uint8_t *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
; ;
Cx [p] = 1 ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
#endif
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
#if 0
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
; ; \
Cx [pC] = 1 ; \
}
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
#endif
|
labels.h | /*
Algorithm for Steiner Problem in Graphs
Copyright (c) Microsoft Corporation
All rights reserved.
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED *AS IS*, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
#pragma once
#include "graph.h"
#include "binheap.h"
#include <omp.h>
struct Triple {
int u;
int v;
int w;
Triple(int _u, int _v, int _w) {
u = _u;
v = _v;
w = _w;
}
inline void Get(int &_u, int &_v, int &_w) const {
_u = u;
_v = v;
_w = w;
}
};
struct LabelEntry {
int hub;
int dist;
LabelEntry (int h, int d) {
hub = h;
dist = d;
}
LabelEntry() {
hub=-1; dist=-1;
//fprintf (stderr, "y");
}
};
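// Hub labels: labels[v] holds (hub,distance) pairs for vertex v; a forward
// label of s and a backward label of t are intersected to answer s-t queries.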
class Labels {
public:
vector<LabelEntry> *labels;
int n;
Labels(int _n) {
n = _n;
labels = new vector<LabelEntry>[n+1];
//LabelEntry *le = new LabelEntry[n+1];
//exit(-1);
/*
for (int i=0; i<=n; i++) {
LabelEntry x;
(labels[i]).push_back(x);
labels[i].push_back(x);
labels[i].push_back(x);
labels[i].push_back(x);
fprintf (stderr, "%d ", labels[i].size());
}*/
}
~Labels() {
fprintf (stderr, "Deleting labels.\n");
delete [] labels;
}
void sort () {
for (int i=0; i<=n; i++) {
if (labels[i].size() <= 1) continue;
std::sort(labels[i].begin(), labels[i].end(), [](const LabelEntry &a, const LabelEntry &b) {return a.hub < b.hub;});
}
}
void OutputLabel (int x) {
vector<LabelEntry> &label = labels[x];
fprintf (stderr, "%d: ", x);
for (int i=0; i<label.size(); i++) {
fprintf (stderr, "%d:%d ", label[i].hub, label[i].dist);
}
fprintf (stderr, "\n");
}
void Output() {
for (int i=1; i<=n; i++) {OutputLabel(i);}
}
void AddEntry (int lab, int hub, int dist) {
labels[lab].push_back(LabelEntry(hub,dist));
}
};
template <class T> class Matrix {
public:
T **dist;
int n;
inline void fatal (const string &msg) {
fprintf (stderr, "%s", msg.c_str());
exit(-1);
}
void Init(int _n) {
if (dist) {
fatal ("Cannot be initialized twice.\n");
}
n = _n;
dist = new T *[n+1];
for (int v=0; v<=n; v++) {
dist[v] = new T [n+1];
}
}
Matrix() {
n = 0;
dist = NULL;
}
Matrix (T _n) {
dist = NULL;
Init(_n);
}
inline T Get(int i, int j) const {
return dist[i][j];
}
inline void Set (int i, int j, T value) {
//if (i<1 || i>n) fatal ("invalid range");
//if (j<1 || j>n) fatal ("invalid range");
dist[i][j] = value;
}
inline void Add (int i, int j, T value) {
dist[i][j] += value;
}
inline T increment (int i, int j) {
return (++dist[i][j]);
}
inline T decrement (int i, int j) {
return (--dist[i][j]);
}
void Output(FILE *file) {
for (int v=1; v<=n; v++) {
for (int w=1; w<=n; w++) {
int d = dist[v][w];
if (d >= 1000000000) fprintf (stderr, "<%d,%d>", v,w);
fprintf (file, "%9d", (int)dist[v][w]);
}
fprintf (file, "\n");
}
}
void Reset(T value) {
for (int v=0; v<=n; v++) {
for (int w=0; w<=n; w++) {
dist[v][w] = value;
}
}
}
~Matrix () {
fprintf (stderr, "Deleting matrix... ");
fflush(stderr);
for (int v=0; v<=n; v++) delete [] dist[v];
delete [] dist;
fprintf (stderr, "done.\n");
fflush(stderr);
}
};
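// Array-based FIFO for BFS-style traversals; Reset() undoes only the
// insertions of the previous search, so repeated searches are cheap.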
class BFSData {
private:
vector<int> inserted;
vector<int> queue;
int n; //maximum id
int nextins; //position of next insertion
int nextrem; //position of next removal
public:
void Reset (int _n) {
n = _n;
inserted.resize(n+1,0);
queue.resize(n+1,0);
nextins = 0;
nextrem = 0;
}
BFSData () {
n = 0;
}
BFSData (int _n) {
Reset(_n);
}
inline int InsertionCount() const {
return nextins;
}
inline int GetElement(int i) const {
return queue[i];
}
inline void Insert(int x) {
queue[nextins++] = x;
inserted[x] = true;
}
inline int Remove() {
return (queue[nextrem++]);
}
inline bool IsInserted(int x) const {return inserted[x];}
inline bool IsEmpty() const {
return (nextins == nextrem);
}
// make the list empty
void Reset() {
for (int i=0; i<nextins; i++) {inserted[queue[i]] = false;}
nextins = nextrem = 0;
}
~BFSData() {}
};
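// Bookkeeping for the label-greedy score: for each candidate hub v, how many
// uncovered paths start/end at each vertex, and how many forward/backward
// labels picking v would grow.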
class CoverCounters {
public:
int n;
Matrix<int> sourcecount; //sourcecount(h,v): number of uncovered paths starting at v covered by h
Matrix<int> targetcount; //targetcount(h,v): number of uncovered paths ending at v covered by h
vector<int> fwdcount; //number of forward labels h would be added to
vector<int> backcount; //number of backward labels h would be added to
void Reset() {
sourcecount.Reset(0);
targetcount.Reset(0);
for (int i=0; i<=n; i++) {
fwdcount[i] = backcount[i] = 0;
}
}
CoverCounters(int _n) {
n = _n;
sourcecount.Init(n);
targetcount.Init(n);
fwdcount.resize(n+1,0);
backcount.resize(n+1,0);
Reset();
}
inline void AddPath (int u, int v, int w) {
if (sourcecount.Get(v,u)==0) fwdcount[v] ++;
sourcecount.Add(v,u,1);
if (targetcount.Get(v,w)==0) backcount[v]++;
targetcount.Add(v,w,1);
}
inline void AddPath2 (int u, int v, int w) {
if (sourcecount.increment(v,u)==1) fwdcount[v]++;
if (targetcount.increment(v,w)==1) backcount[v]++;
//sourcecount.increment(v,u); //==1) fwdcount[v]++;
//targetcount.increment(v,w); //==1) backcount[v]++;
}
inline void AddPathSimple (Triple &t) {
sourcecount.increment(t.v,t.u);
targetcount.increment(t.v,t.w);
}
inline void AddPathSimple (int u, int v, int w) {
sourcecount.increment(v,u);
targetcount.increment(v,w);
}
inline void RemovePathSimple (int u, int v, int w) {
sourcecount.decrement(v,u);
targetcount.decrement(v,w);
}
inline void AddPath2 (Triple &t) {
if (sourcecount.increment(t.v,t.u)==1) fwdcount[t.v]++;
if (targetcount.increment(t.v,t.w)==1) backcount[t.v]++;
//sourcecount.increment(v,u); //==1) fwdcount[v]++;
//targetcount.increment(v,w); //==1) backcount[v]++;
}
void FixLabelCounters(int v) {
int fc = 0;
int bc = 0;
for (int x=1; x<=n; x++) {
if (sourcecount.Get(v,x)>0) fc ++;
if (targetcount.Get(v,x)>0) bc ++;
}
fwdcount[v] = fc;
backcount[v]= bc;
//fprintf (stderr, "%d %d ", fc - fwdcount[v], bc - backcount[v]);
}
void RemovePath (int u, int v, int w) {
sourcecount.Add(v,u,-1);
if (sourcecount.Get(v,u)==0) fwdcount[v] --;
targetcount.Add(v,w,-1);
if (targetcount.Get(v,w)==0) backcount[v]--;
}
void Output(FILE *file) {
sourcecount.Output(file);
targetcount.Output(file);
}
};
class LabelSolver {
public:
int ncount;
void DensestSubgraph(Graph &g) {
fprintf (stderr, "Should be computing densest subgraph.\n");
int n = g.VertexCount();
vector<int> curdeg(n+1,0);
vector<bool> present(n+1,true);
BinaryHeap<double> heap(n);
for (int v=1; v<=n; v++) {
curdeg[v] = g.GetDegree(v);
heap.Insert(v, curdeg[v]);
}
int curedges = g.EdgeCount();
int curvertices = n;
double maxdensity = 0;
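// Greedy peeling: repeatedly remove a minimum-degree vertex and track the
// best density seen; this is the classic 2-approximation for the densest
// subgraph.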
// QUADRATIC IMPLEMENTATION!
int count = 0;
bool verbose = false;
while (!heap.IsEmpty()) {
double curdensity = (double) curedges / (double) curvertices;
if (curdensity > maxdensity) maxdensity = curdensity;
if (verbose) fprintf (stderr, "%6d: Started on graph with %d vertices and %d edges, density %.2f.\n", count, curvertices, curedges, curdensity);
count ++;
unsigned int v;
double deg;
heap.RemoveFirst(v, deg);
if (!present[v]) fprintf (stderr, "Invalid removal.\n");
// find lowest degree vertex to remove
/*
int v = -1;
for (int t=1; t<=n; t++) {
if (!present[t]) continue;
if (v<=0 || curdeg[t] < curdeg[v]) {v = t;}
}*/
if (verbose) fprintf (stderr, "Removing vertex %d with degree %d: ", v, curdeg[v]);
present[v] = false;
curvertices--;
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
int w = a->head; //neighbor
if (!present[w]) continue;
curdeg[v] --;
curdeg[w] --;
heap.Insert(w, curdeg[w]);
curedges --;
}
}
fprintf (stderr, "Maximum density = %.2f\n", maxdensity);
}
void ComputeCoverage2(Graph &g, vector<int> &curcover, Matrix<int> &covered, Matrix<int> &dt, bool LABEL_GREEDY, CoverCounters *covercount) {
int n = g.VertexCount();
RFWTimer timer(true);
// compute curcover[v]: number of pairs that are hit by v
for (int u=1; u<=n; u++) {
//int duv = dt.Get(u,v);
for (int w=1; w<=n; w++) {
if (covered.Get(u,w)) continue; //unreachable stuff
int duw = dt.Get(u,w);
for (int v=1; v<=n; v++) {
if (duw == dt.Get(u,v) + dt.Get(v,w)) {
//path u--w covered by v
curcover[v] ++;
if (LABEL_GREEDY) {covercount->AddPath(u,v,w);}
}
}
}
//curcover[v] = vcount;
if (u % 10 == 0) {
fprintf (stderr, "%d(%.2f) ", u, n * timer.getTime() / u);
fflush(stderr);
}
}
}
void MatrixComputeCoverage(Graph &g, vector<int> &curcover, Matrix<int> &covered, Matrix<int> &dt, bool LABEL_GREEDY, CoverCounters *covercount) {
fprintf (stderr, "Matrix-based coverage computation started.\n");
int n = g.VertexCount();
RFWTimer timer(true);
int threads = omp_get_max_threads();
fprintf (stderr, "Should be working with %d threads.\n", threads);
vector<Triple> *triples = new vector<Triple> [threads];
// making v the external loop seems to be advantageous; w in the internal loop because
// the access to the distance table is sequential (we only look at rows u and v)
// compute curcover[v]: number of pairs that are hit by v
// Takes Theta(n^3) time because we are not using the graph---but reasonably efficient
#pragma omp parallel for schedule(dynamic)
for (int v=1; v<=n; v++) {
int tid = omp_get_thread_num();
vector<Triple> &curtrip = triples[tid];
curtrip.clear();
int vcount = 0;
for (int u=1; u<=n; u++) {
int duv = dt.Get(u,v);
for (int w=1; w<=n; w++) {
// is path u-v covered by w?
if (dt.Get(u,w) == duv + dt.Get(v,w)) { //awesome sequential access on u and w arrays
if (covered.Get(u,w)) continue; //unreachable stuff
vcount ++;
if (LABEL_GREEDY) {
curtrip.push_back(Triple(u,v,w));
//#pragma omp critical
//{
// covercount->AddPath(u,v,w);
//}
}
}
}
}
curcover[v] = vcount;
int tcount = curtrip.size();
if (tcount>0) {
#pragma omp critical
for (int i=0; i<tcount; i++) {
covercount->AddPath2(curtrip[i]);
}
}
if (v % 10 == 0) {
fprintf (stderr, "%d(%.2f) ", v, n * timer.getTime() / v);
fflush(stderr);
}
}
}
//-----------------------------------------------------------
// Do a search from v, following only vertices w such that:
// - u-v-w is a shortest path
// - the u-v-w path is not yet covered
// - w is a target we care about (BAD!)
// WARNING: THIS IS COMPLETELY WRONG.
//-----------------------------------------------------------
void TargetDAGRestricted (Graph &g, int u, int v, int bestv, Matrix<int> &dt, Matrix<int> &covered, vector<int> &targets, BFSData &bfsdata) {
bfsdata.Reset();
bfsdata.Insert(v);
int n = g.VertexCount();
int duv = dt.Get(u,v);
while (!bfsdata.IsEmpty()) {
int x = bfsdata.Remove();
SPGArc *a, *end;
int dx = dt.Get(u,x); //duv + dt.Get(v,x); //distance to x through v
//if (dx != dt.Get(u,x)) fprintf (stderr, "!");
for (g.GetBounds(x,a,end); a<end; a++) {
int w = a->head;
if (bfsdata.IsInserted(w)) continue; //already seen w in the search
//if (!targets[w]) continue; //w is not a target
//if (covered.Get(u,w)>0 && covered.Get(u,w)!=bestv) continue;
//fprintf (stderr, ".");
int c = covered.Get(u,w);
if (c==bestv || c==0) {
if (dx + a->cost == dt.Get(u,w)) bfsdata.Insert(w);
}
//} else badarcs ++; // arc does not lead to improved distance...
}
}
}
// run Dijkstra from v, visiting only vertices w such that u-v-w is a shortest path
void TargetDAG (Graph &g, int u, int v, Matrix<int> &dt, Matrix<int> &covered, BFSData &bfsdata) {
bfsdata.Reset();
bfsdata.Insert(v);
int n = g.VertexCount();
int duv = dt.Get(u,v);
while (!bfsdata.IsEmpty()) {
int x = bfsdata.Remove();
SPGArc *a, *end;
int dx = dt.Get(u,x); //duv + dt.Get(v,x); //distance to x through v
//if (dx != dt.Get(u,x)) fprintf (stderr, "!");
for (g.GetBounds(x,a,end); a<end; a++) {
int w = a->head;
if (bfsdata.IsInserted(w)) continue;
if (covered.Get(u,w)) continue;
if (dx + a->cost == dt.Get(u,w)) bfsdata.Insert(w);
//} else badarcs ++; // arc does not lead to improved distance...
}
}
}
void TraverseDAG (Graph &g, int s, Matrix<int> &dt, BFSData &bfsdata) {
bfsdata.Reset();
bfsdata.Insert(s); //start a search from s
int n = g.VertexCount();
int debugcount = 0;
int badarcs = 0;
while (!bfsdata.IsEmpty()) {
int v = bfsdata.Remove();
debugcount ++;
if (debugcount > n) {
fprintf (stderr, "Infinite loop.\n");
exit(-1);
}
SPGArc *a, *end;
int dsv = dt.Get(s,v);
for (g.GetBounds(v,a,end); a<end; a++) {
int w = a->head;
if (dsv + a->cost == dt.Get(s,w)) {
if (bfsdata.IsInserted(w)) continue;
bfsdata.Insert(w);
} else badarcs ++; // arc does not lead to improved distance...
}
}
//fprintf (stderr, "<%.5f> ", 100 * (double)badarcs / (2.0 * g.EdgeCount()));
}
void TraverseDAG(Graph &g, int s, int t, Matrix<int> &dt, BFSData &bfsdata) {
bfsdata.Reset();
bfsdata.Insert(s); //start a search from s
int n = g.VertexCount();
int dst = dt.Get(s,t);
int debugcount = 0;
while (!bfsdata.IsEmpty()) {
int v = bfsdata.Remove();
debugcount ++;
if (debugcount > n) {
fprintf (stderr, "Infinite loop.\n");
exit(-1);
}
// v hits the u-w path
// curcover[v] ++;
//int dsv = dt.Get(s,v);
//if (dsv + dt.Get(v,t) != dst) continue;
int dsv = dt.Get(s,v);
int gap = dst - dsv; // this is how much is missing from the path
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
int x = a->head;
if (bfsdata.IsInserted(x)) continue; //already seen this vertex
//if (dt.Get(s,x) + dt.Get(x,t) == dst) bfsdata.Insert(x);
if (a->cost + dt.Get(x,t) == gap) bfsdata.Insert(x); //WARNING: dist x--t is expensive
}
}
}
// increase curcover[v] for all vertices v that are on the shortest s-t path.
void UpdateCoverage (vector<int> &curcover, int s, int t, Matrix<int> &dt, vector<int> &vlist, bool LABEL_GREEDY, CoverCounters *covercount) {
int count = vlist.size();
int curcount = 0;
//fprintf (stderr, "<%d> ", count);
int dst = dt.Get(s,t);
for (int i=0; i<count; i++) {
int v = vlist[i];
if (dst == dt.Get(s,v) + dt.Get(v,t)) {
curcount ++;
curcover[v] ++;
if (LABEL_GREEDY) covercount->AddPath(s,v,t);
}
}
//fprintf (stderr, "%.0f ", 100.0 * curcount / count);
}
void FancyComputeCoverage(Graph &g, vector<int> &curcover, Matrix<int> &covered, Matrix<int> &dt, bool LABEL_GREEDY, CoverCounters *covercount) {
fprintf (stderr, "Computing coverage using fancy algorithm!\n");
int n = g.VertexCount();
BFSData fulldata(n);
BFSData localdata(n);
vector<int> tdone(n+1, 0);
vector<int> relevant;
vector<int> todo;
relevant.reserve(n+1);
todo.reserve(n+1);
//vector<int> bestcand(n+1,0);
RFWTimer timer (true);
for (int s=1; s<=n; s++) {
for (int v=1; v<=n; v++) tdone[v] = 0;
TraverseDAG(g,s,dt,fulldata);
int fcount = fulldata.InsertionCount();
int searches = 0;
// visit vertices in decreasing order of "distance" from s
for (int i=fcount-1; i>=0; i--) {
int t = fulldata.GetElement(i);
if (tdone[t]) continue;
tdone[t] = 1;
searches ++;
TraverseDAG(g,s,t,dt,localdata);
int icount = localdata.InsertionCount();
//fprintf (stderr, "[%d] ", icount);
relevant.clear();
todo.clear();
// look at all elements visited by the search
for (int i=0; i<icount; i++) {
int v = localdata.GetElement(i);
curcover[v]++;
if (LABEL_GREEDY) covercount->AddPath(s,v,t);
relevant.push_back(v);
if (!tdone[v]) todo.push_back(v); //got nontrivial information about this vertex
}
// Let v be a vertex that hits the s-t pair.
// Every vertex that hits s-v must also hit s-t.
// To find the vertices that hit s-v, we only need
// to look among those that hit s-t. This is done below.
for (int i=todo.size()-1; i>=0; i--) {
int v = todo[i];
UpdateCoverage(curcover, s, v, dt, relevant, LABEL_GREEDY, covercount);
tdone[v] = 1;
}
}
//fprintf (stderr, "<%.0f:%d>\n", 100.0 * (double)searches / (double)fcount, fcount);
if (s%10 == 0) {
fprintf (stderr, "%d (%.2f) ", s, n * timer.getTime() / (double)s);
fflush(stderr);
}
}
}
void GraphComputeCoverageCenter (Graph &g, vector<int> &curcover, Matrix<int> &covered, Matrix<int> &dt, bool LABEL_GREEDY, CoverCounters *covercount) {
int n = g.VertexCount();
RFWTimer timer (true);
RFWTimer crittimer (false);
int threads = omp_get_max_threads();
fprintf (stderr, "Should be working with %d threads.\n", threads);
// we're looking for u-v-w paths
#pragma omp parallel for schedule(dynamic)
for (int v=1; v<=n; v++) {
int tid = omp_get_thread_num();
BFSData bfsdata(n);
int vcount = 0;
for (int u=1; u<=n; u++) {
if (covered.Get(u,v)) continue;
TargetDAG(g, u, v, dt, covered, bfsdata); //could do a little clean-up here...
int wcount = bfsdata.InsertionCount();
for (int i=0; i<wcount; i++) {
int w = bfsdata.GetElement(i);
vcount ++;
if (LABEL_GREEDY) covercount->AddPathSimple(u,v,w); //this only affects v
}
}
curcover[v] = vcount;
if (LABEL_GREEDY) covercount->FixLabelCounters(v);
if (v%10 == 0) {
fprintf (stderr, " %d(%.2f)", v, (double)n*timer.getTime() / (double)v);
fflush(stderr);
}
}
}
void GraphComputeCoverage(Graph &g, vector<int> &curcover, Matrix<int> &covered, Matrix<int> &dt, bool LABEL_GREEDY, CoverCounters *covercount) {
int n = g.VertexCount();
RFWTimer timer (true);
RFWTimer crittimer (false);
const bool FANCY = true;
int threads = omp_get_max_threads();
fprintf (stderr, "Should be working with %d threads.\n", threads);
vector<Triple> *triples = new vector<Triple> [threads];
fprintf (stderr, "Computing coverage using graph algorithm!\n");
#pragma omp parallel for schedule(dynamic)
for (int u=1; u<=n; u++) {
int tid = omp_get_thread_num();
if (FANCY) {
//triples[tid].reserve(n);
triples[tid].clear();
}
BFSData bfsdata(n);
for (int w=1; w<=n; w++) {
if (covered.Get(u,w)) continue; //these are unreachable!
//we'll visit all vertices that hit the u,w path
TraverseDAG(g,u,w,dt,bfsdata);
int icount = bfsdata.InsertionCount();
int v;
if (FANCY) {
for (int i=0; i<icount; i++) {
v = bfsdata.GetElement(i);
triples[tid].push_back(Triple(u,v,w));
}
} else {
//warning: this is too fine-grained
#pragma omp critical
for (int i=0; i<icount; i++) {
curcover[v=bfsdata.GetElement(i)]++;
if (LABEL_GREEDY) {covercount->AddPath(u,v,w);} //this only affects stuff that have to do with v
}
}
}
if (FANCY) {
vector<Triple> &curt = triples[tid];
//std::sort(curt.begin(), curt.end(), [](const Triple &a, const Triple &b) {return a.v < b.v;});
#pragma omp critical
{
crittimer.resume();
int tcount = curt.size();
for (int i=0; i<tcount; i++) {
int u, v, w;
curt[i].Get(u,v,w);
// this will only affect memory positions indexed by v, so it could be made parallel
//fprintf (stderr, "[%d,%d,%d] ", u, v, w);
curcover[v]++;
if (LABEL_GREEDY) {covercount->AddPath2(u,v,w);}
}
crittimer.pause();
}
}
if (u%10 == 0) {
fprintf (stderr, " %d(%.2f)", u, (double)n*timer.getTime() / (double)u);
fprintf (stderr, "[%.0f] ", 100.0 * crittimer.getTime() / timer.getTime());
fflush(stderr);
}
}
delete [] triples;
}
struct ThreadData {
vector<Triple> triples;
BFSData coveragebfs;
vector<int> targetlist;
int vcount;
double totalcovered;
void Reset(int n) {
triples.reserve(n+1);
targetlist.reserve(n+1);
coveragebfs.Reset(n);
vcount = 0;
totalcovered = 0;
}
};
struct DecreaseThreadData {
//vector<Triple> triples;
BFSData coveragebfs;
//vector<int> targetlist;
//int vcount;
vector<int> target;
vector<int> targetlist;
double totalcovered;
void Reset(int n) {
//triples.reserve(n+1);
targetlist.reserve(n+1);
target.resize(n+1,0);
coveragebfs.Reset(n);
//vcount = 0;
totalcovered = 0;
}
};
void DecreaseCoverageMatrixCenter (Graph &g, int bestv, vector<pair<int,int>> &pairs, vector<int> &curcover, Matrix<int> &dt, CoverCounters *covercount, Matrix<int> &covered, bool LABEL_GREEDY) {
fprintf (stderr, "DCMC ");
int npairs = pairs.size();
if (npairs == 0) fprintf (stderr, "!");
int n = g.VertexCount();
vector<int> ulist;
vector<int> first (n+1,-1);
pairs.push_back(make_pair(-1,-1)); //sentinel
for (int p=0; p<npairs; p++) {
int u = pairs[p].first;
int w = pairs[p].second;
if (p==0 || (pairs[p-1].first != u)) {
first[u] = p;
ulist.push_back(u);
}
}
sort(ulist.begin(), ulist.end());
fprintf (stderr, "pair%d src%d ", pairs.size(), ulist.size());
// we're looking for u-v-w paths
//#pragma omp parallel for schedule(dynamic)
#pragma omp parallel for schedule(dynamic)
for (int v=1; v<=n; v++) {
int ucount = ulist.size();
int vcount = 0;
for (int iu=0; iu<ucount; iu++) {
int u = ulist[iu];
if ((covered.Get(u,v)>0) && (covered.Get(u,v)!=bestv)) continue;
int duv = dt.Get(u,v);
for (int j=first[u]; pairs[j].first == u; j++) {
int w = pairs[j].second;
//targetlist.push_back(w);
//targets[w] = 1;
if (duv + dt.Get(v,w) == dt.Get(u,w)) {
vcount ++;
if (LABEL_GREEDY) covercount->RemovePathSimple(u,v,w); //this only affects v
}
}
}
curcover[v] -= vcount;
if (LABEL_GREEDY) covercount->FixLabelCounters(v);
}
}
//-------------------------------------------------------------------------
// Decrease coverage of all vertices that hit a given set of shortest paths
//-------------------------------------------------------------------------
void DecreaseCoverageGraphCenter (Graph &g, int bestv, vector<pair<int,int>> &pairs, vector<int> &curcover, Matrix<int> &dt, CoverCounters *covercount, Matrix<int> &covered, bool LABEL_GREEDY) {
RFWTimer timer(true);
fflush(stderr);
int n = g.VertexCount();
int nthreads = omp_get_max_threads();
fprintf (stderr, "Should be decreasing coverage with %d threads.\n", nthreads);
DecreaseThreadData *tdata = new DecreaseThreadData[nthreads];
for (int t=0; t<nthreads; t++) {
tdata[t].Reset(n);
}
vector<int> ulist;
vector<int> first (n+1,-1);
int npairs = pairs.size();
pairs.push_back(make_pair(-1,-1)); //sentinel
for (int p=0; p<npairs; p++) {
int u = pairs[p].first;
int w = pairs[p].second;
if (p==0 || (pairs[p-1].first != u)) {
first[u] = p;
ulist.push_back(u);
}
}
sort(ulist.begin(), ulist.end());
fprintf (stderr, "There are %d pairs from %d sources.\n", pairs.size(), ulist.size());
// we're looking for u-v-w paths
#pragma omp parallel for schedule(dynamic)
for (int v=1; v<=n; v++) {
int tid = omp_get_thread_num();
BFSData &bfsdata = tdata[tid].coveragebfs;
vector<int> &targets = tdata[tid].target;
vector<int> &targetlist = tdata[tid].targetlist;
int vcount = 0;
int ucount = ulist.size();
for (int iu=0; iu<ucount; iu++) {
int u = ulist[iu];
//if (covered.Get(u,v)!=bestv) continue;
//int c = covered.Get(u,v);
//if (c>0 && c!=bestv) continue; // path u-v has already been covered---every descendent will be covered as well
// build incidence vector of targets
targetlist.clear();
for (int j=first[u]; pairs[j].first == u; j++) {
int w = pairs[j].second;
targetlist.push_back(w);
targets[w] = 1;
}
//int wcount = targetlist.size();
TargetDAGRestricted(g, u, v, bestv, dt, covered, targets, bfsdata);
int wcount = bfsdata.InsertionCount();
//fprintf (stderr, "%d ", wcount);
for (int i=0; i<wcount; i++) {
int w = bfsdata.GetElement(i);
if (targets[w]) {
vcount ++;
if (LABEL_GREEDY) covercount->RemovePathSimple(u,v,w); //this only affects v
}
}
for (int j=first[u]; pairs[j].first == u; j++) {
int w = pairs[j].second;
targets[w] = 0;
}
for (int w=1; w<=n; w++) {
if (targets[w]) fprintf (stderr, "B%d ", w);
}
}
if (v==105) fprintf (stderr, "Updating %d by %d to %d.\n", v, vcount, curcover[v] - vcount);
curcover[v] -= vcount;
// WARNING: BAD!
if (LABEL_GREEDY) covercount->FixLabelCounters(v);
if (v%10 == 0) {
fprintf (stderr, " %d(%.2f)", v, (double)n*timer.getTime() / (double)v);
fflush(stderr);
}
}
delete [] tdata;
}
//---------------------------------------------------------------------------------------------------
// Decrease coverage of all vertices that currently hit an s-t path, with t \in targetlist.
// Does so by performing a BFS from s, but only scanning vertices that cover at least one such path.
//---------------------------------------------------------------------------------------------------
void DecreaseCoverageGraph (Graph &g, int s, int bestv, vector<int> &curcover, vector<int> &targetlist, Matrix<int> &dt, CoverCounters *covercount, BFSData &bfs, Matrix<int> &covered, vector<Triple> &triples) {
int tcount = targetlist.size();
bfs.Reset();
bfs.Insert(s); //start starts at s
//int n = g.VertexCount();
//vector<int> dst(n+1);
//for (int t=1; t<=n; t++) dst[t] = dt.Get(s,t);
while (!bfs.IsEmpty()) {
int v = bfs.Remove();
int c = covered.Get(s,v);
if (c>0 && c!=bestv) continue; //if the s-v path is previously covered, any superpath was already covered as well
int dsv = dt.Get(s,v);
bool scan = false; //does it make sense to scan this vertex?
if (v == bestv) {scan = true;} // vertex just picked; it's already been taken care of, but we need to keep scanning
else {
// check for which t vertex v is on the shortest s-t path
const bool DEBUG_COUNT = false;
int count = 0;
for (int i=0; i<tcount; i++) {
int t = targetlist[i];
if (dt.Get(s,t) == dsv + dt.Get(v,t)) { //could precompute dist(s,t)
//if (dst[t] == dsv + dt.Get(v,t)) { //could precompute dist(s,t)
scan = true;
if (DEBUG_COUNT) count ++;
triples.push_back(Triple(s,v,t));
}
}
if (DEBUG_COUNT) fprintf (stderr, "%.0f ", 100.0 * (double)count / (double)tcount);
}
//fprintf (stderr, "%d", scan);
if (scan) {
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
int w = a->head;
if (bfs.IsInserted(w)) continue;
//if (dsv + a->cost != dt.Get(s,w)) continue; //not really relevant if it is a shortest path
bfs.Insert(w);
}
}
}
//fprintf (stderr, "\n", stderr);
}
void DecreaseCoverageMatrix(int n, int s, vector<int> &curcover, int bestv, vector<int> &targetlist, Matrix<int> &dt, CoverCounters *covercount) {
// For fixed s, consider all w such that the s-w path is newly covered by bestv.
// For each such path, we must find all other vertices x that hit s-w as well.
int tcount = targetlist.size();
for (int x=1; x<=n; x++) { // try all possible middle vertices
if (curcover[x] == 0) continue; //vertex no longer relevant
if (x==bestv) continue;
int dux = dt.Get(s,x); // prefix
for (int i=0; i<tcount; i++) {
int w = targetlist[i];
if (dt.Get(s,w) == dux + dt.Get(x,w)) {
curcover[x] --;
if (covercount) {covercount->RemovePath(s,x,w);}
}
}
}
}
int SetActualThreads(int maxthreads, bool verbose) {
int available_threads = omp_get_max_threads();
int threads;
if (maxthreads == -1) threads = available_threads;
else threads = min(maxthreads, available_threads);
/*if (maxthreads != -1) {cores = min(cores, numCores);}*/
omp_set_num_threads(threads);
if (verbose) {
fprintf (stderr, "threadsmax %d\n", maxthreads);
fprintf (stderr, "threadsavailable %d\n", available_threads);
fprintf (stderr, "threadsactual %d\n", threads);
fflush (stderr);
}
return threads;
}
void BFS (Graph &g, int r, BFSData &bfsdata) {
bfsdata.Reset();
bfsdata.Insert(r);
while (!bfsdata.IsEmpty()) {
int v = bfsdata.Remove();
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
int w = a->head;
if (!bfsdata.IsInserted(w)) bfsdata.Insert(w);
}
}
}
void OutputMETIS(FILE *file, Graph &g, vector<int> &old2new) {
int newn = 0;
int oldn = g.VertexCount();
fprintf (stderr, "Outputting metis file.\n");
fflush(stderr);
for (int v=1; v<=oldn; v++) {
newn = std::max(old2new[v], newn);
}
fprintf (stderr, "Outputting graph with %d vertices.\n", newn);
fflush(stderr);
int acount = 0;
for (int step=0; step<=1; step++) {
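// step 0 only counts surviving arcs; step 1 writes the METIS header and adjacency lists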
if (step==1) {
fprintf (file, "%d %d 0\n", newn, acount/2);
}
for (int v=1; v<=oldn; v++) {
if (old2new[v] <= 0) continue;
fprintf (stderr, "<%d> ", old2new[v]);
int count = 0;
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
int w = old2new[a->head];
if (w <= 0) continue;
if (step==0) acount ++;
else {
if (count++ > 0) fprintf (file, " ");
fprintf (file, "%d", w);
}
}
if (step==1) fprintf (file, "\n");
}
if (step==0) {
fprintf (stderr, "It appears the graph has %d arcs (%d edges) and %d vertices.\n", acount, acount/2, newn);
}
}
}
void ExtractGiantComponent(Graph &g) {
int n = g.VertexCount();
BFSData bfsdata(n);
vector<int> component(n+1,-1);
vector<int> compsize(n+1,0);
int gc = 0;
fprintf (stderr, "Extracting giant component.\n");
fflush(stderr);
for (int v=1; v<=n; v++) {
if (component[v]>=0) continue;
BFS(g,v,bfsdata);
int size = bfsdata.InsertionCount();
for (int i=0; i<size; i++) {
int w = bfsdata.GetElement(i);
component[w] = v;
}
compsize[v] = size;
if (gc==0 || size>compsize[gc]) gc = v;
if (size > 10) fprintf (stderr, "Component %d has size %d.\n", v, size);
}
fprintf (stderr, "Biggest component is %d, with size %d.\n", gc, compsize[gc]);
fflush(stderr);
vector<int> old2new(n+1,-1);
int nextid = 1;
for (int v=1; v<=n; v++) {
if (component[v]==gc) {
old2new[v] = nextid++;
}
}
fprintf (stderr, "About to output.\n");
fflush(stderr);
FILE *file = fopen ("giantcc.graph", "w");
if (!file) {
fprintf (stderr, "could not open file\n");
exit(-1);
}
OutputMETIS(file, g, old2new);
fclose(file);
}
void UpdateCoverageInfo(Graph &g, int bestv, vector<int> &curcover, Matrix<int> &covered, Matrix<int> &dt, vector<int> &forward, vector<int> &backward,
bool LABEL_GREEDY, CoverCounters *covercount, Labels &flabels, Labels &blabels, int &addcount,
double &fulltotalcovered, int &fullvcount, int origcount, bool DECREASE_COUNTERS) {
int n = g.VertexCount();
int threads = omp_get_max_threads();
//fprintf (stderr, "Should be updating using %d threads.\n", threads);
/*
vector<Triple> *triples = new vector<Triple> [threads];
BFSData coveragebfs(n);
vector<int> targetlist;
targetlist.reserve(n+1);*/
vector<pair<int,int>> pairs; //these are the pairs of vertices covered by adding v
ThreadData *tdata = new ThreadData [threads];
for (int t=0; t<threads; t++) {
tdata[t].Reset(n);
}
bool CENTER_BASED_UPDATE = false;
RFWTimer localtimer(true);
int timerstep = 500000;
int timertarget = timerstep;
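// For each source u, find every previously uncovered pair (u,w) whose
// shortest path goes through bestv: mark it covered, add the corresponding
// label entries, and (unless recomputing from scratch) decrement the
// coverage counters of the other vertices that hit those paths.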
#pragma omp parallel for schedule(dynamic)
for (int u=1; u<=n; u++) {
if (covered.Get(u,bestv)) continue; //speedup! already covered this part
int duv = dt.Get(u,bestv);
int tid = omp_get_thread_num();
vector<Triple> &triples = tdata[tid].triples;
vector<int> &targetlist = tdata[tid].targetlist;
double &totalcovered = tdata[tid].totalcovered;
//int &vcount = tdata[tid].vcount;
int vcount = 0;
triples.clear();
targetlist.clear();
// do we really need to loop over everything to mark?
for (int w=1; w<=n; w++) {
if (covered.Get(u,w)) continue; // fprintf (stderr, "!");
if (dt.Get(u,w) == duv + dt.Get(bestv,w)) {
//if (LABEL_GREEDY) {covercount->RemovePath(u,bestv,w);}
triples.push_back(Triple(u,bestv,w)); //found a path we want to cover
targetlist.push_back(w);
// decrease counters of everybody that covers this path
// could save something here by having two (pre-)sorted lists at x
// the first has distances to x, the second distances from x
// we can do binary search for the first match, then just walk within the desired range;
// could delete entries as paths are covered
totalcovered --;
vcount ++;
covered.Set(u,w,bestv); //only one thread will try to update u ---- no races
}
}
// now remember all paths starting at u
bool SIMPLE = false;
if (SIMPLE) {
fprintf (stderr, "NOT SUPPORTED ANYMORE.\n");
exit(-1);
int tcount = targetlist.size();
for (int i=0; i<tcount; i++) {
int w = targetlist[i];
int duw = dt.Get(u,w); //must find all x that hit the u-w path
for (int x=1; x<=n; x++) {
if (duw == dt.Get(u,x) + dt.Get(x,w)) {
if (curcover[x] == 0) continue; //x doesn't cover anything anyway
if (x==bestv) continue;
curcover[x] --;
if (LABEL_GREEDY) {covercount->RemovePath(u,x,w);}
}
}
}
} else {
//DecreaseCoverageMatrix(n, u, curcover, bestv, targetlist, dt, &covercount);
if (DECREASE_COUNTERS && !CENTER_BASED_UPDATE) {
DecreaseCoverageGraph(g, u, bestv, curcover, targetlist, dt, covercount, tdata[tid].coveragebfs, covered, triples);
//DecreaseCoverageMatrix(g.VertexCount(), u, curcover, bestv, targetlist, dt, covercount); //, tdata[tid].coveragebfs, covered, triples);
}
int tcount = triples.size();
if (CENTER_BASED_UPDATE) {
#pragma omp critical
{
if (!forward[u]) {
forward[u] = true;
flabels.AddEntry(u,bestv,dt.Get(u,bestv));
addcount ++;
}
fullvcount += vcount;
int wcount = targetlist.size();
for (int i=0; i<wcount; i++) {
int w = targetlist[i];
pairs.push_back(make_pair(u,w));
if (!backward[w]) {
backward[w] = true;
blabels.AddEntry(w,bestv,dt.Get(bestv,w));
addcount ++;
}
}
//fprintf (stderr, "Pairs has %d vertices.\n", pairs.size());
}
continue;
}
#pragma omp critical
{
fullvcount += vcount;
for (int i=0; i<tcount; i++) {
int u = triples[i].u;
int v = triples[i].v;
int w = triples[i].w;
curcover[v] --;
if (LABEL_GREEDY) {covercount->RemovePath(u,v,w);}
if (v == bestv) {
if (!forward[u]) {
forward[u] = true;
flabels.AddEntry(u,bestv,dt.Get(u,bestv));
addcount ++;
}
if (!backward[w]) {
backward[w] = true;
blabels.AddEntry(w,bestv,dt.Get(bestv,w));
addcount ++;
}
}
}
if (fullvcount > timertarget) {
while (fullvcount > timertarget) {timertarget += timerstep;}
double t = localtimer.getTime();
fprintf (stderr, "%.0f/%.0f ", t, (double)origcount * t / (double)fullvcount);
fflush(stderr);
}
}
}
}
for (int t=0; t<threads; t++) {
fulltotalcovered += tdata[t].totalcovered;
//fullvcount += tdata[t].vcount;
}
if (DECREASE_COUNTERS && CENTER_BASED_UPDATE) {
fprintf (stderr, "CTR ");
DecreaseCoverageGraphCenter(g, bestv, pairs, curcover, dt, covercount, covered, LABEL_GREEDY);
//DecreaseCoverageMatrixCenter(g, bestv, pairs, curcover, dt, covercount, covered, LABEL_GREEDY);
//DecreaseCoverageGraphCenter (g, u, bestv, curcover, targetlist, dt, covercount, tdata[tid].coveragebfs, covered, triples);
}
delete [] tdata;
}
void OutputDetailedLabel(FILE *file, Graph &g, Labels &flabels, Labels &blabels, int v, vector<int> &rank) {
vector<LabelEntry> &label = flabels.labels[v];
int hcount = label.size();
//fprintf (stderr, "%d:%d:%d ", v, g.GetDegree(v), hcount);
for (int i=0; i<hcount; i++) {
int h = label[i].hub;
int dvh = label[i].dist;
int rh = rank[h]; //that's the rank of h
int p = v;
// find the parent
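// (the parent is the smallest-rank hub x != h in v's label that lies on a
// shortest v-h path, i.e., dvx + dxh == dvh)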
for (int j=0; j<hcount; j++) {
int x = label[j].hub;
if (x == h) continue;
if (rank[x] < rank[p]) {
int dvx = label[j].dist;
int dxh = Query(flabels.labels[x], blabels.labels[h]);
if (dvx + dxh == dvh) {p = x;}
}
}
if (v==h) p = 2147483647;
//v hub distance parent shortcut
fprintf (file, "%d %d %d %d %d\n", v, h, dvh, p, 0);
}
}
// weighted: take sum of the degrees of the neighbors
// adaptive: once a vertex is picked, adjust priorities of neighbors
// neighbors: not supported yet
void ComputeSimpleOrders (char *prefix, Graph &g, bool weighted, bool adaptive, int neighbors) {
int n = g.VertexCount();
vector<double> origpriority (n+1,0);
BinaryHeap<double> heap (n);
vector<int> rank2vertex(n+1,-1);
for (int v=1; v<=n; v++) {
double priority;
if (weighted) {
double sumneighbors = 0;
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
int w = a->head;
sumneighbors += g.GetDegree(w);
}
priority = sumneighbors;
} else {
priority = g.GetDegree(v);
}
heap.Insert(v,-priority);
origpriority[v] = priority;
}
int rank = 0;
while (!heap.IsEmpty()) {
double value;
unsigned int v;
heap.RemoveFirst(v, value);
value = -value;
//fprintf (stderr, "%.0f:%.0f ", abs(value), origpriority[v]);
rank ++;
rank2vertex[rank] = v;
if (adaptive) {
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
int w = a->head;
if (heap.Contains(w)) {
double oldvalue = heap.GetElementValue(w);
heap.RemoveElement(w);
double delta = weighted ? g.GetDegree(v) : 1.0;
double newvalue = oldvalue + delta;
//fprintf (stderr, "<%.0f> ", newvalue);
heap.Insert(w, newvalue);
}
}
}
}
char filename[256];
sprintf (filename, "%s-%s-%s.order", prefix, weighted ? "weighted" : "plain", adaptive ? "adaptive" : "fixed");
OutputOrder(rank2vertex, n, filename);
}
void OutputOrder (vector<int> &rank2vertex, int n, char *filename) {
FILE *file = fopen (filename, "w");
fprintf (stderr, "\nOutputting order to file %s... ", filename);
if (!file) {
fprintf (stderr, "Could not open order file for writing.\n");
} else {
for (int r=1; r<=n; r++) {
fprintf (file, "%d\n", rank2vertex[r]);
}
fclose(file);
}
fprintf (stderr, "done.\n");
}
void Solve (int argc, char **argv) {
fprintf (stderr, "Should be solving labels.\n");
bool LABEL_GREEDY = true; //false;
bool PERTURB_EDGES = false;
const int INFINITY = 1000000000;
Graph g;
if (argc <= 2 || argc % 2 != 1) {
fprintf (stderr, "invalid number of parameters");
exit(-1);
}
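// command line: alternating key/value pairs, e.g.
// -dimacs <file> | -metis <file> [-order <prefix>] [-seed <n>] [-threads <n>] [-pert 0|1] [-pathgreedy 0|1]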
bool DIMACS_FORMAT = false;
char *filename = NULL;
char *prefix = NULL;
int threads = 1;
int seed = 1;
//bool PERTURB_EGDGES = false;
for (int i=1; i<argc; i+=2) {
char *key = argv[i];
char *value = argv[i+1];
if (strcmp(key, "-dimacs")==0) {
DIMACS_FORMAT = true;
filename = value;
} else if (strcmp(key, "-metis")==0) {
DIMACS_FORMAT = false;
filename = value;
} else if (strcmp(key, "-order")==0) {
prefix = value;
} else if (strcmp(key, "-seed")==0) {
seed = atoi(value);
} else if (strcmp(key, "-threads")==0) {
threads = atoi(value);
} else if (strcmp(key, "-pert")==0) {
PERTURB_EDGES = (atoi(value)!=0);
} else if (strcmp(key, "-pathgreedy")==0) {
LABEL_GREEDY = (atoi(value)==0);
} else {
fprintf (stderr, "Parameter [%s] not recognized.\n", key);
}
}
RFWRandom::randomize(seed);
if (DIMACS_FORMAT) {
g.ReadDimacs(filename, PERTURB_EDGES);
} else {
g.ReadMETIS(filename, PERTURB_EDGES);
}
if (prefix != NULL) {
ComputeSimpleOrders(prefix, g, false, false, 0);
ComputeSimpleOrders(prefix, g, false, true, 0);
ComputeSimpleOrders(prefix, g, true, false, 0);
ComputeSimpleOrders(prefix, g, true, true, 0);
exit(-1);
}
/*
if (PERTURB_EDGES) {
int n = g.VertexCount();
vector<int> old2new(n+1,0);
for (int v=1; v<=n; v++) old2new[v] = v;
FILE *outgraph = fopen ("pert.graph", "w");
OutputMETIS (outgraph, g, old2new); //WARNING: NOT WEIGHTED
fclose(outgraph);
}*/
fprintf (stderr, "Read graph with %d vertices and %d edges.\n", g.VertexCount(), g.EdgeCount());
//exit(-1);
//ExtractGiantComponent(g);
fflush(stderr);
//exit(-1);
//exit(-1);
//g.ReadMETIS(argv[1], PERTURB_EDGES);
//g.ReadSTP(argv[1]);
//int maxthreads = -1;
int maxthreads = threads; //atoi(argv[2]);
SetActualThreads(maxthreads,true);
fprintf (stderr, "Should be using %d threads.\n", maxthreads);
fprintf (stderr, "perturb %d\n", PERTURB_EDGES);
fprintf (stderr, "pathgreedy %d\n", !LABEL_GREEDY);
fprintf (stderr, "threads %d\n", maxthreads);
fprintf (stderr, "seed %d\n", seed);
//DensestSubgraph(g);
//exit(-1);
double timedist, timecover, timeadd;
int n = g.VertexCount();
CoverCounters covercount(n); //statistics about what paths each candidate hub covers
// now we have the distance table among everybody
fprintf (stderr, "Creating matrix...\n");
fflush (stderr);
Matrix<int> dt(n); //distances
fprintf (stderr, "Matrix created...\n");
fflush (stderr);
RFWTimer timer (true);
#pragma omp parallel for schedule(dynamic)
for (int s=1; s<=n; s++) {
Dijkstra(g,s,dt.dist[s]);
double t = timer.getTime();
if (s % 100 == 0) {
fprintf (stderr, "%.2f:%.2f ", t, (double)n * (double)t / (double)s);
fflush(stderr);
}
}
timedist = timer.getTime();
fprintf (stderr, "Distance table created in %.2f milliseconds.\n", 1000.0*timedist);
timer.start();
//dt.Output(stderr);
fprintf (stderr, "Creating labels... (%.2f)\n", 1000 * timer.getTime());
fflush (stderr);
Labels flabels(n);
Labels blabels(n);
double totalcovered = 0;
Matrix<int> covered(n);
covered.Reset(false);
for (int u=1; u<=n; u++) {
for (int w=1; w<=n; w++) {
if (dt.Get(u,w) >= INFINITY) {covered.Set(u,w,n+1);}
else totalcovered ++;
}
}
vector<int> rank(n+1,-1); //rank[v]: rank of vertex v
vector<int> rank2vertex(n+1,1);
//BinaryHeap<int> heap(n); //cou
vector<int> curcover(n+1,0);
//MatrixComputeCoverage(g, curcover, covered, dt, LABEL_GREEDY, &covercount);
GraphComputeCoverageCenter(g, curcover, covered, dt, LABEL_GREEDY, &covercount);
//FancyComputeCoverage(g, curcover, covered, dt, LABEL_GREEDY, &covercount);
timecover = timer.getTime();
fprintf (stderr, "Initial coverage took %.3f seconds.\n", timecover);
double sumcover = 0;
for (int v=1; v<=n; v++) {
sumcover += (double)(curcover[v]);
}
fprintf (stderr, "The average path is covered by %.4f vertices.\n", sumcover / ((double)n * (double)n));
fprintf (stderr, "\n");
fflush(stderr);
//fprintf (stderr, "Exiting.\n");
//exit(-1);
timer.start();
vector<int> forward(n+1,false);
vector<int> backward(n+1,false);
double totalsize = 0;
int hubcount = 0;
while (1) {
//---------------------------------
// pick vertex with the best score
//---------------------------------
int bestv = -1;
double bestscore = 0;
for (int v=1; v<=n; v++) {
if (curcover[v] == 0) continue;
double curscore = curcover[v];
if (LABEL_GREEDY) {
curscore /= (double)(covercount.backcount[v] + covercount.fwdcount[v]);
}
//curscore = g.GetDegree(v);
if (bestv<0 || curscore > bestscore) {
bestv = v;
bestscore = curscore;
}
}
if (bestv<0) {
fprintf (stderr, "Done with all vertices.\n");
fflush(stderr);
break;
}
hubcount ++;
rank[bestv] = hubcount;
rank2vertex[hubcount] = bestv;
fprintf (stderr, "Hub %d (%d) covers %d paths.", hubcount, bestv, curcover[bestv]);
int origcount = curcover[bestv];
int vcount = 0;
if (LABEL_GREEDY) {fprintf (stderr, "[%d:%d] ", covercount.fwdcount[bestv], covercount.backcount[bestv]);}
for (int v=0; v<=n; v++) {forward[v] = backward[v] = false;}
int addcount = 0;
vector<int> targetlist;
targetlist.reserve(n+1);
BFSData coveragebfs(n);
RFWTimer localtimer(true);
int timerstep = 500000;
int timertarget = timerstep;
//-------------------------------------------
// identify all uncovered pairs hit by bestv
//-------------------------------------------
bool FROM_SCRATCH = true;
double target =(double)n*(double)n;
fprintf (stderr, "[%.0f pairs] ", target);
FROM_SCRATCH = ((double)origcount / target > 0.1);
//FROM_SCRATCH = false;
fprintf (stderr, "s%d ", FROM_SCRATCH);
UpdateCoverageInfo(g, bestv, curcover, covered, dt, forward, backward, LABEL_GREEDY, &covercount, flabels, blabels, addcount, totalcovered, vcount, origcount, !FROM_SCRATCH);
if (FROM_SCRATCH) {
for (int x=1; x<=n; x++) curcover[x] = 0;
if (LABEL_GREEDY) covercount.Reset();
GraphComputeCoverageCenter(g, curcover, covered, dt, LABEL_GREEDY, &covercount);
}
#if 0
//#pragma omp parallel for schedule(dynamic)
for (int u=1; u<=n; u++) {
if (covered.Get(u,bestv)) continue; //speedup! already covered this part
int duv = dt.Get(u,bestv);
targetlist.clear();
// do we really need to loop over everything to mark?
for (int w=1; w<=n; w++) {
//fprintf (stderr, "<%d,%d> ", u, w);
if (covered.Get(u,w)) continue; // fprintf (stderr, "!");
//fflush(stderr);
if (dt.Get(u,w) == duv + dt.Get(bestv,w)) {
if (!forward[u]) {
forward[u] = true;
flabels.AddEntry(u,bestv,dt.Get(u,bestv));
addcount ++;
}
if (!backward[w]) {
backward[w] = true;
blabels.AddEntry(w,bestv,dt.Get(bestv,w));
addcount ++;
}
if (LABEL_GREEDY) {covercount.RemovePath(u,bestv,w);}
targetlist.push_back(w);
// decrease counters of everybody that covers this path
// could save something here by having two (pre-)sorted lists at x
// the first has distances to x, the second distances from x
// we can do binary search for the first match, then just walk within the desired range;
// could delete entries as paths are covered
/*
bool UPDATE_COUNT = true;
if (UPDATE_COUNT) {
int duw = dt.Get(u,w); //must find all x that hit the u-w path
for (int x=1; x<=n; x++) {
if (duw == dt.Get(u,x) + dt.Get(x,w)) {
if (curcover[x] == 0) continue; //x doesn't cover anything anyway
if (x==bestv) continue;
curcover[x] --;
if (LABEL_GREEDY) {covercount.RemovePath(u,x,w);}
}
}
}*/
totalcovered --;
vcount ++;
covered.Set(u,w,bestv);
}
}
// now remember all paths starting at u
bool SIMPLE = false;
if (SIMPLE) {
int tcount = targetlist.size();
for (int i=0; i<tcount; i++) {
int w = targetlist[i];
int duw = dt.Get(u,w); //must find all x that hit the u-w path
for (int x=1; x<=n; x++) {
if (duw == dt.Get(u,x) + dt.Get(x,w)) {
if (curcover[x] == 0) continue; //x doesn't cover anything anyway
if (x==bestv) continue;
curcover[x] --;
if (LABEL_GREEDY) {covercount.RemovePath(u,x,w);}
}
}
}
} else {
//DecreaseCoverageMatrix(n, u, curcover, bestv, targetlist, dt, &covercount);
DecreaseCoverageGraph(g, u, bestv, curcover, targetlist, dt, &covercount, coveragebfs, covered);
// for fixed u and considering all w such that u-w is newly covered,
// we have a path u-w.
// For each such path, we must find all other elements x that hit u-w as well
/*
int tcount = targetlist.size();
for (int x=1; x<=n; x++) { // try all possible middle vertices
if (curcover[x] == 0) continue; //vertex no longer relevant
if (x==bestv) continue;
int dux = dt.Get(u,x); // prefix
for (int i=0; i<tcount; i++) {
int w = targetlist[i];
if (dt.Get(u,w) == dux + dt.Get(x,w)) {
curcover[x] --;
if (LABEL_GREEDY) {covercount.RemovePath(u,x,w);}
}
}
}*/
}
if (vcount > timertarget) {
while (vcount > timertarget) {
timertarget += timerstep;
}
double t = localtimer.getTime();
fprintf (stderr, "%.0f/%.0f ", t, (double)origcount * t / (double)vcount);
fflush(stderr);
}
//fflush(stderr);
}
#endif
totalsize += addcount;
fprintf (stderr, "Counted %d; added %d label entries; %.2f pct uncovered; %.2f seconds\n", vcount, addcount, 100 * totalcovered / ((double)n*(double)n), timer.getTime());
fflush(stderr);
curcover[bestv] = 0;
//fflush(stderr);
}
timeadd = timer.getTime();
fprintf (stderr, "time_distance %.2f\n", timedist);
fprintf (stderr, "time_cover %.2f\n", timecover);
fprintf (stderr, "time_add %.2f\n", timeadd);
fprintf (stderr, "time_total %.2f\n", timedist + timecover + timeadd);
fprintf (stderr, "Average label size is %.2f\n", (double)totalsize / (double)(2*n));
fflush(stderr);
fflush(stdout);
//if (LABEL_GREEDY) {covercount.Output(stderr);}
bool OUTPUT_LABELS = true;
if (OUTPUT_LABELS) {
fprintf (stderr, "Outputting labels... ");
blabels.sort();
flabels.sort();
FILE *file = fopen ("output.labels", "w");
if (!file) {
fprintf (stderr, "Could not open labels file for writing.\n");
} else {
for (int v=1; v<=n; v++) {
OutputDetailedLabel(file, g, flabels, blabels, v, rank);
}
fclose(file);
}
fprintf (stderr, "done.\n");
}
bool OUTPUT_ORDER = true;
if (OUTPUT_ORDER) {
OutputOrder(rank2vertex, n, "output.order");
/*
FILE *file = fopen ("output.order", "w");
if (!file) {
fprintf (stderr, "Could not open labels file for writing.\n");
} else {
for (int r=1; r<=n; r++) {
fprintf (file, "%d\n", rank2vertex[r]);
}
fclose(file);
}
fprintf (stderr, "done.\n");*/
}
//flabels.Output();
bool CHECK_LABELS = true;
if (CHECK_LABELS) {
fprintf (stderr, "Checking correctness of all labels...\n");
fflush(stderr);
blabels.sort();
flabels.sort();
for (int v=1; v<=n; v++) {
for (int w=1; w<=n; w++) {
int labdist = Query(flabels.labels[v], blabels.labels[w]);
int dist = dt.Get(v,w);
//fprintf (stderr, "(%d,%d): %d,%d\n", v, w, labdist, dist);
if (dist != labdist) {
flabels.OutputLabel(v);
blabels.OutputLabel(w);
exit(-1);
}
}
}
fprintf (stderr, "Labels are valid.\n");
fflush(stderr);
}
// must compute the
}
int Query(vector<LabelEntry> &flabel, vector<LabelEntry> &blabel) {
int bestval = 1000000000;
int i = 0;
int j = 0;
int fsize = flabel.size();
int bsize = blabel.size();
while (1) {
if (i == fsize) break;
if (j == bsize) break;
//fprintf (stderr, "%d:%d ", flabel[i].hub, blabel[j].hub);
int diff = flabel[i].hub - blabel[j].hub;
if (diff == 0) {
//fprintf (stderr, "m%d ", flabel[i].hub);
int d = flabel[i].dist + blabel[j].dist;
if (d < bestval) {bestval = d;}
i++; j++;
} else {
if (diff > 0) {j++;}
else {i++;}
}
}
return bestval;
}
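/* Usage sketch (not in the original file): once flabels/blabels are built and
 * sorted, Query(flabels.labels[u], blabels.labels[w]) returns the u-w distance
 * under a valid 2-hop cover, or the sentinel 1000000000 if u and w share no
 * common hub; this is exactly the check performed in the CHECK_LABELS loop above. */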
void Dijkstra (Graph &g, int s, int *dist) {
bool verbose = false;
if (verbose) fprintf (stderr, "Dij "); //RUNNING DIJKSTRA!\n");
int n = g.VertexCount();
BinaryHeap<EdgeCost> heap(n); // = new BinaryHeap<ArcCost>(n);
int INFINITY = 1000000000;
for (int v=0; v<=n; v++) dist[v] = INFINITY;
int nscanned = 0;
heap.Insert(s, 0);
dist[s] = 0;
while (!heap.IsEmpty()) {
unsigned int v;
EdgeCost vdist;
heap.RemoveFirst(v,vdist); //v, out acost);
//scan outgoing arcs
nscanned ++;
SPGArc *a, *end;
for (g.GetBounds(v,a,end); a<end; a++) {
int w = a->head; //neighbor
int dw = vdist + a->cost;
if (dw < dist[w]) {
dist[w] = dw;
heap.Insert(w, dw);
}
}
}
if (verbose) {
//if (nscanned != n) {
fprintf (stderr, "Scanned %d/%d vertices.\n", nscanned, n);
fflush(stderr);
//}
}
}
}; |
SimpleLsh.h | /*
* File: LSH_all.h
* Author: chteflio
*
* Created on March 26, 2015, 9:16 AM
*/
#ifndef LSH_ALL_H
#define LSH_ALL_H
namespace mips {
class SimpleLsh : public Mip {
ProbeBucket probeBucket, probeBucketK;
std::vector<RetrievalArguments> retrArg;
LempArguments args;
inline void transformQueryMatrix(VectorMatrix& leftMatrix, VectorMatrix& queryMatrix) {
// transform queryMatrix (transform ||q|| to 1 and q = [0;q])
queryMatrix.rowNum = leftMatrix.rowNum;
queryMatrix.colNum = leftMatrix.colNum + 1;
queryMatrix.initializeBasics(queryMatrix.colNum, queryMatrix.rowNum, false);
#pragma omp parallel for schedule(static,1000)
for (row_type i = 0; i < queryMatrix.rowNum; i++) {
double* dQuery = leftMatrix.getMatrixRowPtr(i);
double* dTmp = queryMatrix.getMatrixRowPtr(i);
// division is expensive! multiply with inverse instead
double invLen = 1 / calculateLength(leftMatrix.getMatrixRowPtr(i), leftMatrix.colNum);
// all but last coordinates
scaleAndCopy(dTmp + 1, dQuery, invLen, leftMatrix.colNum);
// last coordinate
dTmp[0] = 0;
queryMatrix.setLengthInData(i, 1); // ||q|| = 1
}
}
inline void transformProbeMatrix(VectorMatrix& rightMatrix) {
// transform probeMatrix (transform ||p|| to less than 1 and p = [sqrt(1- ||p|| * ||p||);p])
// we need the longest vector from probeMatrix
double maxLen = 0;
for (row_type i = 0; i < rightMatrix.rowNum; i++) {
double len = calculateLength(rightMatrix.getMatrixRowPtr(i), rightMatrix.colNum);
if (len > maxLen) {
maxLen = len;
}
}
double invMaxLen = 1 / maxLen;
probeMatrix.rowNum = rightMatrix.rowNum;
probeMatrix.colNum = rightMatrix.colNum + 1;
probeMatrix.initializeBasics(probeMatrix.colNum, probeMatrix.rowNum, false);
#pragma omp parallel for schedule(static,1000)
for (row_type i = 0; i < probeMatrix.rowNum; i++) {
double* dProbe = rightMatrix.getMatrixRowPtr(i);
double* dTmp = probeMatrix.getMatrixRowPtr(i);
// all but last coordinates
scaleAndCopy(dTmp + 1, dProbe, invMaxLen, rightMatrix.colNum); // multiply with inverse
// last coordinate
double len = calculateLength(dTmp + 1, rightMatrix.colNum); // length of the scaled values, skipping the new first coordinate (dTmp[0] is set below)
dTmp[0] = ((1 - len * len) < 0) ? 0 : sqrt(1 - len * len);
probeMatrix.setLengthInData(i, 1); // set to 1 by the transformation
}
}
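/* Sketch (not part of the original file): the net effect of the two transforms
 * above on a single query/probe pair, written with plain arrays instead of
 * VectorMatrix. The added coordinates contribute 0 * sqrt(1 - ||p'||^2) = 0,
 * so the transformed inner product is q.p / (||q|| * maxLen): MIPS on (q,p)
 * becomes maximum cosine similarity on the transformed pair, which is what the
 * cosine LSH sketches index. qLen and maxLen are assumed positive. */
static double transformedInnerProduct(const double* q, const double* p,
                                      int d, double qLen, double maxLen) {
    double ip = 0;
    for (int i = 0; i < d; i++)
        ip += (q[i] / qLen) * (p[i] / maxLen); // matches the scaleAndCopy scaling above
    return ip;
}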
inline void printAlgoName(const VectorMatrix& leftMatrix) {
logging << "SIMPLE_LSH" << "\t" << args.threads << "\t";
std::cout << "[ALGORITHM] SIMPLE_LSH with " << args.threads << " thread(s)" << std::endl;
logging << "P(" << probeMatrix.rowNum << "x" << (0 + probeMatrix.colNum) << ")\t";
logging << "Q^T(" << leftMatrix.rowNum << "x" << (0 + leftMatrix.colNum) << ")\t";
}
inline void initializeInternal(std::vector<VectorMatrix>& queryMatrices, VectorMatrix& leftMatrix) {
std::cout << "[RETRIEVAL] QueryMatrix contains " << leftMatrix.rowNum << " vectors with dimensionality " << (0 + leftMatrix.colNum) << std::endl;
row_type myNumThreads = args.threads;
if (leftMatrix.rowNum < args.threads) {
myNumThreads = leftMatrix.rowNum;
std::cout << "[WARNING] Query matrix contains too few elements. Suboptimal running with " << myNumThreads << " thread(s)" << std::endl;
}
omp_set_num_threads(myNumThreads);
queryMatrices.resize(myNumThreads);
timer.start();
if (!isTransformed) {
std::cout << "[RETRIEVAL] QueryMatrix will be transformed" << std::endl;
VectorMatrix queryMatrix;
SimpleLsh::transformQueryMatrix(leftMatrix, queryMatrix);
splitMatrices(queryMatrix, queryMatrices);
} else {
splitMatrices(leftMatrix, queryMatrices);
}
timer.stop();
dataPreprocessingTimeLeft += timer.elapsedTime().nanos();
for (row_type i = 0; i < myNumThreads; i++) {
retrArg[i].initializeBasics(queryMatrices[i], probeMatrix, LEMP_LSH, args.theta, args.k, myNumThreads, 1, 0, 0, 0, false, false);
retrArg[i].init(probeMatrix.rowNum);
retrArg[i].clear();
}
}
inline void processIndexesTopk(double * query, row_type queryId,
LshIndex* index, LshIndex* queryIndex, ProbeBucket& probeBucket, RetrievalArguments* arg) {
row_type numCandidatesToVerify = 0;
index->lshBins->getCandidates(queryIndex->cosSketches->sketches, queryId, arg->candidatesToVerify, numCandidatesToVerify,
arg->done, LSH_SIGNATURES, probeBucket.startPos);
verifyCandidatesTopK_noLengthTest(query, numCandidatesToVerify, arg);
}
public:
bool isTransformed;
inline SimpleLsh(InputArguments& input, bool isTransformed) : isTransformed(isTransformed) {
args.copyInputArguments(input);
// now do the logging
logging.open(args.logFile.c_str(), std::ios_base::app);
if (!logging.is_open()) {
std::cout << "[WARNING] No log will be created!" << std::endl;
} else {
std::cout << "[INFO] Logging in " << args.logFile << std::endl;
}
omp_set_num_threads(args.threads);
retrArg.resize(args.threads);
}
inline ~SimpleLsh() {
logging.close();
}
void initialize(VectorMatrix& rightMatrix) {
std::cout << "[INIT] ProbeMatrix contains " << rightMatrix.rowNum << " vectors with dimensionality " << (0 + rightMatrix.colNum) << std::endl;
logging << "P(" << rightMatrix.rowNum << "x" << (0 + rightMatrix.colNum) << ")\t";
if (!isTransformed) {
std::cout << "[INIT] ProbeMatrix will be transformed" << std::endl;
timer.start();
transformProbeMatrix(rightMatrix);
timer.stop();
dataPreprocessingTimeRight += timer.elapsedTime().nanos();
} else {
probeMatrix = rightMatrix;
}
probeBucketK.init(probeMatrix, 0, args.k, args);
probeBucket.init(probeMatrix, args.k, probeMatrix.rowNum, args); // initialize
if (probeBucket.ptrIndexes[LSH] == 0)
probeBucket.ptrIndexes[LSH] = new LshIndex;
static_cast<LshIndex*> (probeBucket.ptrIndexes[LSH])->initializeLists(probeMatrix, true, args.k, probeMatrix.rowNum);
}
inline void runTopK(VectorMatrix& leftMatrix, Results& results) {
printAlgoName(leftMatrix);
std::vector<VectorMatrix> queryMatrices;
initializeInternal(queryMatrices, leftMatrix);
results.resultsVector.resize(args.threads);
LshIndex* index = static_cast<LshIndex*> (probeBucket.getIndex(LSH));
timer.start();
if (LSH_SIGNATURES > index->initializedSketchesForIndex) {
index->checkAndReallocateAll(retrArg[0].probeMatrix, true, probeBucket.startPos, probeBucket.endPos, LSH_SIGNATURES,
retrArg[0].sums, retrArg[0].countsOfBlockValues, retrArg[0].sketches, false);
}
timer.stop();
dataPreprocessingTimeRight += timer.elapsedTime().nanos();
std::cout << "[RETRIEVAL] Retrieval (k = " << args.k << ") starts ..." << std::endl;
logging << "k(" << args.k << ")\t";
timer.start();
comp_type comparisons = 0;
#pragma omp parallel reduction(+ : comparisons)
{
row_type tid = omp_get_thread_num();
LshIndex queryIndex; // separate for each thread
queryIndex.initializeLists(queryMatrices[tid], false, 0, queryMatrices[tid].rowNum);
queryIndex.checkAndReallocateAll(retrArg[tid].queryMatrix, false, 0, queryMatrices[tid].rowNum, LSH_SIGNATURES,
retrArg[tid].sums, retrArg[tid].countsOfBlockValues, retrArg[tid].sketches, false);
retrArg[tid].allocTopkResults();
for (row_type i = 0; i < queryMatrices[tid].rowNum; i++) {
double* query = queryMatrices[tid].getMatrixRowPtr(i);
retrArg[tid].queryId = i;
for (row_type j = 0; j < args.k; j++) {
double ip = queryMatrices[tid].innerProduct(i, probeMatrix.getMatrixRowPtr(j));
retrArg[tid].comparisons++;
retrArg[tid].heap[j] = QueueElement(ip, j);
}
std::make_heap(retrArg[tid].heap.begin(), retrArg[tid].heap.end(), std::greater<QueueElement>()); //make the heap;
processIndexesTopk(query, i, index, &queryIndex, probeBucket, &retrArg[tid]);
retrArg[tid].writeHeapToTopk(i);
}
retrArg[tid].extendIncompleteResultItems();
results.moveAppend(retrArg[tid].results, tid);
comparisons += retrArg[tid].comparisons;
}
timer.stop();
retrievalTime += timer.elapsedTime().nanos();
totalComparisons += comparisons;
std::cout << "[RETRIEVAL] ... and is finished with " << results.getResultSize() << " results" << std::endl;
logging << results.getResultSize() << "\t";
outputStats();
}
};
}
#endif /* LSH_ALL_H */
|
t_cholmod_gpu.c | /* ========================================================================== */
/* === GPU/t_cholmod_gpu ==================================================== */
/* ========================================================================== */
/* -----------------------------------------------------------------------------
* CHOLMOD/GPU Module. Copyright (C) 2005-2012, Timothy A. Davis
* http://www.suitesparse.com
* -------------------------------------------------------------------------- */
/* GPU BLAS template routine for cholmod_super_numeric. */
/* ========================================================================== */
/* === include files and definitions ======================================== */
/* ========================================================================== */
#ifdef GPU_BLAS
#include <string.h>
#include "cholmod_template.h"
#include "cholmod_gpu_kernels.h"
#include <fenv.h>
#include <cuda.h>
#include <cuda_runtime.h>
#undef L_ENTRY
#ifdef REAL
#define L_ENTRY 1
#else
#define L_ENTRY 2
#endif
/* ========================================================================== */
/* === gpu_clear_memory ===================================================== */
/* ========================================================================== */
/*
* Ensure Lx is zeroed before the factor is formed. This is a significant cost
* in the GPU case, so this parallel memset code is used for efficiency.
*/
void TEMPLATE2 (CHOLMOD (gpu_clear_memory))
(
double* buff,
size_t size,
int num_threads
)
{
int chunk_multiplier = 5;
int num_chunks = chunk_multiplier * num_threads;
size_t chunksize = size / num_chunks;
ptrdiff_t i;
#pragma omp parallel for num_threads(num_threads) private(i) schedule(dynamic)
for(i = 0; i < num_chunks; i++) {
size_t chunkoffset = i * chunksize;
if(i == num_chunks - 1) {
memset(buff + chunkoffset, 0, (size - chunksize*(num_chunks - 1)) *
sizeof(double));
}
else {
memset(buff + chunkoffset, 0, chunksize * sizeof(double));
}
}
}
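/* Usage sketch (assumption, not from the original file; the factor fields are
 * illustrative): clearing the numeric factor values before assembly, e.g.
 *   TEMPLATE2 (CHOLMOD (gpu_clear_memory)) ((double *) L->x, L->xsize, nthreads);
 * Each chunk memsets roughly size/num_chunks doubles; dynamic scheduling lets
 * faster threads pick up extra chunks. */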
/* ========================================================================== */
/* === gpu_init ============================================================= */
/* ========================================================================== */
/*
* Performs required initialization for GPU computing.
*
* Returns 0 if there is an error, so the intended use is
*
* useGPU = CHOLMOD(gpu_init)
*
* which would locally turn off gpu processing if the initialization failed.
*/
int TEMPLATE2 (CHOLMOD (gpu_init))
(
void *Cwork,
cholmod_factor *L,
cholmod_common *Common,
Int nsuper,
Int n,
Int nls,
cholmod_gpu_pointers *gpu_p
)
{
Int i, k, maxSize ;
cublasStatus_t cublasError ;
cudaError_t cudaErr ;
size_t maxBytesSize, HostPinnedSize ;
#ifdef HAVE_FEENABLEEXCEPT
feenableexcept (FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW );
#elif defined(HAVE__CONTROLFP)
_controlfp(_EM_ZERODIVIDE | _EM_INVALID | _EM_OVERFLOW, _MCW_EM);
#endif /* defined(HAVE_FEENABLEEXCEPT) */
maxSize = L->maxcsize;
/* #define PAGE_SIZE (4*1024) */
CHOLMOD_GPU_PRINTF (("gpu_init : %p\n",
(void *) ((size_t) Cwork & ~(4*1024-1)))) ;
/* make sure the assumed buffer sizes are large enough */
if ( (nls+2*n+4)*sizeof(Int) > Common->devBuffSize ) {
ERROR (CHOLMOD_GPU_PROBLEM,"\n\n"
"GPU Memory allocation error. Ls, Map and RelativeMap exceed\n"
"devBuffSize. It is not clear if this is due to insufficient\n"
"device or host memory or both. You can try:\n"
" 1) increasing the amount of GPU memory requested\n"
" 2) reducing CHOLMOD_NUM_HOST_BUFFERS\n"
" 3) using a GPU & host with more memory\n"
"This issue is a known limitation and should be fixed in a \n"
"future release of CHOLMOD.\n") ;
return (0) ;
}
/* divvy up the memory in dev_mempool */
gpu_p->d_Lx[0] = Common->dev_mempool;
gpu_p->d_Lx[1] = (double*)((unsigned char*)Common->dev_mempool + Common->devBuffSize);
gpu_p->d_C = (double*)((unsigned char*)Common->dev_mempool + 2*Common->devBuffSize);
gpu_p->d_A[0] = (double*)((unsigned char*)Common->dev_mempool + 3*Common->devBuffSize);
gpu_p->d_A[1] = (double*)((unsigned char*)Common->dev_mempool + 4*Common->devBuffSize);
gpu_p->d_Ls = (double*)((unsigned char*)Common->dev_mempool + 5*Common->devBuffSize);
gpu_p->d_Map = (double*)((unsigned char*)gpu_p->d_Ls + (nls+1)*sizeof(Int));
gpu_p->d_RelativeMap = (double*)((unsigned char*)gpu_p->d_Map + (n+1)*sizeof(Int));
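/* Resulting dev_mempool layout, one devBuffSize-byte slot each:
 * [ d_Lx[0] | d_Lx[1] | d_C | d_A[0] | d_A[1] | d_Ls/d_Map/d_RelativeMap ].
 * Ls, Map and RelativeMap share the sixth slot, which is why the size check
 * above requires (nls+2*n+4)*sizeof(Int) <= devBuffSize. */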
/* Copy all of the Ls and Lpi data to the device. If any supernodes are
* to be computed on the device then this will be needed, so might as
* well do it now. */
cudaErr = cudaMemcpy ( gpu_p->d_Ls, L->s, nls*sizeof(Int),
cudaMemcpyHostToDevice );
CHOLMOD_HANDLE_CUDA_ERROR(cudaErr,"cudaMemcpy(d_Ls)");
if (!(Common->gpuStream[0])) {
/* ------------------------------------------------------------------ */
/* create each CUDA stream */
/* ------------------------------------------------------------------ */
for ( i=0; i<CHOLMOD_HOST_SUPERNODE_BUFFERS; i++ ) {
cudaErr = cudaStreamCreate ( &(Common->gpuStream[i]) );
if (cudaErr != cudaSuccess) {
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA stream") ;
return (0) ;
}
}
/* ------------------------------------------------------------------ */
/* create each CUDA event */
/* ------------------------------------------------------------------ */
for (i = 0 ; i < 3 ; i++) {
cudaErr = cudaEventCreateWithFlags
(&(Common->cublasEventPotrf [i]), cudaEventDisableTiming) ;
if (cudaErr != cudaSuccess) {
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
return (0) ;
}
}
for (i = 0 ; i < CHOLMOD_HOST_SUPERNODE_BUFFERS ; i++) {
cudaErr = cudaEventCreateWithFlags
(&(Common->updateCBuffersFree[i]), cudaEventDisableTiming) ;
if (cudaErr != cudaSuccess) {
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event") ;
return (0) ;
}
}
cudaErr = cudaEventCreateWithFlags ( &(Common->updateCKernelsComplete),
cudaEventDisableTiming );
if (cudaErr != cudaSuccess) {
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA updateCKernelsComplete event") ;
return (0) ;
}
}
gpu_p->h_Lx[0] = (double*)(Common->host_pinned_mempool);
for ( k=1; k<CHOLMOD_HOST_SUPERNODE_BUFFERS; k++ ) {
gpu_p->h_Lx[k] = (double*)((char *)(Common->host_pinned_mempool) +
k*Common->devBuffSize);
}
return (1); /* initialization successful, useGPU = 1 */
}
/* ========================================================================== */
/* === gpu_reorder_descendants ============================================== */
/* ========================================================================== */
/* Reorder the descendant supernodes as:
* 1st - descendant supernodes eligible for processing on the GPU
* in increasing (by flops) order
* 2nd - supernodes whose processing is to remain on the CPU
* in any order
*
* All of the GPU-eligible supernodes will be scheduled first. All
* CPU-eligible descendants will overlap with the last (largest)
* CHOLMOD_HOST_SUPERNODE_BUFFERS GPU-eligible descendants.
*/
void TEMPLATE2 (CHOLMOD (gpu_reorder_descendants))
(
cholmod_common *Common,
Int *Super,
Int *locals,
Int *Lpi,
Int *Lpos,
Int *Head,
Int *Next,
Int *Previous,
Int *ndescendants,
Int *tail,
Int *mapCreatedOnGpu,
cholmod_gpu_pointers *gpu_p
)
{
Int prevd, nextd, firstcpu, d, k, kd1, kd2, ndcol, pdi, pdend, pdi1;
Int dnext, ndrow2, p;
Int n_descendant = 0;
double score;
/* use h_Lx[0] to buffer the GPU-eligible descendants */
struct cholmod_descendant_score_t* scores =
(struct cholmod_descendant_score_t*) gpu_p->h_Lx[0];
double cpuref = 0.0;
int nreverse = 1;
int previousd;
d = Head[*locals];
prevd = -1;
firstcpu = -1;
*mapCreatedOnGpu = 0;
while ( d != EMPTY )
{
/* Get the parameters for the current descendant supernode */
kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */
kd2 = Super [d+1] ;
ndcol = kd2 - kd1 ; /* # of columns in all of d */
pdi = Lpi [d] ; /* pointer to first row of d in Ls */
pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */
p = Lpos [d] ; /* offset of 1st row of d affecting s */
pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */
ndrow2 = pdend - pdi1;
nextd = Next[d];
/* compute a rough flops 'score' for this descendant supernode */
score = ndrow2 * ndcol;
if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT &&
ndcol*L_ENTRY >= CHOLMOD_ND_COL_LIMIT ) {
score += Common->devBuffSize;
}
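/* GPU-eligible descendants get a devBuffSize bump here, so the sort
 * below separates them from the CPU-bound descendants regardless of
 * their raw flop counts. */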
/* place in sort buffer */
scores[n_descendant].score = score;
scores[n_descendant].d = d;
n_descendant++;
d = nextd;
}
/* Sort the GPU-eligible supernodes */
qsort ( scores, n_descendant, sizeof(struct cholmod_descendant_score_t),
(int (*)(const void*, const void*))CHOLMOD(score_comp) );
/* Place sorted data back in descendant supernode linked list*/
if ( n_descendant > 0 ) {
Head[*locals] = scores[0].d;
if ( n_descendant > 1 ) {
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
if (n_descendant > 64)
for ( k=1; k<n_descendant; k++ ) {
Next[scores[k-1].d] = scores[k].d;
}
}
Next[scores[n_descendant-1].d] = firstcpu;
}
/* reverse the first CHOLMOD_HOST_SUPERNODE_BUFFERS to better hide PCIe
communications */
if ( Head[*locals] != EMPTY && Next[Head[*locals]] != EMPTY ) {
previousd = Head[*locals];
d = Next[Head[*locals]];
while ( d!=EMPTY && nreverse < CHOLMOD_HOST_SUPERNODE_BUFFERS ) {
kd1 = Super [d] ; /* d contains cols kd1 to kd2-1 of L */
kd2 = Super [d+1] ;
ndcol = kd2 - kd1 ; /* # of columns in all of d */
pdi = Lpi [d] ; /* pointer to first row of d in Ls */
pdend = Lpi [d+1] ; /* pointer just past last row of d in Ls */
p = Lpos [d] ; /* offset of 1st row of d affecting s */
pdi1 = pdi + p ; /* ptr to 1st row of d affecting s in Ls */
ndrow2 = pdend - pdi1;
nextd = Next[d];
nreverse++;
if ( ndrow2*L_ENTRY >= CHOLMOD_ND_ROW_LIMIT && ndcol*L_ENTRY >=
CHOLMOD_ND_COL_LIMIT ) {
/* place this supernode at the front of the list */
Next[previousd] = Next[d];
Next[d] = Head[*locals];
Head[*locals] = d;
}
else {
previousd = d;
}
d = nextd;
}
}
/* create a 'previous' list so we can traverse backwards */
*ndescendants = 0;
if ( Head[*locals] != EMPTY ) {
Previous[Head[*locals]] = EMPTY;
for (d = Head [*locals] ; d != EMPTY ; d = dnext) {
(*ndescendants)++;
dnext = Next[d];
if ( dnext != EMPTY ) {
Previous[dnext] = d;
}
else {
*tail = d;
}
}
}
return;
}
/* ========================================================================== */
/* === gpu_initialize_supernode ============================================= */
/* ========================================================================== */
/* Prepare a supernode for assembly on the GPU: zero the device assembly
 * buffer d_A[0] and create the Map for the current supernode on the device.
 */
void TEMPLATE2 (CHOLMOD (gpu_initialize_supernode))
(
cholmod_common *Common,
Int nscol,
Int nsrow,
Int psi,
cholmod_gpu_pointers *gpu_p
)
{
cudaError_t cuErr;
/* initialize the device supernode assembly memory to zero */
cuErr = cudaMemset ( gpu_p->d_A[0], 0, nscol*nsrow*L_ENTRY*sizeof(double) );
CHOLMOD_HANDLE_CUDA_ERROR(cuErr,"cudaMemset(d_A)");
/* Create the Map on the device */
createMapOnDevice ( (Int *)(gpu_p->d_Map),
(Int *)(gpu_p->d_Ls), psi, nsrow );
return;
}
/* ========================================================================== */
/* === gpu_updateC ========================================================== */
/* ========================================================================== */
/* C = L (k1:n-1, kd1:kd2-1) * L (k1:k2-1, kd1:kd2-1)', except that k1:n-1
* refers to all of the rows in L, but many of the rows are all zero.
* Supernode d holds columns kd1 to kd2-1 of L. Nonzero rows in the range
* k1:k2-1 are in the list Ls [pdi1 ... pdi2-1], of size ndrow1. Nonzero rows
* in the range k2:n-1 are in the list Ls [pdi2 ... pdend], of size ndrow2.
* Let L1 = L (Ls [pdi1 ... pdi2-1], kd1:kd2-1), and let L2 = L (Ls [pdi2 ...
* pdend], kd1:kd2-1). C is ndrow2-by-ndrow1. Let C1 be the first ndrow1
* rows of C and let C2 be the last ndrow2-ndrow1 rows of C. Only the lower
* triangular part of C1 needs to be computed since C1 is symmetric.
*
* UpdateC is completely asynchronous w.r.t. the GPU. Once the input buffer
* d_Lx[] has been filled, all of the device operations are issued, and the
* host can continue with filling the next input buffer / or start processing
* all of the descendant supernodes which are not eligible for processing on
* the device (since they are too small - will not fill the device).
*/
int TEMPLATE2 (CHOLMOD (gpu_updateC))
(
Int ndrow1, /* C is ndrow2-by-ndrow1 */
Int ndrow2,
Int ndrow, /* leading dimension of Lx */
Int ndcol, /* L1 is ndrow1-by-ndcol */
Int nsrow,
Int pdx1, /* L1 starts at Lx + L_ENTRY*pdx1 */
/* L2 starts at Lx + L_ENTRY*(pdx1 + ndrow1) */
Int pdi1,
double *Lx,
double *C,
cholmod_common *Common,
cholmod_gpu_pointers *gpu_p
)
{
double *devPtrLx, *devPtrC ;
double alpha, beta ;
cublasStatus_t cublasStatus ;
cudaError_t cudaStat [2] ;
Int ndrow3 ;
int icol, irow;
int iHostBuff, iDevBuff ;
#ifndef NTIMER
double tstart = 0;
#endif
if ((ndrow2*L_ENTRY < CHOLMOD_ND_ROW_LIMIT) ||
(ndcol*L_ENTRY < CHOLMOD_ND_COL_LIMIT))
{
/* too small for the CUDA BLAS; use the CPU instead */
return (0) ;
}
ndrow3 = ndrow2 - ndrow1 ;
#ifndef NTIMER
Common->syrkStart = SuiteSparse_time ( ) ;
Common->CHOLMOD_GPU_SYRK_CALLS++ ;
#endif
/* ---------------------------------------------------------------------- */
/* allocate workspace on the GPU */
/* ---------------------------------------------------------------------- */
iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;
/* cycle the device Lx buffer, d_Lx, through CHOLMOD_DEVICE_STREAMS,
usually 2, so we can overlap the copy of this descendant supernode
with the compute of the previous descendant supernode */
devPtrLx = (double *)(gpu_p->d_Lx[iDevBuff]);
/* very little overlap between kernels for different descendant supernodes
(since we enforce the supernodes must be large enough to fill the
device) so we only need one C buffer */
devPtrC = (double *)(gpu_p->d_C);
/* ---------------------------------------------------------------------- */
/* copy Lx to the GPU */
/* ---------------------------------------------------------------------- */
/* copy host data to pinned buffer first for better H2D bandwidth */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) if (ndcol > 32)
for ( icol=0; icol<ndcol; icol++ ) {
for ( irow=0; irow<ndrow2*L_ENTRY; irow++ ) {
gpu_p->h_Lx[iHostBuff][icol*ndrow2*L_ENTRY+irow] =
Lx[pdx1*L_ENTRY+icol*ndrow*L_ENTRY + irow];
}
}
cudaStat[0] = cudaMemcpyAsync ( devPtrLx,
gpu_p->h_Lx[iHostBuff],
ndrow2*ndcol*L_ENTRY*sizeof(devPtrLx[0]),
cudaMemcpyHostToDevice,
Common->gpuStream[iDevBuff] );
if ( cudaStat[0] ) {
CHOLMOD_GPU_PRINTF ((" ERROR cudaMemcpyAsync = %d \n", cudaStat[0]));
return (0);
}
/* make the current stream wait for kernels in previous streams */
cudaStreamWaitEvent ( Common->gpuStream[iDevBuff],
Common->updateCKernelsComplete, 0 ) ;
/* ---------------------------------------------------------------------- */
/* create the relative map for this descendant supernode */
/* ---------------------------------------------------------------------- */
createRelativeMapOnDevice ( (Int *)(gpu_p->d_Map),
(Int *)(gpu_p->d_Ls),
(Int *)(gpu_p->d_RelativeMap),
pdi1, ndrow2,
&(Common->gpuStream[iDevBuff]) );
/* ---------------------------------------------------------------------- */
/* do the CUDA SYRK */
/* ---------------------------------------------------------------------- */
cublasStatus = cublasSetStream (Common->cublasHandle,
Common->gpuStream[iDevBuff]) ;
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ;
}
alpha = 1.0 ;
beta = 0.0 ;
#ifdef REAL
cublasStatus = cublasDsyrk (Common->cublasHandle,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_N,
(int) ndrow1,
(int) ndcol, /* N, K: L1 is ndrow1-by-ndcol */
&alpha, /* ALPHA: 1 */
devPtrLx,
ndrow2, /* A, LDA: L1, ndrow2 */
&beta, /* BETA: 0 */
devPtrC,
ndrow2) ; /* C, LDC: C1 */
#else
cublasStatus = cublasZherk (Common->cublasHandle,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_N,
(int) ndrow1,
(int) ndcol, /* N, K: L1 is ndrow1-by-ndcol*/
&alpha, /* ALPHA: 1 */
(const cuDoubleComplex *) devPtrLx,
ndrow2, /* A, LDA: L1, ndrow2 */
&beta, /* BETA: 0 */
(cuDoubleComplex *) devPtrC,
ndrow2) ; /* C, LDC: C1 */
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
#ifndef NTIMER
Common->CHOLMOD_GPU_SYRK_TIME += SuiteSparse_time() - Common->syrkStart;
#endif
/* ---------------------------------------------------------------------- */
/* compute remaining (ndrow2-ndrow1)-by-ndrow1 block of C, C2 = L2*L1' */
/* ---------------------------------------------------------------------- */
#ifndef NTIMER
Common->CHOLMOD_GPU_GEMM_CALLS++ ;
tstart = SuiteSparse_time();
#endif
if (ndrow3 > 0)
{
#ifndef REAL
cuDoubleComplex calpha = {1.0,0.0} ;
cuDoubleComplex cbeta = {0.0,0.0} ;
#endif
/* ------------------------------------------------------------------ */
/* do the CUDA BLAS dgemm */
/* ------------------------------------------------------------------ */
#ifdef REAL
alpha = 1.0 ;
beta = 0.0 ;
cublasStatus = cublasDgemm (Common->cublasHandle,
CUBLAS_OP_N, CUBLAS_OP_T,
ndrow3, ndrow1, ndcol, /* M, N, K */
&alpha, /* ALPHA: 1 */
devPtrLx + L_ENTRY*(ndrow1), /* A, LDA: L2*/
ndrow2, /* ndrow */
devPtrLx, /* B, LDB: L1 */
ndrow2, /* ndrow */
&beta, /* BETA: 0 */
devPtrC + L_ENTRY*ndrow1, /* C, LDC: C2 */
ndrow2) ;
#else
cublasStatus = cublasZgemm (Common->cublasHandle,
CUBLAS_OP_N, CUBLAS_OP_C,
ndrow3, ndrow1, ndcol, /* M, N, K */
&calpha, /* ALPHA: 1 */
(const cuDoubleComplex*) devPtrLx + ndrow1,
ndrow2, /* ndrow */
(const cuDoubleComplex *) devPtrLx,
ndrow2, /* ndrow */
&cbeta, /* BETA: 0 */
(cuDoubleComplex *)devPtrC + ndrow1,
ndrow2) ;
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
}
#ifndef NTIMER
Common->CHOLMOD_GPU_GEMM_TIME += SuiteSparse_time() - tstart;
#endif
/* ------------------------------------------------------------------ */
/* Assemble the update C on the device using the d_RelativeMap */
/* ------------------------------------------------------------------ */
#ifdef REAL
addUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
&(Common->gpuStream[iDevBuff]) );
#else
addComplexUpdateOnDevice ( gpu_p->d_A[0], devPtrC,
gpu_p->d_RelativeMap, ndrow1, ndrow2, nsrow,
&(Common->gpuStream[iDevBuff]) );
#endif
/* Record an event indicating that kernels for
this descendant are complete */
cudaEventRecord ( Common->updateCKernelsComplete,
Common->gpuStream[iDevBuff]);
cudaEventRecord ( Common->updateCBuffersFree[iHostBuff],
Common->gpuStream[iDevBuff]);
return (1) ;
}
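#ifdef REAL
/* CPU reference (unused sketch, real case only; not part of the original file)
 * for what the SYRK+GEMM pair above computes: with L1 = rows 0..ndrow1-1 and
 * L2 = rows ndrow1..ndrow2-1 of an ndrow2-by-ndcol block L (leading dimension
 * ldl), C gets C1 = L1*L1' (lower triangle only) on top and C2 = L2*L1' below,
 * stored with leading dimension ndrow2. */
static void updateC_reference (Int ndrow1, Int ndrow2, Int ndcol,
    const double *L, Int ldl, double *C)
{
    Int i, j, k ;
    for (j = 0 ; j < ndrow1 ; j++)
    {
        for (i = j ; i < ndrow2 ; i++)   /* lower part of C1, then all of C2 */
        {
            double s = 0 ;
            for (k = 0 ; k < ndcol ; k++)
            {
                s += L [i + k*ldl] * L [j + k*ldl] ;
            }
            C [i + j*ndrow2] = s ;
        }
    }
}
#endif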
/* ========================================================================== */
/* === gpu_final_assembly =================================================== */
/* ========================================================================== */
/* If the supernode was assembled on both the CPU and the GPU, this will
* complete the supernode assembly on both the GPU and CPU.
*/
void TEMPLATE2 (CHOLMOD (gpu_final_assembly))
(
cholmod_common *Common,
double *Lx,
Int psx,
Int nscol,
Int nsrow,
int supernodeUsedGPU,
int *iHostBuff,
int *iDevBuff,
cholmod_gpu_pointers *gpu_p
)
{
Int iidx, i, j;
Int iHostBuff2 ;
Int iDevBuff2 ;
if ( supernodeUsedGPU ) {
/* ------------------------------------------------------------------ */
/* Apply all of the Schur-complement updates, computed on the GPU, to */
/* the supernode. */
/* ------------------------------------------------------------------ */
*iHostBuff = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
*iDevBuff = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;
if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {
/* If this supernode is going to be factored using the GPU (potrf)
* then it will need the portion of the update assembled on the
* CPU, so copy that to a pinned buffer and do an H2D copy to the device. */
/* wait until a buffer is free */
cudaEventSynchronize ( Common->updateCBuffersFree[*iHostBuff] );
/* copy update assembled on CPU to a pinned buffer */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private(iidx) if (nscol>32)
for ( j=0; j<nscol; j++ ) {
for ( i=j; i<nsrow*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY+i;
gpu_p->h_Lx[*iHostBuff][iidx] = Lx[psx*L_ENTRY+iidx];
}
}
/* H2D transfer of update assembled on CPU */
cudaMemcpyAsync ( gpu_p->d_A[1], gpu_p->h_Lx[*iHostBuff],
nscol*nsrow*L_ENTRY*sizeof(double),
cudaMemcpyHostToDevice,
Common->gpuStream[*iDevBuff] );
}
Common->ibuffer++;
iHostBuff2 = (Common->ibuffer)%CHOLMOD_HOST_SUPERNODE_BUFFERS;
iDevBuff2 = (Common->ibuffer)%CHOLMOD_DEVICE_STREAMS;
/* wait for all kernels to complete */
cudaEventSynchronize( Common->updateCKernelsComplete );
/* copy assembled Schur-complement updates computed on GPU */
cudaMemcpyAsync ( gpu_p->h_Lx[iHostBuff2], gpu_p->d_A[0],
nscol*nsrow*L_ENTRY*sizeof(double),
cudaMemcpyDeviceToHost,
Common->gpuStream[iDevBuff2] );
if ( nscol * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {
/* with the current implementation, potrf still uses data from the
* CPU - so put the fully assembled supernode in a pinned buffer for
* fastest access */
/* need both H2D and D2H copies to be complete */
cudaDeviceSynchronize();
/* sum updates from cpu and device on device */
#ifdef REAL
sumAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0], -1.0, nsrow, nscol );
#else
sumComplexAOnDevice ( gpu_p->d_A[1], gpu_p->d_A[0],
-1.0, nsrow, nscol );
#endif
/* place final assembled supernode in pinned buffer */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private(iidx) if (nscol>32)
for ( j=0; j<nscol; j++ ) {
for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY+i;
gpu_p->h_Lx[*iHostBuff][iidx] -=
gpu_p->h_Lx[iHostBuff2][iidx];
}
}
}
else
{
/* assemble with CPU updates */
cudaDeviceSynchronize();
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private(iidx) if (nscol>32)
for ( j=0; j<nscol; j++ ) {
for ( i=j*L_ENTRY; i<nsrow*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY+i;
Lx[psx*L_ENTRY+iidx] -= gpu_p->h_Lx[iHostBuff2][iidx];
}
}
}
}
return;
}
/* ========================================================================== */
/* === gpu_lower_potrf ====================================================== */
/* ========================================================================== */
/* Cholesky factorization (dpotrf) of a matrix S, operating on the lower
* triangular part only. S is nscol2-by-nscol2 with leading dimension nsrow.
*
* S is the top part of the supernode (the lower triangular matrix).
* This function also copies the bottom rectangular part of the supernode (B)
* onto the GPU, in preparation for gpu_triangular_solve.
*/
/*
* On entry, d_A[1] contains the fully assembled supernode
*/
int TEMPLATE2 (CHOLMOD (gpu_lower_potrf))
(
Int nscol2, /* S is nscol2-by-nscol2 */
Int nsrow, /* leading dimension of S */
Int psx, /* S is located at Lx + L_ENTRY*psx */
double *Lx, /* contains S; overwritten with Cholesky factor */
Int *info, /* BLAS info return value */
cholmod_common *Common,
cholmod_gpu_pointers *gpu_p
)
{
double *devPtrA, *devPtrB, *A ;
double alpha, beta ;
cudaError_t cudaStat ;
cublasStatus_t cublasStatus ;
Int j, nsrow2, nb, n, gpu_lda, lda, gpu_ldb ;
int ilda, ijb, iinfo ;
#ifndef NTIMER
double tstart ;
#endif
if (nscol2 * L_ENTRY < CHOLMOD_POTRF_LIMIT)
{
/* too small for the CUDA BLAS; use the CPU instead */
return (0) ;
}
#ifndef NTIMER
tstart = SuiteSparse_time ( ) ;
Common->CHOLMOD_GPU_POTRF_CALLS++ ;
#endif
nsrow2 = nsrow - nscol2 ;
/* ---------------------------------------------------------------------- */
/* heuristic to get the block size depending of the problem size */
/* ---------------------------------------------------------------------- */
nb = 128 ;
if (nscol2 > 4096) nb = 256 ;
if (nscol2 > 8192) nb = 384 ;
n = nscol2 ;
gpu_lda = ((nscol2+31)/32)*32 ;
lda = nsrow ;
A = gpu_p->h_Lx[(Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1)%
CHOLMOD_HOST_SUPERNODE_BUFFERS];
/* ---------------------------------------------------------------------- */
/* determine the GPU leading dimension of B */
/* ---------------------------------------------------------------------- */
gpu_ldb = 0 ;
if (nsrow2 > 0)
{
gpu_ldb = ((nsrow2+31)/32)*32 ;
}
/* ---------------------------------------------------------------------- */
/* remember where device memory is, to be used by triangular solve later */
/* ---------------------------------------------------------------------- */
devPtrA = gpu_p->d_Lx[0];
devPtrB = gpu_p->d_Lx[1];
/* ---------------------------------------------------------------------- */
/* copy A from device to device */
/* ---------------------------------------------------------------------- */
cudaStat = cudaMemcpy2DAsync ( devPtrA,
gpu_lda * L_ENTRY * sizeof (devPtrA[0]),
gpu_p->d_A[1],
nsrow * L_ENTRY * sizeof (Lx[0]),
nscol2 * L_ENTRY * sizeof (devPtrA[0]),
nscol2,
cudaMemcpyDeviceToDevice,
Common->gpuStream[0] );
if ( cudaStat ) {
ERROR ( CHOLMOD_GPU_PROBLEM, "GPU memcopy device to device");
}
/* ---------------------------------------------------------------------- */
/* copy B in advance, for gpu_triangular_solve */
/* ---------------------------------------------------------------------- */
if (nsrow2 > 0)
{
cudaStat = cudaMemcpy2DAsync (devPtrB,
gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
gpu_p->d_A[1] + L_ENTRY*nscol2,
nsrow * L_ENTRY * sizeof (Lx [0]),
nsrow2 * L_ENTRY * sizeof (devPtrB [0]),
nscol2,
cudaMemcpyDeviceToDevice,
Common->gpuStream[0]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
}
}
/* ------------------------------------------------------------------ */
/* define the dpotrf stream */
/* ------------------------------------------------------------------ */
cublasStatus = cublasSetStream (Common->cublasHandle,
Common->gpuStream [0]) ;
if (cublasStatus != CUBLAS_STATUS_SUCCESS) {
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream") ;
}
/* ---------------------------------------------------------------------- */
/* block Cholesky factorization of S */
/* ---------------------------------------------------------------------- */
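/* Right-looking blocked loop (descriptive note, not in the original file):
 * for each jb-wide panel, SYRK updates the jb x jb diagonal block with the
 * already-factored columns, GEMM updates the rows below it, the diagonal
 * block is copied to the host and factored with LAPACK dpotrf/zpotrf, copied
 * back, and TRSM forms the off-diagonal panel; gpuStream[1] overlaps the
 * D2H copies with compute on gpuStream[0]. */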
for (j = 0 ; j < n ; j += nb)
{
Int jb = nb < (n-j) ? nb : (n-j) ;
/* ------------------------------------------------------------------ */
/* do the CUDA BLAS dsyrk */
/* ------------------------------------------------------------------ */
alpha = -1.0 ;
beta = 1.0 ;
#ifdef REAL
cublasStatus = cublasDsyrk (Common->cublasHandle,
CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j,
&alpha, devPtrA + j, gpu_lda,
&beta, devPtrA + j + j*gpu_lda, gpu_lda) ;
#else
cublasStatus = cublasZherk (Common->cublasHandle,
CUBLAS_FILL_MODE_LOWER, CUBLAS_OP_N, jb, j,
&alpha, (cuDoubleComplex*)devPtrA + j,
gpu_lda,
&beta,
(cuDoubleComplex*)devPtrA + j + j*gpu_lda,
gpu_lda) ;
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
/* ------------------------------------------------------------------ */
cudaStat = cudaEventRecord (Common->cublasEventPotrf [0],
Common->gpuStream [0]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
}
cudaStat = cudaStreamWaitEvent (Common->gpuStream [1],
Common->cublasEventPotrf [0], 0) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
}
/* ------------------------------------------------------------------ */
/* copy back the jb columns on two different streams */
/* ------------------------------------------------------------------ */
cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + j*lda),
lda * L_ENTRY * sizeof (double),
devPtrA + L_ENTRY*(j + j*gpu_lda),
gpu_lda * L_ENTRY * sizeof (double),
L_ENTRY * sizeof (double)*jb,
jb,
cudaMemcpyDeviceToHost,
Common->gpuStream [1]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ;
}
/* ------------------------------------------------------------------ */
/* do the CUDA BLAS dgemm */
/* ------------------------------------------------------------------ */
if ((j+jb) < n)
{
#ifdef REAL
alpha = -1.0 ;
beta = 1.0 ;
cublasStatus = cublasDgemm (Common->cublasHandle,
CUBLAS_OP_N, CUBLAS_OP_T,
(n-j-jb), jb, j,
&alpha,
devPtrA + (j+jb), gpu_lda,
devPtrA + (j) , gpu_lda,
&beta,
devPtrA + (j+jb + j*gpu_lda), gpu_lda) ;
#else
cuDoubleComplex calpha = {-1.0,0.0} ;
cuDoubleComplex cbeta = { 1.0,0.0} ;
cublasStatus = cublasZgemm (Common->cublasHandle,
CUBLAS_OP_N, CUBLAS_OP_C,
(n-j-jb), jb, j,
&calpha,
(cuDoubleComplex*)devPtrA + (j+jb),
gpu_lda,
(cuDoubleComplex*)devPtrA + (j),
gpu_lda,
&cbeta,
(cuDoubleComplex*)devPtrA +
(j+jb + j*gpu_lda),
gpu_lda ) ;
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
}
cudaStat = cudaStreamSynchronize (Common->gpuStream [1]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
}
/* ------------------------------------------------------------------ */
/* compute the Cholesky factorization of the jbxjb block on the CPU */
/* ------------------------------------------------------------------ */
ilda = (int) lda ;
ijb = jb ;
#ifdef REAL
LAPACK_DPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ;
#else
LAPACK_ZPOTRF ("L", &ijb, A + L_ENTRY * (j + j*lda), &ilda, &iinfo) ;
#endif
*info = iinfo ;
if (*info != 0)
{
*info = *info + j ;
break ;
}
/* ------------------------------------------------------------------ */
/* copy the result back to the GPU */
/* ------------------------------------------------------------------ */
cudaStat = cudaMemcpy2DAsync (devPtrA + L_ENTRY*(j + j*gpu_lda),
gpu_lda * L_ENTRY * sizeof (double),
A + L_ENTRY * (j + j*lda),
lda * L_ENTRY * sizeof (double),
L_ENTRY * sizeof (double) * jb,
jb,
cudaMemcpyHostToDevice,
Common->gpuStream [0]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
}
/* ------------------------------------------------------------------ */
/* do the CUDA BLAS dtrsm */
/* ------------------------------------------------------------------ */
if ((j+jb) < n)
{
#ifdef REAL
alpha = 1.0 ;
cublasStatus = cublasDtrsm (Common->cublasHandle,
CUBLAS_SIDE_RIGHT,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_T, CUBLAS_DIAG_NON_UNIT,
(n-j-jb), jb,
&alpha,
devPtrA + (j + j*gpu_lda), gpu_lda,
devPtrA + (j+jb + j*gpu_lda), gpu_lda) ;
#else
cuDoubleComplex calpha = {1.0,0.0};
cublasStatus = cublasZtrsm (Common->cublasHandle,
CUBLAS_SIDE_RIGHT,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_C, CUBLAS_DIAG_NON_UNIT,
(n-j-jb), jb,
&calpha,
(cuDoubleComplex *)devPtrA +
(j + j*gpu_lda),
gpu_lda,
(cuDoubleComplex *)devPtrA +
(j+jb + j*gpu_lda),
gpu_lda) ;
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
/* -------------------------------------------------------------- */
/* Copy factored column back to host. */
/* -------------------------------------------------------------- */
cudaStat = cudaEventRecord (Common->cublasEventPotrf[2],
Common->gpuStream[0]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
}
cudaStat = cudaStreamWaitEvent (Common->gpuStream[1],
Common->cublasEventPotrf[2], 0) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "CUDA event failure") ;
}
cudaStat = cudaMemcpy2DAsync (A + L_ENTRY*(j + jb + j * lda),
lda * L_ENTRY * sizeof (double),
devPtrA + L_ENTRY*
(j + jb + j * gpu_lda),
gpu_lda * L_ENTRY * sizeof (double),
L_ENTRY * sizeof (double)*
(n - j - jb), jb,
cudaMemcpyDeviceToHost,
Common->gpuStream[1]) ;
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy to device") ;
}
}
}
#ifndef NTIMER
Common->CHOLMOD_GPU_POTRF_TIME += SuiteSparse_time ( ) - tstart ;
#endif
return (1) ;
}
/* ========================================================================== */
/* === gpu_triangular_solve ================================================= */
/* ========================================================================== */
/* The current supernode is columns k1 to k2-1 of L. Let L1 be the diagonal
* block (factorized by dpotrf/zpotrf above; rows/cols k1:k2-1), and L2 be rows
* k2:n-1 and columns k1:k2-1 of L. The triangular system to solve is L2*L1' =
* S2, where S2 is overwritten with L2. More precisely, L2 = S2 / L1' in
* MATLAB notation.
*/
/* Version with pre-allocation in POTRF */
int TEMPLATE2 (CHOLMOD (gpu_triangular_solve))
(
Int nsrow2, /* L1 and S2 are nsrow2-by-nscol2 */
Int nscol2, /* L1 is nscol2-by-nscol2 */
Int nsrow, /* leading dimension of L1, L2, and S2 */
Int psx, /* L1 is at Lx+L_ENTRY*psx;
* L2 at Lx+L_ENTRY*(psx+nscol2)*/
double *Lx, /* holds L1, L2, and S2 */
cholmod_common *Common,
cholmod_gpu_pointers *gpu_p
)
{
double *devPtrA, *devPtrB ;
cudaError_t cudaStat ;
cublasStatus_t cublasStatus ;
Int gpu_lda, gpu_ldb, gpu_rowstep ;
Int gpu_row_start = 0 ;
Int gpu_row_max_chunk, gpu_row_chunk;
int ibuf = 0;
int iblock = 0;
int iHostBuff = (Common->ibuffer+CHOLMOD_HOST_SUPERNODE_BUFFERS-1) %
CHOLMOD_HOST_SUPERNODE_BUFFERS;
int i, j;
Int iidx;
int iwrap;
#ifndef NTIMER
double tstart ;
#endif
#ifdef REAL
double alpha = 1.0 ;
gpu_row_max_chunk = 768;
#else
cuDoubleComplex calpha = {1.0,0.0} ;
gpu_row_max_chunk = 256;
#endif
if ( nsrow2 <= 0 )
{
return (0) ;
}
#ifndef NTIMER
tstart = SuiteSparse_time ( ) ;
Common->CHOLMOD_GPU_TRSM_CALLS++ ;
#endif
gpu_lda = ((nscol2+31)/32)*32 ;
gpu_ldb = ((nsrow2+31)/32)*32 ;
devPtrA = gpu_p->d_Lx[0];
devPtrB = gpu_p->d_Lx[1];
/* make sure the copy of B has completed */
cudaStreamSynchronize( Common->gpuStream[0] );
/* ---------------------------------------------------------------------- */
/* do the CUDA BLAS dtrsm */
/* ---------------------------------------------------------------------- */
while ( gpu_row_start < nsrow2 )
{
gpu_row_chunk = nsrow2 - gpu_row_start;
if ( gpu_row_chunk > gpu_row_max_chunk ) {
gpu_row_chunk = gpu_row_max_chunk;
}
cublasStatus = cublasSetStream ( Common->cublasHandle,
Common->gpuStream[ibuf] );
if ( cublasStatus != CUBLAS_STATUS_SUCCESS )
{
ERROR ( CHOLMOD_GPU_PROBLEM, "GPU CUBLAS stream");
}
#ifdef REAL
cublasStatus = cublasDtrsm (Common->cublasHandle,
CUBLAS_SIDE_RIGHT,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_T,
CUBLAS_DIAG_NON_UNIT,
gpu_row_chunk,
nscol2,
&alpha,
devPtrA,
gpu_lda,
devPtrB + gpu_row_start,
gpu_ldb) ;
#else
cublasStatus = cublasZtrsm (Common->cublasHandle,
CUBLAS_SIDE_RIGHT,
CUBLAS_FILL_MODE_LOWER,
CUBLAS_OP_C,
CUBLAS_DIAG_NON_UNIT,
gpu_row_chunk,
nscol2,
&calpha,
(const cuDoubleComplex *) devPtrA,
gpu_lda,
(cuDoubleComplex *)devPtrB + gpu_row_start ,
gpu_ldb) ;
#endif
if (cublasStatus != CUBLAS_STATUS_SUCCESS)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU CUBLAS routine failure") ;
}
/* ------------------------------------------------------------------ */
/* copy result back to the CPU */
/* ------------------------------------------------------------------ */
cudaStat = cudaMemcpy2DAsync (
gpu_p->h_Lx[iHostBuff] +
L_ENTRY*(nscol2+gpu_row_start),
nsrow * L_ENTRY * sizeof (Lx [0]),
devPtrB + L_ENTRY*gpu_row_start,
gpu_ldb * L_ENTRY * sizeof (devPtrB [0]),
gpu_row_chunk * L_ENTRY *
sizeof (devPtrB [0]),
nscol2,
cudaMemcpyDeviceToHost,
Common->gpuStream[ibuf]);
if (cudaStat)
{
ERROR (CHOLMOD_GPU_PROBLEM, "GPU memcopy from device") ;
}
cudaEventRecord ( Common->updateCBuffersFree[ibuf],
Common->gpuStream[ibuf] );
gpu_row_start += gpu_row_chunk;
ibuf++;
ibuf = ibuf % CHOLMOD_HOST_SUPERNODE_BUFFERS;
iblock ++;
if ( iblock >= CHOLMOD_HOST_SUPERNODE_BUFFERS )
{
Int gpu_row_start2 ;
Int gpu_row_end ;
/* then CHOLMOD_HOST_SUPERNODE_BUFFERS worth of work has been
* scheduled, so check for completed events and copy result into
* Lx before continuing. */
cudaEventSynchronize ( Common->updateCBuffersFree
[iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );
/* copy into Lx */
gpu_row_start2 = nscol2 +
(iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
*gpu_row_max_chunk;
gpu_row_end = gpu_row_start2+gpu_row_max_chunk;
if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private(iidx) if ( nscol2 > 32 )
for ( j=0; j<nscol2; j++ ) {
for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY+i;
Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
}
}
}
}
/* Convenient to copy the L1 block here */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private ( iidx ) if ( nscol2 > 32 )
for ( j=0; j<nscol2; j++ ) {
for ( i=j*L_ENTRY; i<nscol2*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY + i;
Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
}
}
/* now account for the last CHOLMOD_HOST_SUPERNODE_BUFFERS buffers */
for ( iwrap=0; iwrap<CHOLMOD_HOST_SUPERNODE_BUFFERS; iwrap++ )
{
int i, j;
Int gpu_row_start2 = nscol2 + (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS)
*gpu_row_max_chunk;
if (iblock-CHOLMOD_HOST_SUPERNODE_BUFFERS >= 0 &&
gpu_row_start2 < nsrow )
{
Int iidx;
Int gpu_row_end = gpu_row_start2+gpu_row_max_chunk;
if ( gpu_row_end > nsrow ) gpu_row_end = nsrow;
cudaEventSynchronize ( Common->updateCBuffersFree
[iblock%CHOLMOD_HOST_SUPERNODE_BUFFERS] );
/* copy into Lx */
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private(iidx) if ( nscol2 > 32 )
for ( j=0; j<nscol2; j++ ) {
for ( i=gpu_row_start2*L_ENTRY; i<gpu_row_end*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY+i;
Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
}
}
}
iblock++;
}
/* ---------------------------------------------------------------------- */
/* return */
/* ---------------------------------------------------------------------- */
#ifndef NTIMER
Common->CHOLMOD_GPU_TRSM_TIME += SuiteSparse_time ( ) - tstart ;
#endif
return (1) ;
}
/* ========================================================================== */
/* === gpu_copy_supernode =================================================== */
/* ========================================================================== */
/*
* In the event gpu_triangular_solve is not needed / called, this routine
* copies the factored diagonal block from the GPU to the CPU.
*/
void TEMPLATE2 (CHOLMOD (gpu_copy_supernode))
(
cholmod_common *Common,
double *Lx,
Int psx,
Int nscol,
Int nscol2,
Int nsrow,
int supernodeUsedGPU,
int iHostBuff,
cholmod_gpu_pointers *gpu_p
)
{
Int iidx, i, j;
if ( supernodeUsedGPU && nscol2 * L_ENTRY >= CHOLMOD_POTRF_LIMIT ) {
cudaDeviceSynchronize();
#pragma omp parallel for num_threads(CHOLMOD_OMP_NUM_THREADS) \
private(iidx,i,j) if (nscol>32)
for ( j=0; j<nscol; j++ ) {
for ( i=j*L_ENTRY; i<nscol*L_ENTRY; i++ ) {
iidx = j*nsrow*L_ENTRY+i;
Lx[psx*L_ENTRY+iidx] = gpu_p->h_Lx[iHostBuff][iidx];
}
}
}
return;
}
#endif
#undef REAL
#undef COMPLEX
#undef ZOMPLEX
|
convolution_3x3_pack16.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd63_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack8, int inch, int outch, const Option& opt)
{
// winograd63 transform kernel
Mat kernel_tm;
kernel_tm.create(8 * 8, inch, outch);
const float ktm[8][3] = {
{1.0f, 0.0f, 0.0f},
{-2.0f / 9, -2.0f / 9, -2.0f / 9},
{-2.0f / 9, 2.0f / 9, -2.0f / 9},
{1.0f / 90, 1.0f / 45, 2.0f / 45},
{1.0f / 90, -1.0f / 45, 2.0f / 45},
{1.0f / 45, 1.0f / 90, 1.0f / 180},
{1.0f / 45, -1.0f / 90, 1.0f / 180},
{0.0f, 0.0f, 1.0f}
};
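// The two passes below compute kernel_tm0 = G * g * G^T for each 3x3 kernel g,
// where G is the 8x3 matrix ktm above: the "h" pass forms tmp = G * g (one row
// of tmp per row of G), and the "v" pass multiplies each row of tmp by G^T.
// This is the standard F(6,3) Winograd kernel transform, done once per (p,q).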
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel, transposed
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[8][3];
for (int i = 0; i < 8; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 8; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 8; i++)
{
kernel_tm0[j * 8 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 64-inch-outch
// dst = 16b-16a-inch/16a-64-outch/16b
kernel_tm_pack8.create(inch / 16, 64, outch / 16, (size_t)4u * 16 * 16, 16 * 16);
int q = 0;
for (; q + 15 < outch; q += 16)
{
Mat g0 = kernel_tm_pack8.channel(q / 16);
for (int k = 0; k < 64; k++)
{
float* g00 = g0.row(k);
for (int p = 0; p + 15 < inch; p += 16)
{
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < 16; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
}
static void conv3x3s1_winograd63_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 6n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 5) / 6 * 6;
outh = (outh + 5) / 6 * 6;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
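// Note: every 8x8 input tile produces a 6x6 output tile and adjacent input
// tiles overlap by 2 pixels, so the output is rounded up to a multiple of 6
// and the padded input is that size plus 2 in each dimension.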
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 6;
int h_tiles = outh / 6;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 64, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd63_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 6 * 8;
int h_tm = outh / 6 * 8;
const int tiles = h_tm / 8 * w_tm / 8;
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 64, elemsize, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 64, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 64; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x12
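// Note: the transpose runs in three stages: unpacklo/unpackhi interleave
// row pairs within each 128-bit lane, shuffle_ps gathers 4-element groups,
// and shuffle_f32x4 permutes whole 128-bit lanes of the 512-bit registers.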
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _r8 = _mm512_load_ps(r0 + 16 * 8);
__m512 _r9 = _mm512_load_ps(r0 + 16 * 9);
__m512 _ra = _mm512_load_ps(r0 + 16 * 10);
__m512 _rb = _mm512_load_ps(r0 + 16 * 11);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9);
__m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9);
__m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb);
__m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb);
__m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0));
_tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1));
_tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1));
_tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0));
_r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0));
_r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1));
_rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
_mm512_store_ps(tmpptr + 16 * 8, _r8);
_mm512_store_ps(tmpptr + 16 * 9, _r9);
_mm512_store_ps(tmpptr + 16 * 10, _ra);
_mm512_store_ps(tmpptr + 16 * 11, _rb);
tmpptr += 192;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x8
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
tmpptr += 128;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x4
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
tmpptr += 64;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x2
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
__m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
tmpptr += 32;
r0 += bottom_blob_tm.cstep * 16;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
__m512 _val = _mm512_load_ps(r0);
_mm512_store_ps(tmpptr, _val);
tmpptr += 16;
r0 += bottom_blob_tm.cstep * 16;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 64, outch, elemsize, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 64; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
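// Note: each of the nn = inch*16 steps broadcasts one input scalar per tile
// and FMA-accumulates it against 16 output-channel weights, so the loop is
// a sequence of rank-1 updates on the 12 accumulators.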
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
__m512 _sum8 = _mm512_setzero_ps();
__m512 _sum9 = _mm512_setzero_ps();
__m512 _suma = _mm512_setzero_ps();
__m512 _sumb = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
__m512 _val8 = _mm512_set1_ps(r0[8]);
__m512 _val9 = _mm512_set1_ps(r0[9]);
_sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
__m512 _vala = _mm512_set1_ps(r0[10]);
__m512 _valb = _mm512_set1_ps(r0[11]);
_suma = _mm512_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
_mm512_store_ps(output0_tm + 16 * 8, _sum8);
_mm512_store_ps(output0_tm + 16 * 9, _sum9);
_mm512_store_ps(output0_tm + 16 * 10, _suma);
_mm512_store_ps(output0_tm + 16 * 11, _sumb);
output0_tm += 16 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
output0_tm += 16 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
output0_tm += 16 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
output0_tm += 16 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
output0_tm += 16;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd63_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
static void conv3x3s1_winograd43_transform_kernel_pack16_avx512(const Mat& kernel, Mat& kernel_tm_pack4, int inch, int outch, const Option& opt)
{
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch);
const float ktm[6][3] = {
{1.0f / 4, 0.0f, 0.0f},
{-1.0f / 6, -1.0f / 6, -1.0f / 6},
{-1.0f / 6, 1.0f / 6, -1.0f / 6},
{1.0f / 24, 1.0f / 12, 1.0f / 6},
{1.0f / 24, -1.0f / 12, 1.0f / 6},
{0.0f, 0.0f, 1.0f}
};
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const float* kernel0 = (const float*)kernel + p * inch * 9 + q * 9;
float* kernel_tm0 = kernel_tm.channel(p).row(q);
// transform kernel
const float* k0 = kernel0;
const float* k1 = kernel0 + 3;
const float* k2 = kernel0 + 6;
// h
float tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// v
for (int j = 0; j < 6; j++)
{
float* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 16b-16a-inch/16a-36-outch/16b
kernel_tm_pack4.create(inch / 16, 36, outch / 16, (size_t)4u * 16 * 16, 16 * 16);
for (int q = 0; q + 15 < outch; q += 16)
{
Mat g0 = kernel_tm_pack4.channel(q / 16);
for (int k = 0; k < 36; k++)
{
float* g00 = g0.row<float>(k);
for (int p = 0; p + 15 < inch; p += 16)
{
for (int i = 0; i < 16; i++)
{
for (int j = 0; j < 16; j++)
{
const float* k00 = kernel_tm.channel(q + j).row(p + i);
g00[0] = k00[k];
g00++;
}
}
}
}
}
}
static void conv3x3s1_winograd43_pack16_avx512(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Mat& bias, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tiles = outw / 4;
int h_tiles = outh / 4;
const int tiles = w_tiles * h_tiles;
bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
conv3x3s1_winograd43_transform_input_pack16_avx512(bottom_blob_bordered, bottom_blob_tm, opt);
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
if (tiles >= 12)
bottom_blob_tm2.create(12 * inch, tiles / 12 + (tiles % 12) / 8 + (tiles % 12 % 8) / 4 + (tiles % 12 % 4) / 2 + tiles % 12 % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + (tiles % 4) / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 2)
bottom_blob_tm2.create(2 * inch, tiles / 2 + tiles % 2, 36, 4u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
for (; i + 11 < tiles; i += 12)
{
float* tmpptr = tm2.row(i / 12);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x12
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _r8 = _mm512_load_ps(r0 + 16 * 8);
__m512 _r9 = _mm512_load_ps(r0 + 16 * 9);
__m512 _ra = _mm512_load_ps(r0 + 16 * 10);
__m512 _rb = _mm512_load_ps(r0 + 16 * 11);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_unpacklo_ps(_r8, _r9);
__m512 _tmp9 = _mm512_unpackhi_ps(_r8, _r9);
__m512 _tmpa = _mm512_unpacklo_ps(_ra, _rb);
__m512 _tmpb = _mm512_unpackhi_ps(_ra, _rb);
__m512 _tmpc = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpg = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmph = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpi = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpj = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpk = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpl = _mm512_shuffle_ps(_tmp8, _tmpa, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpm = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpn = _mm512_shuffle_ps(_tmp9, _tmpb, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp5 = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(2, 0, 2, 0));
_tmp6 = _mm512_shuffle_f32x4(_tmpc, _tmpg, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpk, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp8 = _mm512_shuffle_f32x4(_tmph, _tmpl, _MM_SHUFFLE(3, 1, 3, 1));
_tmp9 = _mm512_shuffle_f32x4(_tmpe, _tmpi, _MM_SHUFFLE(3, 1, 3, 1));
_tmpa = _mm512_shuffle_f32x4(_tmpm, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_tmpb = _mm512_shuffle_f32x4(_tmpj, _tmpn, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(2, 0, 2, 0));
_r5 = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(2, 0, 2, 0));
_r6 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r8 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r9 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_ra = _mm512_shuffle_f32x4(_tmp8, _tmp9, _MM_SHUFFLE(3, 1, 3, 1));
_rb = _mm512_shuffle_f32x4(_tmpa, _tmpb, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
_mm512_store_ps(tmpptr + 16 * 8, _r8);
_mm512_store_ps(tmpptr + 16 * 9, _r9);
_mm512_store_ps(tmpptr + 16 * 10, _ra);
_mm512_store_ps(tmpptr + 16 * 11, _rb);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 192;
}
}
for (; i + 7 < tiles; i += 8)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x8
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _r4 = _mm512_load_ps(r0 + 16 * 4);
__m512 _r5 = _mm512_load_ps(r0 + 16 * 5);
__m512 _r6 = _mm512_load_ps(r0 + 16 * 6);
__m512 _r7 = _mm512_load_ps(r0 + 16 * 7);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_unpacklo_ps(_r4, _r5);
__m512 _tmp5 = _mm512_unpackhi_ps(_r4, _r5);
__m512 _tmp6 = _mm512_unpacklo_ps(_r6, _r7);
__m512 _tmp7 = _mm512_unpackhi_ps(_r6, _r7);
__m512 _tmp8 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp9 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpa = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpb = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpc = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpd = _mm512_shuffle_ps(_tmp4, _tmp6, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmpe = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmpf = _mm512_shuffle_ps(_tmp5, _tmp7, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(2, 0, 2, 0));
_tmp3 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(2, 0, 2, 0));
_tmp4 = _mm512_shuffle_f32x4(_tmp8, _tmpc, _MM_SHUFFLE(3, 1, 3, 1));
_tmp5 = _mm512_shuffle_f32x4(_tmp9, _tmpd, _MM_SHUFFLE(3, 1, 3, 1));
_tmp6 = _mm512_shuffle_f32x4(_tmpa, _tmpe, _MM_SHUFFLE(3, 1, 3, 1));
_tmp7 = _mm512_shuffle_f32x4(_tmpb, _tmpf, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_r3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_r4 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r5 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_r6 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_r7 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
_mm512_store_ps(tmpptr + 16 * 4, _r4);
_mm512_store_ps(tmpptr + 16 * 5, _r5);
_mm512_store_ps(tmpptr + 16 * 6, _r6);
_mm512_store_ps(tmpptr + 16 * 7, _r7);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 128;
}
}
for (; i + 3 < tiles; i += 4)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x4
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _r2 = _mm512_load_ps(r0 + 16 * 2);
__m512 _r3 = _mm512_load_ps(r0 + 16 * 3);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_unpacklo_ps(_r2, _r3);
__m512 _tmp3 = _mm512_unpackhi_ps(_r2, _r3);
__m512 _tmp4 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp5 = _mm512_shuffle_ps(_tmp0, _tmp2, _MM_SHUFFLE(3, 2, 3, 2));
__m512 _tmp6 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(1, 0, 1, 0));
__m512 _tmp7 = _mm512_shuffle_ps(_tmp1, _tmp3, _MM_SHUFFLE(3, 2, 3, 2));
_tmp0 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(2, 0, 2, 0));
_tmp1 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(2, 0, 2, 0));
_tmp2 = _mm512_shuffle_f32x4(_tmp4, _tmp5, _MM_SHUFFLE(3, 1, 3, 1));
_tmp3 = _mm512_shuffle_f32x4(_tmp6, _tmp7, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r3 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
_mm512_store_ps(tmpptr + 16 * 2, _r2);
_mm512_store_ps(tmpptr + 16 * 3, _r3);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 64;
}
}
for (; i + 1 < tiles; i += 2)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
// transpose 16x2
__m512 _r0 = _mm512_load_ps(r0);
__m512 _r1 = _mm512_load_ps(r0 + 16);
__m512 _tmp0 = _mm512_unpacklo_ps(_r0, _r1);
__m512 _tmp1 = _mm512_unpackhi_ps(_r0, _r1);
__m512 _tmp2 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(2, 0, 2, 0));
__m512 _tmp3 = _mm512_shuffle_f32x4(_tmp0, _tmp1, _MM_SHUFFLE(3, 1, 3, 1));
_r0 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(2, 0, 2, 0));
_r1 = _mm512_shuffle_f32x4(_tmp2, _tmp3, _MM_SHUFFLE(3, 1, 3, 1));
_mm512_store_ps(tmpptr, _r0);
_mm512_store_ps(tmpptr + 16, _r1);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 32;
}
}
for (; i < tiles; i++)
{
float* tmpptr = tm2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 16;
for (int q = 0; q < inch; q++)
{
__m512 _val = _mm512_load_ps(r0);
_mm512_store_ps(tmpptr, _val);
r0 += bottom_blob_tm.cstep * 16;
tmpptr += 16;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u * elempack, elempack, opt.workspace_allocator);
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
float* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
for (; i + 11 < tiles; i += 12)
{
const float* r0 = bb2.row(i / 12);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
__m512 _sum8 = _mm512_setzero_ps();
__m512 _sum9 = _mm512_setzero_ps();
__m512 _suma = _mm512_setzero_ps();
__m512 _sumb = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
__m512 _val8 = _mm512_set1_ps(r0[8]);
__m512 _val9 = _mm512_set1_ps(r0[9]);
_sum8 = _mm512_fmadd_ps(_val8, _w0, _sum8);
_sum9 = _mm512_fmadd_ps(_val9, _w0, _sum9);
__m512 _vala = _mm512_set1_ps(r0[10]);
__m512 _valb = _mm512_set1_ps(r0[11]);
_suma = _mm512_fmadd_ps(_vala, _w0, _suma);
_sumb = _mm512_fmadd_ps(_valb, _w0, _sumb);
r0 += 12;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
_mm512_store_ps(output0_tm + 16 * 8, _sum8);
_mm512_store_ps(output0_tm + 16 * 9, _sum9);
_mm512_store_ps(output0_tm + 16 * 10, _suma);
_mm512_store_ps(output0_tm + 16 * 11, _sumb);
output0_tm += 16 * 12;
}
for (; i + 7 < tiles; i += 8)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
__m512 _sum4 = _mm512_setzero_ps();
__m512 _sum5 = _mm512_setzero_ps();
__m512 _sum6 = _mm512_setzero_ps();
__m512 _sum7 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
__m512 _val4 = _mm512_set1_ps(r0[4]);
__m512 _val5 = _mm512_set1_ps(r0[5]);
_sum4 = _mm512_fmadd_ps(_val4, _w0, _sum4);
_sum5 = _mm512_fmadd_ps(_val5, _w0, _sum5);
__m512 _val6 = _mm512_set1_ps(r0[6]);
__m512 _val7 = _mm512_set1_ps(r0[7]);
_sum6 = _mm512_fmadd_ps(_val6, _w0, _sum6);
_sum7 = _mm512_fmadd_ps(_val7, _w0, _sum7);
r0 += 8;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
_mm512_store_ps(output0_tm + 16 * 4, _sum4);
_mm512_store_ps(output0_tm + 16 * 5, _sum5);
_mm512_store_ps(output0_tm + 16 * 6, _sum6);
_mm512_store_ps(output0_tm + 16 * 7, _sum7);
output0_tm += 16 * 8;
}
for (; i + 3 < tiles; i += 4)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
__m512 _sum2 = _mm512_setzero_ps();
__m512 _sum3 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
__m512 _val2 = _mm512_set1_ps(r0[2]);
__m512 _val3 = _mm512_set1_ps(r0[3]);
_sum2 = _mm512_fmadd_ps(_val2, _w0, _sum2);
_sum3 = _mm512_fmadd_ps(_val3, _w0, _sum3);
r0 += 4;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
_mm512_store_ps(output0_tm + 16 * 2, _sum2);
_mm512_store_ps(output0_tm + 16 * 3, _sum3);
output0_tm += 16 * 4;
}
for (; i + 1 < tiles; i += 2)
{
const float* r0 = bb2.row(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2);
const float* k0 = kernel0_tm.row(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
__m512 _sum1 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
__m512 _val1 = _mm512_set1_ps(r0[1]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
_sum1 = _mm512_fmadd_ps(_val1, _w0, _sum1);
r0 += 2;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
_mm512_store_ps(output0_tm + 16, _sum1);
output0_tm += 16 * 2;
}
for (; i < tiles; i++)
{
const float* r0 = bb2.row<const float>(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2);
const float* k0 = kernel0_tm.row<const float>(r);
int nn = inch * 16; // inch always > 0
__m512 _sum0 = _mm512_setzero_ps();
for (int j = 0; j < nn; j++)
{
__m512 _w0 = _mm512_load_ps(k0);
__m512 _val0 = _mm512_set1_ps(r0[0]);
_sum0 = _mm512_fmadd_ps(_val0, _w0, _sum0);
r0 += 1;
k0 += 16;
}
_mm512_store_ps(output0_tm, _sum0);
output0_tm += 16;
}
}
}
}
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, elemsize, elempack, opt.workspace_allocator);
}
{
conv3x3s1_winograd43_transform_output_pack16_avx512(top_blob_tm, top_blob_bordered, bias, opt);
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
common.h | #ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_
#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>
#include <cstdio>
#include <string>
#include <vector>
#include <sstream>
#include <cstdint>
#include <algorithm>
#include <cmath>
#include <functional>
#include <memory>
#include <iterator>
#include <type_traits>
#include <iomanip>
#ifdef _MSC_VER
#include "intrin.h"
#endif
namespace LightGBM {
namespace Common {
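// ASCII-only lowercase conversion; unlike std::tolower it ignores the locale
// and leaves bytes outside 'A'..'Z' untouched.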
inline static char tolower(char in) {
if (in <= 'Z' && in >= 'A')
return in - ('Z' - 'z');
return in;
}
inline static std::string Trim(std::string str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
return str;
}
inline static std::string RemoveQuotationSymbol(std::string str) {
if (str.empty()) {
return str;
}
str.erase(str.find_last_not_of("'\"") + 1);
str.erase(0, str.find_first_not_of("'\""));
return str;
}
inline static bool StartsWith(const std::string& str, const std::string& prefix) {
return str.compare(0, prefix.size(), prefix) == 0;
}
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == delimiter) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::vector<std::string> SplitLines(const char* c_str) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
if (str[pos] == '\n' || str[pos] == '\r') {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
// skip the line endings
while (str[pos] == '\n' || str[pos] == '\r') ++pos;
// new begin
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
std::vector<std::string> ret;
std::string str(c_str);
size_t i = 0;
size_t pos = 0;
while (pos < str.length()) {
bool met_delimiters = false;
for (int j = 0; delimiters[j] != '\0'; ++j) {
if (str[pos] == delimiters[j]) {
met_delimiters = true;
break;
}
}
if (met_delimiters) {
if (i < pos) {
ret.push_back(str.substr(i, pos - i));
}
++pos;
i = pos;
} else {
++pos;
}
}
if (i < pos) {
ret.push_back(str.substr(i));
}
return ret;
}
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
int sign;
T value;
while (*p == ' ') {
++p;
}
sign = 1;
if (*p == '-') {
sign = -1;
++p;
} else if (*p == '+') {
++p;
}
for (value = 0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10 + (*p - '0');
}
*out = static_cast<T>(sign * value);
while (*p == ' ') {
++p;
}
return p;
}
template<typename T>
inline static double Pow(T base, int power) {
if (power < 0) {
return 1.0 / Pow(base, -power);
} else if (power == 0) {
return 1;
} else if (power % 2 == 0) {
return Pow(base*base, power / 2);
} else if (power % 3 == 0) {
return Pow(base*base*base, power / 3);
} else {
return base * Pow(base, power - 1);
}
}
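// A minimal usage sketch (hypothetical values): Pow(2.0, 10) returns 1024.0,
// Pow(2.0, -2) returns 0.25, and Pow(x, 0) returns 1 for any base x.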
inline static const char* Atof(const char* p, double* out) {
int frac;
double sign, value, scale;
*out = NAN;
// Skip leading white space, if any.
while (*p == ' ') {
++p;
}
// Get sign, if any.
sign = 1.0;
if (*p == '-') {
sign = -1.0;
++p;
} else if (*p == '+') {
++p;
}
// is a number
if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
// Get digits before decimal point or exponent, if any.
for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
value = value * 10.0 + (*p - '0');
}
// Get digits after decimal point, if any.
if (*p == '.') {
double right = 0.0;
int nn = 0;
++p;
while (*p >= '0' && *p <= '9') {
right = (*p - '0') + right * 10.0;
++nn;
++p;
}
value += right / Pow(10.0, nn);
}
// Handle exponent, if any.
frac = 0;
scale = 1.0;
if ((*p == 'e') || (*p == 'E')) {
uint32_t expon;
// Get sign of exponent, if any.
++p;
if (*p == '-') {
frac = 1;
++p;
} else if (*p == '+') {
++p;
}
// Get digits of exponent, if any.
for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
expon = expon * 10 + (*p - '0');
}
if (expon > 308) expon = 308;
// Calculate scaling factor.
while (expon >= 50) { scale *= 1E50; expon -= 50; }
while (expon >= 8) { scale *= 1E8; expon -= 8; }
while (expon > 0) { scale *= 10.0; expon -= 1; }
}
// Return signed and scaled floating point result.
*out = sign * (frac ? (value / scale) : (value * scale));
} else {
size_t cnt = 0;
while (*(p + cnt) != '\0' && *(p + cnt) != ' '
&& *(p + cnt) != '\t' && *(p + cnt) != ','
&& *(p + cnt) != '\n' && *(p + cnt) != '\r'
&& *(p + cnt) != ':') {
++cnt;
}
if (cnt > 0) {
std::string tmp_str(p, cnt);
std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
if (tmp_str == std::string("na") || tmp_str == std::string("nan") ||
tmp_str == std::string("null")) {
*out = NAN;
} else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
*out = sign * 1e308;
} else {
Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
}
p += cnt;
}
}
while (*p == ' ') {
++p;
}
return p;
}
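// A minimal usage sketch (hypothetical input): for p = "-1.5e2 rest",
// Atof(p, &v) sets v to -150.0 and returns a pointer to "rest".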
inline static bool AtoiAndCheck(const char* p, int* out) {
const char* after = Atoi(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline static bool AtofAndCheck(const char* p, double* out) {
const char* after = Atof(p, out);
if (*after != '\0') {
return false;
}
return true;
}
inline static unsigned CountDecimalDigit32(uint32_t n) {
#if defined(_MSC_VER) || defined(__GNUC__)
static const uint32_t powers_of_10[] = {
0,
10,
100,
1000,
10000,
100000,
1000000,
10000000,
100000000,
1000000000
};
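// (number_of_bits * 1233) >> 12 approximates number_of_bits * log10(2),
// since log10(2) ~= 0.30103 ~= 1233 / 4096; the powers_of_10 lookup then
// corrects the estimate by at most one.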
#ifdef _MSC_VER
unsigned long i = 0;
_BitScanReverse(&i, n | 1);
uint32_t t = (i + 1) * 1233 >> 12;
#elif __GNUC__
uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
#endif
return t - (n < powers_of_10[t]) + 1;
#else
if (n < 10) return 1;
if (n < 100) return 2;
if (n < 1000) return 3;
if (n < 10000) return 4;
if (n < 100000) return 5;
if (n < 1000000) return 6;
if (n < 10000000) return 7;
if (n < 100000000) return 8;
if (n < 1000000000) return 9;
return 10;
#endif
}
inline static void Uint32ToStr(uint32_t value, char* buffer) {
const char kDigitsLut[200] = {
'0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9',
'1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9',
'2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9',
'3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9',
'4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9',
'5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9',
'6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
'7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9',
'8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9',
'9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'
};
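// kDigitsLut holds the two-character decimal expansion of 00..99, letting
// the loop below emit two digits per division while writing back-to-front.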
unsigned digit = CountDecimalDigit32(value);
buffer += digit;
*buffer = '\0';
while (value >= 100) {
const unsigned i = (value % 100) << 1;
value /= 100;
*--buffer = kDigitsLut[i + 1];
*--buffer = kDigitsLut[i];
}
if (value < 10) {
*--buffer = char(value) + '0';
}
else {
const unsigned i = value << 1;
*--buffer = kDigitsLut[i + 1];
*--buffer = kDigitsLut[i];
}
}
inline static void Int32ToStr(int32_t value, char* buffer) {
uint32_t u = static_cast<uint32_t>(value);
if (value < 0) {
*buffer++ = '-';
u = ~u + 1;
}
Uint32ToStr(u, buffer);
}
inline static void DoubleToStr(double value, char* buffer, size_t
#ifdef _MSC_VER
buffer_len
#endif
) {
#ifdef _MSC_VER
sprintf_s(buffer, buffer_len, "%.17g", value);
#else
sprintf(buffer, "%.17g", value);
#endif
}
inline static const char* SkipSpaceAndTab(const char* p) {
while (*p == ' ' || *p == '\t') {
++p;
}
return p;
}
inline static const char* SkipReturn(const char* p) {
while (*p == '\n' || *p == '\r' || *p == ' ') {
++p;
}
return p;
}
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
std::vector<T2> ret(arr.size());
for (size_t i = 0; i < arr.size(); ++i) {
ret[i] = static_cast<T2>(arr[i]);
}
return ret;
}
template<typename T, bool is_float, bool is_unsign>
struct __TToStringHelperFast {
void operator()(T value, char* buffer, size_t) const {
Int32ToStr(value, buffer);
}
};
template<typename T>
struct __TToStringHelperFast<T, true, false> {
void operator()(T value, char* buffer, size_t
#ifdef _MSC_VER
buf_len
#endif
) const {
#ifdef _MSC_VER
sprintf_s(buffer, buf_len, "%g", value);
#else
sprintf(buffer, "%g", value);
#endif
}
};
template<typename T>
struct __TToStringHelperFast<T, false, true> {
void operator()(T value, char* buffer, size_t) const {
Uint32ToStr(value, buffer);
}
};
template<typename T>
inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
__TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper;
const size_t buf_len = 16;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
helper(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
helper(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) {
if (arr.empty() || n == 0) {
return std::string("");
}
const size_t buf_len = 32;
std::vector<char> buffer(buf_len);
std::stringstream str_buf;
DoubleToStr(arr[0], buffer.data(), buf_len);
str_buf << buffer.data();
for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
DoubleToStr(arr[i], buffer.data(), buf_len);
str_buf << ' ' << buffer.data();
}
return str_buf.str();
}
template<typename T, bool is_float>
struct __StringToTHelper {
T operator()(const std::string& str) const {
T ret = 0;
Atoi(str.c_str(), &ret);
return ret;
}
};
template<typename T>
struct __StringToTHelper<T, true> {
T operator()(const std::string& str) const {
return static_cast<T>(std::stod(str));
}
};
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
std::vector<std::string> strs = Split(str.c_str(), delimiter);
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
std::vector<std::string> strs = Split(str.c_str(), ' ');
CHECK(strs.size() == static_cast<size_t>(n));
std::vector<T> ret;
ret.reserve(strs.size());
__StringToTHelper<T, std::is_floating_point<T>::value> helper;
for (const auto& s : strs) {
ret.push_back(helper(s));
}
return ret;
}
template<typename T, bool is_float>
struct __StringToTHelperFast {
const char* operator()(const char*p, T* out) const {
return Atoi(p, out);
}
};
template<typename T>
struct __StringToTHelperFast<T, true> {
const char* operator()(const char*p, T* out) const {
double tmp = 0.0;
auto ret = Atof(p, &tmp);
*out = static_cast<T>(tmp);
return ret;
}
};
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
if (n == 0) {
return std::vector<T>();
}
auto p_str = str.c_str();
__StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
std::vector<T> ret(n);
for (int i = 0; i < n; ++i) {
p_str = helper(p_str, &ret[i]);
}
return ret;
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
if (strs.empty()) {
return std::string("");
}
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[0];
for (size_t i = 1; i < strs.size(); ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
if (end <= start) {  // avoid size_t underflow when end < start
return std::string("");
}
start = std::min(start, static_cast<size_t>(strs.size()) - 1);
end = std::min(end, static_cast<size_t>(strs.size()));
std::stringstream str_buf;
str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
str_buf << strs[start];
for (size_t i = start + 1; i < end; ++i) {
str_buf << delimiter;
str_buf << strs[i];
}
return str_buf.str();
}
inline static int64_t Pow2RoundUp(int64_t x) {
int64_t t = 1;
for (int i = 0; i < 64; ++i) {
if (t >= x) {
return t;
}
t <<= 1;
}
return 0;
}
/*!
* \brief Do an in-place softmax transformation on p_rec
* \param p_rec The input/output vector of the values.
*/
inline static void Softmax(std::vector<double>* p_rec) {
std::vector<double> &rec = *p_rec;
double wmax = rec[0];
for (size_t i = 1; i < rec.size(); ++i) {
wmax = std::max(rec[i], wmax);
}
double wsum = 0.0;
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] = std::exp(rec[i] - wmax);
wsum += rec[i];
}
for (size_t i = 0; i < rec.size(); ++i) {
rec[i] /= static_cast<double>(wsum);
}
}
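// Subtracting wmax before exponentiation keeps exp() in a safe range: for
// inputs {1000, 1001} this computes exp(-1) and exp(0) instead of overflowing.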
inline static void Softmax(const double* input, double* output, int len) {
double wmax = input[0];
for (int i = 1; i < len; ++i) {
wmax = std::max(input[i], wmax);
}
double wsum = 0.0;
for (int i = 0; i < len; ++i) {
output[i] = std::exp(input[i] - wmax);
wsum += output[i];
}
for (int i = 0; i < len; ++i) {
output[i] /= static_cast<double>(wsum);
}
}
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
std::vector<const T*> ret;
for (size_t i = 0; i < input.size(); ++i) {
ret.push_back(input.at(i).get());
}
return ret;
}
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>& keys, std::vector<T2>& values, size_t start, bool is_reverse = false) {
std::vector<std::pair<T1, T2>> arr;
for (size_t i = start; i < keys.size(); ++i) {
arr.emplace_back(keys[i], values[i]);
}
if (!is_reverse) {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first < b.first;
});
} else {
std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
return a.first > b.first;
});
}
for (size_t i = start; i < arr.size(); ++i) {
keys[i] = arr[i].first;
values[i] = arr[i].second;
}
}
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>& data) {
std::vector<T*> ptr(data.size());
for (size_t i = 0; i < data.size(); ++i) {
ptr[i] = data[i].data();
}
return ptr;
}
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
std::vector<int> ret(data.size());
for (size_t i = 0; i < data.size(); ++i) {
ret[i] = static_cast<int>(data[i].size());
}
return ret;
}
inline static double AvoidInf(double x) {
if (x >= 1e300) {
return 1e300;
} else if (x <= -1e300) {
return -1e300;
} else {
return x;
}
}
inline static float AvoidInf(float x) {
if (x >= 1e38) {
return 1e38f;
} else if (x <= -1e38) {
return -1e38f;
} else {
return x;
}
}
template<typename _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
return nullptr;
}
template<typename _RanIt, typename _Pr, typename _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
size_t len = _Last - _First;
const size_t kMinInnerLen = 1024;
int num_threads = 1;
#pragma omp parallel
#pragma omp master
{
num_threads = omp_get_num_threads();
}
if (len <= kMinInnerLen || num_threads <= 1) {
std::sort(_First, _Last, _Pred);
return;
}
size_t inner_size = (len + num_threads - 1) / num_threads;
inner_size = std::max(inner_size, kMinInnerLen);
num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < num_threads; ++i) {
size_t left = inner_size*i;
size_t right = left + inner_size;
right = std::min(right, len);
if (right > left) {
std::sort(_First + left, _First + right, _Pred);
}
}
// Buffer for merge.
std::vector<_VTRanIt> temp_buf(len);
_RanIt buf = temp_buf.begin();
size_t s = inner_size;
// Recursive merge
while (s < len) {
int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
#pragma omp parallel for schedule(static, 1)
for (int i = 0; i < loop_size; ++i) {
size_t left = i * 2 * s;
size_t mid = left + s;
size_t right = mid + s;
right = std::min(len, right);
if (mid >= right) { continue; }
std::copy(_First + left, _First + mid, buf + left);
std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
}
s *= 2;
}
}
template<typename _RanIt, typename _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}
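// A minimal usage sketch (hypothetical data, assuming <functional> for
// std::less): chunks of roughly len/num_threads elements are sorted in
// parallel, then merged pairwise over log2(num_threads) rounds, e.g.
//   std::vector<int> v = {5, 1, 4, 2, 3};
//   ParallelSort(v.begin(), v.end(), std::less<int>());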
// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
std::ostringstream os;
os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
Log::Fatal(os.str().c_str(), callername, i);
};
for (int i = 1; i < ny; i += 2) {
if (y[i - 1] < y[i]) {
if (y[i - 1] < ymin) {
fatal_msg(i - 1);
} else if (y[i] > ymax) {
fatal_msg(i);
}
} else {
if (y[i - 1] > ymax) {
fatal_msg(i - 1);
} else if (y[i] < ymin) {
fatal_msg(i);
}
}
}
if (ny & 1) { // odd
if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
fatal_msg(ny - 1);
}
}
}
// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
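// Elements are consumed in pairs: comparing the two pair members first and
// then updating min/max costs 3 comparisons per 2 elements instead of 4.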
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
T1 minw;
T1 maxw;
T1 sumw;
int i;
if (nw & 1) { // odd
minw = w[0];
maxw = w[0];
sumw = w[0];
i = 2;
} else { // even
if (w[0] < w[1]) {
minw = w[0];
maxw = w[1];
} else {
minw = w[1];
maxw = w[0];
}
sumw = w[0] + w[1];
i = 3;
}
for (; i < nw; i += 2) {
if (w[i - 1] < w[i]) {
minw = std::min(minw, w[i - 1]);
maxw = std::max(maxw, w[i]);
} else {
minw = std::min(minw, w[i]);
maxw = std::max(maxw, w[i - 1]);
}
sumw += w[i - 1] + w[i];
}
if (mi != nullptr) {
*mi = minw;
}
if (ma != nullptr) {
*ma = maxw;
}
if (su != nullptr) {
*su = static_cast<T2>(sumw);
}
}
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
std::vector<uint32_t> ret;
for (int i = 0; i < n; ++i) {
int i1 = vals[i] / 32;
int i2 = vals[i] % 32;
if (static_cast<int>(ret.size()) < i1 + 1) {
ret.resize(i1 + 1, 0);
}
ret[i1] |= (1 << i2);
}
return ret;
}
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
int i1 = pos / 32;
if (i1 >= n) {
return false;
}
int i2 = pos % 32;
return (bits[i1] >> i2) & 1;
}
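// Worked example of the two helpers above (for illustration): ConstructBitset
// on vals = {1, 33} yields ret = {0x2, 0x2} -- bit 1 of word 0 and bit 1 of
// word 1 -- so FindInBitset(ret.data(), 2, 33) is true while
// FindInBitset(ret.data(), 2, 2) is false.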
inline static bool CheckDoubleEqualOrdered(double a, double b) {
double upper = std::nextafter(a, INFINITY);
return b <= upper;
}
inline static double GetDoubleUpperBound(double a) {
return std::nextafter(a, INFINITY);
}
inline static size_t GetLine(const char* str) {
auto start = str;
while (*str != '\0' && *str != '\n' && *str != '\r') {
++str;
}
return str - start;
}
inline static const char* SkipNewLine(const char* str) {
if (*str == '\r') {
++str;
}
if (*str == '\n') {
++str;
}
return str;
}
template <typename T>
static int Sign(T x) {
return (x > T(0)) - (x < T(0));
}
template <typename T>
static T SafeLog(T x) {
if (x > 0) {
return std::log(x);
} else {
return -INFINITY;
}
}
} // namespace Common
} // namespace LightGBM
#endif // LightGBM_UTILS_COMMON_FUN_H_
|
10_omp_empty.c | // clang-format off
// RUN: %c-to-llvm -fno-discard-value-names %omp_c_flags %s | %apply-typeart -typeart-alloca -call-filter -S 2>&1 | FileCheck %s
// REQUIRES: openmp
// clang-format on
#include "omp.h"
// CHECK-NOT: {{.*}} __typeart_alloc
void foo(int* x) {
#pragma omp parallel // transformed to @__kmpc_fork_call
{ *x = -1; }
#pragma omp parallel for
for (int i = 0; i < x[10]; ++i) {
x[i] = i;
}
}
// Standard filter
// CHECK: > Stack Memory
// CHECK-NEXT: Alloca :
// CHECK-NEXT: Stack call filtered % : 100.00 |
EmbeddingBag.h | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Dhiraj Kalamkar, Evangelos Georganas (Intel Corp.)
******************************************************************************/
#if defined(USE_LIBXSMM_JIT)
#include <libxsmm.h>
#endif
#include "utils.h"
#include "rtm.h"
template <typename T>
class EmbeddingBagImpl
{
public:
EmbeddingBagImpl(long M, long E) : M(M), E(E)
{
weight_ = (T*)my_malloc((size_t)M * E * sizeof(T), alignment); /* weights are needed by both the JIT and the fallback path */
#ifdef USE_LIBXSMM_JIT
_ld = E; /* set the leading dimension before the shapes below use it */
libxsmm_meltw_unary_shape unary_shape_f32 = libxsmm_create_meltw_unary_shape( E, 0, _ld, _ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 );
libxsmm_meltw_unary_shape unary_shape_f16 = libxsmm_create_meltw_unary_shape( E, 0, _ld, _ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 );
libxsmm_meltw_binary_shape binary_shape_f32 = libxsmm_create_meltw_binary_shape( E, 1, _ld, _ld, _ld, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32, LIBXSMM_DATATYPE_F32 );
if (sizeof(T) == 4) {
kernel = libxsmm_dispatch_meltw_unary_v2( LIBXSMM_MELTW_TYPE_UNARY_REDUCE_COLS_IDX, unary_shape_f32, (sizeof(long) == 8) ? LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_8BYTES : LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_4BYTES );
} else {
kernel = libxsmm_dispatch_meltw_unary_v2( LIBXSMM_MELTW_TYPE_UNARY_REDUCE_COLS_IDX, unary_shape_f16, (sizeof(long) == 8) ? LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_8BYTES : LIBXSMM_MELTW_FLAG_UNARY_IDX_SIZE_4BYTES );
}
kernel1 = libxsmm_dispatch_meltw_unary_v2( LIBXSMM_MELTW_TYPE_UNARY_REPLICATE_COL_VAR, unary_shape_f32, LIBXSMM_MELTW_FLAG_UNARY_NONE );
kernel2 = libxsmm_dispatch_meltw_binary_v2( LIBXSMM_MELTW_TYPE_BINARY_MULADD, binary_shape_f32, LIBXSMM_MELTW_FLAG_BINARY_BCAST_SCALAR_IN_0 );
#endif
}
~EmbeddingBagImpl()
{
my_free(weight_);
weight_ = 0;
}
void init(T low = -0.1, T high = 0.1)
{
init_random(M * E, weight_, low, high);
}
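// Forward semantics (sum-pooling embedding bag): for each output row n,
// output[n][:] = sum over s in [offsets[n], offsets[n+1]) of weight[indices[s]][:],
// with NS serving as the end offset of the last bag. The JIT path maps each
// bag to a single LIBXSMM reduce-cols-by-index kernel call.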
#ifdef USE_LIBXSMM_JIT
void forward(long N, long NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[E])weight_;
T(*__restrict output)[E] = (T(*)[E])output_;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
libxsmm_meltw_unary_param params;
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
unsigned long long __n = end-start;
params.in.primary = weight;
params.in.secondary = (void*)&indices[start];
params.in.tertiary = &__n;
params.out.primary = &output[n][0];
kernel( &params );
}
}
#else
void forward(long N, long NS, const long *offsets, const long *indices, T *output_)
{
T(*__restrict weight)[E] = (T(*)[E])weight_;
T(*__restrict output)[E] = (T(*)[E])output_;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
#pragma omp simd
for (long v = 0; v < E; v++)
output[n][v] = 0;
for (long s = start; s < end; s++)
{
auto ind = indices[s];
#pragma omp simd
for (long v = 0; v < E; v++)
{
output[n][v] += weight[ind][v];
}
}
}
}
#endif
#ifdef USE_LIBXSMM_JIT
void backward(long N, long NS, const T *gradout_, const long *offsets, const long *indices, T *values_)
{
T(*__restrict gradout)[E] = (T(*)[E])gradout_;
T(*__restrict values)[E] = (T(*)[E])values_;
int _ld = E;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
libxsmm_meltw_unary_param unary_param;
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
unsigned long long _N = end-start;
unary_param.in.primary = (void*)&gradout[n][0];
unary_param.out.primary = (void*)&values[start][0];
unary_param.op.primary = (void*)&_N;
kernel1(&unary_param);
}
}
#else
void backward(long N, long NS, const T *gradout_, const long *offsets, const long *indices, T *values_)
{
T(*__restrict gradout)[E] = (T(*)[E])gradout_;
T(*__restrict values)[E] = (T(*)[E])values_;
#pragma omp parallel for
for (long n = 0; n < N; n++)
{
auto start = offsets[n];
auto end = (n < N - 1 ? offsets[n + 1] : NS);
for (long s = start; s < end; s++)
{
#pragma omp simd
#ifdef STREAMING_WRITES
#pragma vector nontemporal(values)
#endif
for (long v = 0; v < E; v++)
values[s][v] = gradout[n][v];
}
}
}
#endif
#ifdef USE_LIBXSMM_JIT
void update(long NS, const T *grads_, const long *indices, float lr, long M, int use_rtm)
{
int use_lock_free = use_rtm == 0 ? 1: 0;
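/*
 * Lock-free strategy: thread t updates only the rows whose index satisfies
 * ind % max_thr == t, so no two threads ever write the same weight row and
 * no synchronization is needed. Duplicate indices within a batch are safe
 * because they always map to the same owning thread.
 */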
T(*__restrict weight)[E] = (T(*)[E])weight_;
T(*__restrict grads)[E] = (T(*)[E])grads_;
int _ld = E;
if(use_lock_free) {
/*printf("Using lock free update\n");*/
int max_thr = omp_get_max_threads();
if(M < max_thr) max_thr = M;
#pragma omp parallel num_threads(max_thr)
{
int tid = omp_get_thread_num();
for(long i = 0; i < NS; i++) {
auto ind = indices[i];
if(ind % max_thr == tid) {
libxsmm_meltw_binary_param binary_param;
binary_param.in0.primary = (void*)&lr;
binary_param.in1.primary = (void*)&grads[i][0];
binary_param.out.primary = (void*)&weight[ind][0];
{
kernel2(&binary_param);
}
}
}
}
} else {
SimpleSpinLock fallBackLock;
#pragma omp parallel for
for (long i = 0; i < NS; i++)
{
libxsmm_meltw_binary_param binary_param;
long ind = indices[i];
binary_param.in0.primary = (void*)&lr;
binary_param.in1.primary = (void*)&grads[i][0];
binary_param.out.primary = (void*)&weight[ind][0];
{
TransactionScope guard(fallBackLock, 100, 0);
kernel2(&binary_param);
}
}
}
}
#else
void update(long NS, const T *grads_, const long *indices, float lr, long M, int use_rtm)
{
T(*__restrict weight)[E] = (T(*)[E])weight_;
T(*__restrict grads)[E] = (T(*)[E])grads_;
int use_lock_free = use_rtm == 0 ? 1: 0;
if(use_lock_free) {
int max_thr = omp_get_max_threads();
if(M < max_thr) max_thr = M;
#pragma omp parallel num_threads(max_thr)
{
int tid = omp_get_thread_num();
for(long i = 0; i < NS; i++) {
auto ind = indices[i];
if(ind % max_thr == tid) {
#pragma omp simd
for (long v = 0; v < E; v++)
weight[ind][v] += lr * grads[i][v];
}
}
}
} else {
SimpleSpinLock fallBackLock;
#pragma omp parallel for
for (long i = 0; i < NS; i++)
{
long ind = indices[i];
{
TransactionScope guard(fallBackLock, 100, 0);
#pragma omp simd
for (long v = 0; v < E; v++)
weight[ind][v] += lr * grads[i][v];
}
}
}
}
#endif
T *weight_;
long M;
long E;
#ifdef USE_LIBXSMM_JIT
int _ld;
libxsmm_meltwfunction_unary kernel;
libxsmm_meltwfunction_unary kernel1;
libxsmm_meltwfunction_binary kernel2;
#endif
};
|
fatorial.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
long fat(int n) {
long res;
int i;
res = 1;
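/*
 * reduction(*:res): each thread multiplies into a private copy of res,
 * initialized to 1 (the identity of *); the copies are combined at the end
 * of the loop. Note that a long overflows for n > 20 on 64-bit targets.
 */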
#pragma omp parallel for reduction(*:res)
for(i = 2; i <= n; i++){
res *= i;
}
return res;
}
int main(int argc, char **argv) {
int n;
long resultado;
if(argc < 2){
printf("uso ./fatorial <numero natural>\n");
exit(1);
}
n = atoi(argv[1]);
if(n < 0){
printf("Erro! Numero de entrada não é natural\n");
exit(1);
}
// omp_set_num_threads(1);
printf("Calculando fatorial de %d com %d threads.\n", n, omp_get_max_threads());
resultado = fat(n);
printf("fatorial(%d) = %ld\n", n, resultado);
return 0;
}
|
swapCheck3.c | int main() {
int A = 5;
int B = 10;
int C;
int D = 0;
#pragma omp parallel
{
#pragma omp atomic
D = D + 1;
int x = 11;
while (1) {
#pragma omp single nowait
{
l1: C = A;
l2: A = B;
l3: B = C;
}
#pragma omp barrier
#pragma omp master
{
l4: D = D + A + B;
}
#pragma omp barrier
#pragma omp single nowait
{
l5: C = A;
l6: A = B;
l7: B = C;
}
#pragma omp barrier
#pragma omp master
{
l8: D = D + A + B;
}
#pragma omp barrier
#pragma omp single nowait
{
l9: C = A;
l10: A = B;
l11: B = C;
}
#pragma omp barrier
#pragma omp master
{
l12: D = D + A + B;
}
#pragma omp barrier
#pragma omp single nowait
{
l13: C = A;
l14: A = B;
l15: B = C;
}
#pragma omp barrier
#pragma omp master
{
l16: D = D + A + B;
}
#pragma omp barrier
if (x > 10) {
break;
}
}
}
}
|
segment_reduce.h | /*!
* Copyright (c) 2020 by Contributors
 * \file array/cpu/segment_reduce.h
* \brief Segment reduce kernel function header.
*/
#ifndef DGL_ARRAY_CPU_SEGMENT_REDUCE_H_
#define DGL_ARRAY_CPU_SEGMENT_REDUCE_H_
#include <dgl/array.h>
namespace dgl {
namespace aten {
namespace cpu {
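// Offsets convention used below (illustrative example): with offsets = {0, 2, 5},
// SegmentSum computes out[0] = feat[0] + feat[1] and
// out[1] = feat[2] + feat[3] + feat[4]; it accumulates with +=, so the caller
// is assumed to pass a zero-initialized out.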
template <typename IdType, typename DType>
void SegmentSum(NDArray feat, NDArray offsets, NDArray out) {
int n = out->shape[0];
int dim = 1;
for (int i = 1; i < out->ndim; ++i)
dim *= out->shape[i];
const DType* feat_data = feat.Ptr<DType>();
const IdType* offsets_data = offsets.Ptr<IdType>();
DType *out_data = out.Ptr<DType>();
#pragma omp parallel for
for (int i = 0; i < n; ++i) {
for (IdType j = offsets_data[i]; j < offsets_data[i + 1]; ++j) {
for (int k = 0; k < dim; ++k) {
out_data[i * dim + k] += feat_data[j * dim + k];
}
}
}
}
template <typename IdType, typename DType, typename Cmp>
void SegmentCmp(NDArray feat, NDArray offsets,
NDArray out, NDArray arg) {
int n = out->shape[0];
int dim = 1;
for (int i = 1; i < out->ndim; ++i)
dim *= out->shape[i];
const DType* feat_data = feat.Ptr<DType>();
const IdType* offsets_data = offsets.Ptr<IdType>();
DType *out_data = out.Ptr<DType>();
IdType *arg_data = arg.Ptr<IdType>();
std::fill(out_data, out_data + out.NumElements(), Cmp::zero);
std::fill(arg_data, arg_data + arg.NumElements(), -1);
#pragma omp parallel for
for (int i = 0; i < n; ++i) {
for (IdType j = offsets_data[i]; j < offsets_data[i + 1]; ++j) {
for (int k = 0; k < dim; ++k) {
const DType val = feat_data[j * dim + k];
if (Cmp::Call(out_data[i * dim + k], val)) {
out_data[i * dim + k] = val;
arg_data[i * dim + k] = j;
}
}
}
}
}
template <typename IdType, typename DType>
void BackwardSegmentCmp(NDArray feat, NDArray arg, NDArray out) {
int n = feat->shape[0];
int dim = 1;
for (int i = 1; i < out->ndim; ++i)
dim *= out->shape[i];
const DType* feat_data = feat.Ptr<DType>();
const IdType* arg_data = arg.Ptr<IdType>();
DType* out_data = out.Ptr<DType>();
#pragma omp parallel for
for (int i = 0; i < n; ++i) {
for (int k = 0; k < dim; ++k) {
IdType write_row = arg_data[i * dim + k];
if (write_row >= 0)
out_data[write_row * dim + k] = feat_data[i * dim + k];
}
}
}
} // namespace cpu
} // namespace aten
} // namespace dgl
#endif // DGL_ARRAY_CPU_SEGMENT_REDUCE_H_
|
mlp_mnist.c | /******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/libxsmm/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Evangelos Georganas (Intel Corp.)
******************************************************************************/
#include <libxsmm_dnn.h>
#include <dnn_common.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <math.h>
#if defined(_OPENMP)
# include <omp.h>
#endif
#define TEST_ACCURACY
/* include c-based dnn library */
#include "../../datasets/mnist/mnist.h"
LIBXSMM_INLINE void my_init_buf_mlp(float* buf, size_t size, int initPos, int initOne)
{
int i;
zero_buf(buf, size);
for (i = 0; i < (int)size; ++i) {
buf[i] = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
}
}
LIBXSMM_INLINE void my_init_buf_mlp_bf16(libxsmm_bfloat16* buf, size_t size, int initPos, int initOne)
{
int i;
zero_buf_bf16(buf, size);
for (i = 0; i < (int)size; ++i) {
libxsmm_bfloat16_f32 tmp;
tmp.f = (float)((initOne != 0) ? 1.0 : ((initPos != 0) ? libxsmm_rng_f64() : (0.05 - libxsmm_rng_f64()/10.0)));
buf[i] = tmp.i[1];
}
}
int main(int argc, char* argv[])
{
float **act_libxsmm, **fil_libxsmm, **delact_libxsmm, **delfil_libxsmm;
float **bias_libxsmm, **delbias_libxsmm;
libxsmm_bfloat16 **act_libxsmm_bf16, **fil_libxsmm_bf16, **delact_libxsmm_bf16, **delfil_libxsmm_bf16;
libxsmm_bfloat16 **bias_libxsmm_bf16, **delbias_libxsmm_bf16;
unsigned char **relumask_libxsmm;
int *label_libxsmm;
libxsmm_datatype in_dt, out_dt, comp_dt;
libxsmm_dnn_fc_eltw_fuse my_fuse;
libxsmm_dnn_fc_fwd_config* libxsmm_dnn_fc_fwd;
libxsmm_dnn_fc_bwd_config* libxsmm_dnn_fc_bwd;
libxsmm_dnn_opt_config* libxsmm_dnn_opt;
libxsmm_dnn_smax_fwd_config libxsmm_dnn_smax_fwd;
libxsmm_dnn_smax_bwd_config libxsmm_dnn_smax_bwd;
void* scratch = NULL;
size_t scratch_size = 0;
/* some parameters we can override via the CLI;
   the defaults correspond to some inner layer of overfeat */
int iters = 10; /* repetitions of benchmark */
int MB = 256; /* mini-batch size, "N" */
int bn = 32;
int bk = 32;
int bc = 32;
int *C; /* number of input feature maps, "C" */
int num_layers = 0;
int prec_bf16 = 0;
#if defined(_OPENMP)
int nThreads = omp_get_max_threads(); /* number of threads */
#else
int nThreads = 1; /* number of threads */
#endif
unsigned long long l_start, l_end;
double l_total = 0.0;
double gflop = 0.0;
int i, j;
double fil_size = 0.0;
double act_size = 0.0;
float lr = 0.1f;
float loss_weight = 1.0f;
float loss = 0.0;
libxsmm_matdiff_info norms_fwd, norms_bwd, norms_upd, diff;
libxsmm_matdiff_clear(&norms_fwd);
libxsmm_matdiff_clear(&norms_bwd);
libxsmm_matdiff_clear(&norms_upd);
libxsmm_matdiff_clear(&diff);
act_libxsmm = NULL;
fil_libxsmm = NULL;
delact_libxsmm = NULL;
delfil_libxsmm = NULL;
bias_libxsmm = NULL;
delbias_libxsmm = NULL;
act_libxsmm_bf16 = NULL;
fil_libxsmm_bf16 = NULL;
delact_libxsmm_bf16 = NULL;
delfil_libxsmm_bf16 = NULL;
bias_libxsmm_bf16 = NULL;
delbias_libxsmm_bf16 = NULL;
relumask_libxsmm = NULL;
label_libxsmm = NULL;
if (argc > 1 && !strncmp(argv[1], "-h", 3)) {
printf("Usage: %s iters MB bn bk bc prec_bf16 [C1 C2 ... CN-1]\n", argv[0]);
return 0;
}
libxsmm_rng_set_seed(1);
/* reading new values from cli */
i = 1;
num_layers = ( argc > 7 ) ? (argc - 8) + 2 : 1;
if (argc > i) iters = atoi(argv[i++]);
if (argc > i) MB = atoi(argv[i++]);
if (argc > i) bn = atoi(argv[i++]);
if (argc > i) bk = atoi(argv[i++]);
if (argc > i) bc = atoi(argv[i++]);
if (argc > i) prec_bf16 = atoi(argv[i++]);
/* allocate the number of channels buffer */
C = (int*)malloc((num_layers+2)*sizeof(int));
C[0] = 784;
if ( argc > 6 ) {
for (j = 1 ; i < argc; ++i, ++j ) {
C[j] = atoi(argv[i]);
}
} else {
j=1;
}
C[j] = 10;
/* handle softmax config */
C[num_layers+1] = C[num_layers];
#if defined(__SSE3__)
_MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);
_MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
_MM_SET_ROUNDING_MODE(_MM_ROUND_NEAREST);
#endif
if ( prec_bf16 > 0 ) {
in_dt = LIBXSMM_DATATYPE_BF16;
out_dt = LIBXSMM_DATATYPE_BF16;
comp_dt = LIBXSMM_DATATYPE_BF16;
} else {
in_dt = LIBXSMM_DATATYPE_F32;
out_dt = LIBXSMM_DATATYPE_F32;
comp_dt = LIBXSMM_DATATYPE_F32;
}
/* print some summary */
printf("##########################################\n");
printf("# Setting Up (Common) #\n");
printf("##########################################\n");
printf("PARAMS: N:%d\n", MB);
printf("PARAMS: Layers: %d\n", num_layers);
printf("PARAMS: ITERS:%d", iters); printf(" Threads:%d\n", nThreads);
for (i = 0; i < num_layers; ++i ) {
if (i == 0) {
act_size += (double)(MB*C[i]*LIBXSMM_TYPESIZE(in_dt))/(1024.0*1024.0);
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i, MB, C[i], (double)(MB*C[i]*LIBXSMM_TYPESIZE(in_dt))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[i+1]*LIBXSMM_TYPESIZE(in_dt))/(1024.0*1024.0);
fil_size += (double)(C[i]*C[i+1]*LIBXSMM_TYPESIZE(in_dt))/(1024.0*1024.0);
printf("SIZE Filter %i (%dx%d): %10.2f MiB\n", i, C[i], C[i+1], (double)(C[i]*C[i+1]*LIBXSMM_TYPESIZE(in_dt))/(1024.0*1024.0) );
printf("SIZE Activations %i (%dx%d): %10.2f MiB\n", i+1, MB, C[i+1], (double)(MB*C[i+1]*LIBXSMM_TYPESIZE(in_dt))/(1024.0*1024.0) );
}
act_size += (double)(MB*C[num_layers+1]*sizeof(float))/(1024.0*1024.0);
printf("SIZE Activations softmax (%dx%d): %10.2f MiB\n", MB, C[num_layers+1], (double)(MB*C[num_layers+1]*LIBXSMM_TYPESIZE(in_dt))/(1024.0*1024.0) );
printf("\nTOTAL SIZE Activations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE Filter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE delActivations: %10.2f MiB\n", act_size );
printf("TOTAL SIZE delFilter: %10.2f MiB\n", fil_size );
printf("TOTAL SIZE MLP: %10.2f MiB\n", (2.0*fil_size) + (2.0*act_size) );
/* allocate data */
/* +2 because of the softmax layer */
if ( prec_bf16 == 0 ) {
act_libxsmm = (float**)malloc( (num_layers+2)*sizeof(float*) );
delact_libxsmm = (float**)malloc( (num_layers+1)*sizeof(float*) );
for ( i = 0 ; i < num_layers+2; ++i ) {
act_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
/* softmax has no incoming gradients */
if ( i < num_layers+1 ) {
delact_libxsmm[i] = (float*)libxsmm_aligned_malloc( MB*C[i]*sizeof(float), 2097152);
}
}
fil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delfil_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
fil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
delfil_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
}
bias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
delbias_libxsmm = (float**)malloc( num_layers*sizeof(float*) );
for ( i = 0 ; i < num_layers; ++i ) {
bias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
delbias_libxsmm[i] = (float*)libxsmm_aligned_malloc( C[i+1]*sizeof(float), 2097152);
}
relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
for ( i = 0 ; i < num_layers; ++i ) {
relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
}
label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152);
/* init data */
for ( i = 0 ; i < num_layers+2; ++i ) {
my_init_buf_mlp( act_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers+1; ++i ) {
my_init_buf_mlp( delact_libxsmm[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp( fil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp( delfil_libxsmm[i], C[i]*C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp( bias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp( delbias_libxsmm[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] );
}
zero_buf_int32( label_libxsmm, MB );
} else {
/* allocate data */
act_libxsmm_bf16 = (libxsmm_bfloat16**)malloc( (num_layers+2)*sizeof(libxsmm_bfloat16*) );
delact_libxsmm_bf16 = (libxsmm_bfloat16**)malloc( (num_layers+1)*sizeof(libxsmm_bfloat16*) );
for ( i = 0 ; i < num_layers+2; ++i ) {
act_libxsmm_bf16[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152);
/* softmax has no incoming gradients */
if ( i < num_layers+1 ) {
delact_libxsmm_bf16[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( MB*C[i]*sizeof(libxsmm_bfloat16), 2097152);
}
}
fil_libxsmm = (float**) malloc( num_layers*sizeof(float*) );
fil_libxsmm_bf16 = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
delfil_libxsmm_bf16 = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
for ( i = 0 ; i < num_layers; ++i ) {
fil_libxsmm[i] = (float*) libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(float), 2097152);
fil_libxsmm_bf16[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
delfil_libxsmm_bf16[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i]*C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
}
bias_libxsmm_bf16 = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
delbias_libxsmm_bf16 = (libxsmm_bfloat16**)malloc( num_layers*sizeof(libxsmm_bfloat16*) );
for ( i = 0 ; i < num_layers; ++i ) {
bias_libxsmm_bf16[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
delbias_libxsmm_bf16[i] = (libxsmm_bfloat16*)libxsmm_aligned_malloc( C[i+1]*sizeof(libxsmm_bfloat16), 2097152);
}
relumask_libxsmm = (unsigned char**)malloc( num_layers*sizeof(unsigned char*) );
for ( i = 0 ; i < num_layers; ++i ) {
relumask_libxsmm[i] = (unsigned char*)libxsmm_aligned_malloc( MB*C[i+1]*sizeof(unsigned char), 2097152);
}
label_libxsmm = (int*)libxsmm_aligned_malloc( MB*sizeof(int), 2097152);
/* init data */
for ( i = 0 ; i < num_layers+2; ++i ) {
my_init_buf_mlp_bf16( act_libxsmm_bf16[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers+1; ++i ) {
my_init_buf_mlp_bf16( delact_libxsmm_bf16[i], MB*C[i], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
#if 0
{
float *cur_fil = (float*) malloc(C[i]*C[i+1]*sizeof(float));
my_init_buf( cur_fil, C[i]*C[i+1], 0, 0 );
my_matrix_copy_KCCK_to_KCCK_vnni(cur_fil, fil_libxsmm[i], C[i], C[i+1], bc, bk);
libxsmm_rne_convert_fp32_bf16( fil_libxsmm[i], fil_libxsmm_bf16[i], C[i]*C[i+1] );
free(cur_fil);
}
#else
my_init_buf_mlp( fil_libxsmm[i], C[i]*C[i+1], 0, 0 );
libxsmm_rne_convert_fp32_bf16( fil_libxsmm[i], fil_libxsmm_bf16[i], C[i]*C[i+1] );
#endif
}
for ( i = 0 ; i < num_layers; ++i ) {
#if 0
float *cur_fil = (float*) malloc(C[i]*C[i+1]*sizeof(float));
float *cur_fil_vnni = (float*) malloc(C[i]*C[i+1]*sizeof(float));
my_init_buf( cur_fil, C[i]*C[i+1], 0, 0 );
my_matrix_copy_KCCK_to_KCCK_vnni(cur_fil, cur_fil_vnni, C[i], C[i+1], bc, bk);
libxsmm_rne_convert_fp32_bf16( cur_fil_vnni, delfil_libxsmm_bf16[i], C[i]*C[i+1] );
free(cur_fil);
free(cur_fil_vnni);
#else
my_init_buf_mlp_bf16( delfil_libxsmm_bf16[i], C[i]*C[i+1], 0, 0 );
#endif
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp_bf16( bias_libxsmm_bf16[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
my_init_buf_mlp_bf16( delbias_libxsmm_bf16[i], C[i+1], 0, 0 );
}
for ( i = 0 ; i < num_layers; ++i ) {
zero_buf_uint8( relumask_libxsmm[i], MB*C[i+1] );
}
zero_buf_int32( label_libxsmm, MB );
}
printf("\n");
printf("##########################################\n");
printf("# Setting Up (custom-Storage) #\n");
printf("##########################################\n");
/* allocating handles */
libxsmm_dnn_fc_fwd = (libxsmm_dnn_fc_fwd_config*) malloc( num_layers*sizeof(libxsmm_dnn_fc_fwd_config) );
libxsmm_dnn_fc_bwd = (libxsmm_dnn_fc_bwd_config*) malloc( num_layers*sizeof(libxsmm_dnn_fc_bwd_config) );
libxsmm_dnn_opt = (libxsmm_dnn_opt_config*) malloc( num_layers*sizeof(libxsmm_dnn_opt_config) );
/* setting up handles + scratch */
for ( i = 0; i < num_layers; ++i ) {
/* MNIST-specific: use ReLU activation everywhere except in the last layer */
if ( i < num_layers -1) {
my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_RELU_WITH_MASK;
} else {
my_fuse = LIBXSMM_DNN_FC_ELTW_FUSE_NONE;
}
libxsmm_dnn_fc_fwd[i] = setup_libxsmm_dnn_fc_fwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, my_fuse, in_dt, out_dt, comp_dt );
libxsmm_dnn_fc_bwd[i] = setup_libxsmm_dnn_fc_bwd(MB, C[i], C[i+1], (MB % bn == 0) ? bn : MB,
(C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, my_fuse, in_dt, out_dt, comp_dt );
libxsmm_dnn_opt[i] = setup_libxsmm_dnn_opt( C[i], C[i+1], (C[i ] % bc == 0) ? bc : C[i ],
(C[i+1] % bk == 0) ? bk : C[i+1],
nThreads, lr, in_dt, out_dt, comp_dt );
/* let's allocate and bind scratch */
if ( libxsmm_dnn_fc_fwd[i].scratch_size > 0 || libxsmm_dnn_fc_bwd[i].scratch_size > 0 || libxsmm_dnn_opt[i].scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( LIBXSMM_MAX( libxsmm_dnn_fc_fwd[i].scratch_size, libxsmm_dnn_fc_bwd[i].scratch_size), libxsmm_dnn_opt[i].scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
my_init_buf_mlp( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
}
/* softmax+loss is treated as N+1 layer */
libxsmm_dnn_smax_fwd = setup_libxsmm_dnn_smax_fwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads, in_dt, out_dt, comp_dt );
libxsmm_dnn_smax_bwd = setup_libxsmm_dnn_smax_bwd( MB, C[num_layers+1], (MB % bn == 0) ? bn : MB,
(C[num_layers+1] % bk == 0) ? bk : C[num_layers+1],
nThreads, loss_weight, in_dt, out_dt, comp_dt );
if ( libxsmm_dnn_smax_fwd.scratch_size > 0 || libxsmm_dnn_smax_bwd.scratch_size > 0 ) {
size_t alloc_size = LIBXSMM_MAX( libxsmm_dnn_smax_fwd.scratch_size, libxsmm_dnn_smax_bwd.scratch_size );
if ( alloc_size > scratch_size ) {
if ( scratch != NULL ) libxsmm_free( scratch );
scratch_size = alloc_size;
scratch = libxsmm_aligned_malloc( scratch_size, 2097152 );
my_init_buf_mlp( (float*)(scratch), (scratch_size)/4, 0, 0 );
}
}
/* Reading in the MNIST dataset */
int n_batches = NUM_TRAIN/MB, batch_id = 0;
int n_epochs = iters, epoch_id = 0;
float *input_acts = NULL;
libxsmm_bfloat16 *input_acts_bf16 = NULL;
if ( prec_bf16 == 0 ) {
input_acts = (float*)libxsmm_aligned_malloc( NUM_TRAIN * C[0] * sizeof(float), 2097152);
} else {
input_acts_bf16 = (libxsmm_bfloat16*)libxsmm_aligned_malloc( NUM_TRAIN * C[0] * sizeof(libxsmm_bfloat16), 2097152);
}
/* Read in input data */
char *train_image_path = "../../datasets/mnist/train-images.idx3-ubyte";
char *train_label_path = "../../datasets/mnist/train-labels.idx1-ubyte";
char *test_image_path = "../../datasets/mnist/t10k-images.idx3-ubyte";
char *test_label_path = "../../datasets/mnist/t10k-labels.idx1-ubyte";
load_mnist(train_image_path, train_label_path, test_image_path, test_label_path);
/* Format the input layer in NCNC blocked format */
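/*
 * Blocked layout used by the indexing below: sample mb of batch `batchid`
 * and channel _j land at
 *   base + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc),
 * i.e. a [MB/bn][C0/bc][bn][bc] tile order within each batch.
 */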
int _i, _j;
for (_i = 0; _i < n_batches*MB; _i++) {
for (_j = 0; _j < C[0]; _j++) {
float val = (float) train_image[_i][_j];
int batchid = _i/MB;
int mb = _i % MB;
int _bn = (MB % bn == 0) ? bn : MB;
int _bc = (C[0] % bc == 0) ? bc : C[0];
if ( prec_bf16 == 0 ) {
float *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc);
*cur_pos = val;
} else {
libxsmm_bfloat16 *cur_pos = input_acts_bf16 + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc);
libxsmm_rne_convert_fp32_bf16( &val, cur_pos, 1 );
}
}
}
printf("###########################################\n");
printf("# Training MNIST with %d training samples #\n", n_batches*MB);
printf("###########################################\n");
l_start = libxsmm_timer_tick();
#if defined(_OPENMP)
# pragma omp parallel private(i,j,epoch_id,batch_id)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
for (epoch_id = 0; epoch_id < n_epochs; epoch_id++) {
for (batch_id = 0; batch_id < n_batches; batch_id++) {
if ( prec_bf16 == 0 ) {
for ( i = 0; i < num_layers; ++i) {
float *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i];
libxsmm_dnn_fc_fwd_exec_f32( libxsmm_dnn_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch );
}
libxsmm_dnn_smax_fwd_exec_f32( libxsmm_dnn_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], train_label + batch_id * MB, &loss,
0, tid, scratch );
if ((tid == 0) && (batch_id == 0) && (epoch_id % 10 == 0 || epoch_id == n_epochs - 1 )) {
printf("Loss for epoch %d batch_id %d is %f\n", epoch_id, batch_id, loss);
}
libxsmm_dnn_smax_bwd_exec_f32( libxsmm_dnn_smax_bwd, delact_libxsmm[num_layers], act_libxsmm[num_layers+1], train_label + batch_id * MB,
0, tid, scratch );
for ( i = num_layers-1; i > 0; --i) {
libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[i], fil_libxsmm[i], delact_libxsmm[i], delact_libxsmm[i+1], delfil_libxsmm[i],
act_libxsmm[i], delbias_libxsmm[i], relumask_libxsmm[i], LIBXSMM_DNN_FC_PASS_BWD, 0, tid, scratch );
libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[i], fil_libxsmm[i], delfil_libxsmm[i], 0, tid, scratch );
}
libxsmm_dnn_fc_bwd_exec_f32( libxsmm_dnn_fc_bwd[0], fil_libxsmm[0], delact_libxsmm[0], delact_libxsmm[0+1], delfil_libxsmm[0],
input_acts + batch_id * MB * C[0], delbias_libxsmm[0], relumask_libxsmm[0], LIBXSMM_DNN_FC_PASS_BWD_W, 0, tid, scratch );
libxsmm_dnn_opt_exec_f32( libxsmm_dnn_opt[0], fil_libxsmm[0], delfil_libxsmm[0], 0, tid, scratch );
} else {
for ( i = 0; i < num_layers; ++i) {
libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts_bf16 + batch_id * MB * C[0] : act_libxsmm_bf16[i];
libxsmm_dnn_fc_fwd_exec_bf16( libxsmm_dnn_fc_fwd[i], fil_libxsmm_bf16[i], input_act_ptr, act_libxsmm_bf16[i+1],
bias_libxsmm_bf16[i], relumask_libxsmm[i], 0, tid, scratch );
}
libxsmm_dnn_smax_fwd_exec_bf16( libxsmm_dnn_smax_fwd, act_libxsmm_bf16[num_layers], act_libxsmm_bf16[num_layers+1], train_label + batch_id * MB, &loss,
0, tid, scratch );
if ((tid == 0) && (batch_id == 0) && (epoch_id % 10 == 0 || epoch_id == n_epochs - 1 )) {
printf("Loss for epoch %d batch_id %d is %f\n", epoch_id, batch_id, loss);
}
libxsmm_dnn_smax_bwd_exec_bf16( libxsmm_dnn_smax_bwd, delact_libxsmm_bf16[num_layers], act_libxsmm_bf16[num_layers+1], train_label + batch_id * MB,
0, tid, scratch );
for ( i = num_layers-1; i > 0; --i) {
libxsmm_dnn_fc_bwd_exec_bf16( libxsmm_dnn_fc_bwd[i], fil_libxsmm_bf16[i], delact_libxsmm_bf16[i], delact_libxsmm_bf16[i+1], delfil_libxsmm_bf16[i],
act_libxsmm_bf16[i], delbias_libxsmm_bf16[i], relumask_libxsmm[i], LIBXSMM_DNN_FC_PASS_BWD, 0, tid, scratch );
libxsmm_dnn_opt_exec_bf16( libxsmm_dnn_opt[i], fil_libxsmm_bf16[i], fil_libxsmm[i], delfil_libxsmm_bf16[i], 0, tid, scratch );
}
libxsmm_dnn_fc_bwd_exec_bf16( libxsmm_dnn_fc_bwd[0], fil_libxsmm_bf16[0], delact_libxsmm_bf16[0], delact_libxsmm_bf16[0+1], delfil_libxsmm_bf16[0],
input_acts_bf16 + batch_id * MB * C[0], delbias_libxsmm_bf16[0], relumask_libxsmm[0], LIBXSMM_DNN_FC_PASS_BWD_W, 0, tid, scratch );
libxsmm_dnn_opt_exec_bf16( libxsmm_dnn_opt[0], fil_libxsmm_bf16[0], fil_libxsmm[0], delfil_libxsmm_bf16[0], 0, tid, scratch );
}
}
}
}
l_end = libxsmm_timer_tick();
l_total = libxsmm_timer_duration(l_start, l_end);
gflop = 0.0;
for ( i = num_layers-1; i > 0; --i) {
gflop += (6.0*(double)MB*(double)C[i]*(double)C[i+1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0);
}
gflop += (4.0*(double)MB*(double)C[0]*(double)C[1]*(double)((double)n_epochs *(double)n_batches)) / (1000.0*1000.0*1000.0);
printf("GFLOP = %.5g\n", gflop/(double)((double)n_epochs *(double)n_batches));
printf("fp time = %.5g\n", ((double)(l_total/((double)n_epochs *(double)n_batches))));
printf("GFLOPS = %.5g\n", gflop/l_total);
printf("PERFDUMP,BP,%s,%i,%i,", LIBXSMM_VERSION, nThreads, MB );
for ( i = 0; i < num_layers; ++i ) {
printf("%i,", C[i] );
}
printf("%f,%f\n", ((double)(l_total/((double)n_epochs *(double)n_batches))), gflop/l_total);
#ifdef TEST_ACCURACY
/* Test accuracy */
n_batches = NUM_TEST/MB;
for (_i = 0; _i < n_batches * MB; _i++) {
for (_j = 0; _j < C[0]; _j++) {
float val = (float) test_image[_i][_j];
int batchid = _i/MB;
int mb = _i % MB;
int _bn = (MB % bn == 0) ? bn : MB;
int _bc = (C[0] % bc == 0) ? bc : C[0];
if ( prec_bf16 == 0 ) {
float *cur_pos = input_acts + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc);
*cur_pos = val;
} else {
libxsmm_bfloat16 *cur_pos = input_acts_bf16 + batchid * MB *C[0] + (mb / _bn) * C[0] * _bn + (_j / _bc) * _bn * _bc + (mb % _bn) * _bc + (_j % _bc);
libxsmm_rne_convert_fp32_bf16( &val, cur_pos, 1 );
}
}
}
n_batches = NUM_TEST/MB;
unsigned int hits = 0;
unsigned int samples = 0;
#if defined(_OPENMP)
# pragma omp parallel private(i,j,batch_id)
#endif
{
#if defined(_OPENMP)
const int tid = omp_get_thread_num();
#else
const int tid = 0;
#endif
for (batch_id = 0; batch_id < n_batches; batch_id++) {
if ( prec_bf16 == 0 ) {
for ( i = 0; i < num_layers; ++i) {
float *input_act_ptr = (i == 0) ? input_acts + batch_id * MB * C[0] : act_libxsmm[i];
libxsmm_dnn_fc_fwd_exec_f32( libxsmm_dnn_fc_fwd[i], fil_libxsmm[i], input_act_ptr, act_libxsmm[i+1],
bias_libxsmm[i], relumask_libxsmm[i], 0, tid, scratch );
}
libxsmm_dnn_smax_fwd_exec_f32( libxsmm_dnn_smax_fwd, act_libxsmm[num_layers], act_libxsmm[num_layers+1], test_label + batch_id * MB, &loss,
0, tid, scratch );
if (tid == 0) {
for (_i = 0; _i < MB; _i++) {
int label = *(test_label + batch_id * MB + _i);
int max_id = 0;
float max_val = 0.0;
max_val = *(act_libxsmm[num_layers+1] + _i * 10);
float sum = max_val;
/* Find predicted label */
for (_j = 1; _j < 10; _j++) {
float val = *(act_libxsmm[num_layers+1] + _i * 10 + _j);
sum += val;
if (val > max_val) {
max_id = _j;
max_val = val;
}
}
/* Compare with true label */
if (max_id == label) {
hits++;
}
samples++;
}
}
#pragma omp barrier
} else {
for ( i = 0; i < num_layers; ++i) {
libxsmm_bfloat16 *input_act_ptr = (i == 0) ? input_acts_bf16 + batch_id * MB * C[0] : act_libxsmm_bf16[i];
libxsmm_dnn_fc_fwd_exec_bf16( libxsmm_dnn_fc_fwd[i], fil_libxsmm_bf16[i], input_act_ptr, act_libxsmm_bf16[i+1],
bias_libxsmm_bf16[i], relumask_libxsmm[i], 0, tid, scratch );
}
libxsmm_dnn_smax_fwd_exec_bf16( libxsmm_dnn_smax_fwd, act_libxsmm_bf16[num_layers], act_libxsmm_bf16[num_layers+1], test_label + batch_id * MB, &loss,
0, tid, scratch );
if (tid == 0) {
for (_i = 0; _i < MB; _i++) {
int label = *(test_label + batch_id * MB + _i);
int max_id = 0;
float max_val = 0.0;
libxsmm_convert_bf16_f32( act_libxsmm_bf16[num_layers+1] + _i * 10, &max_val, 1 );
/* Find predicted label */
for (_j = 1; _j < 10; _j++) {
libxsmm_bfloat16 val = *(act_libxsmm_bf16[num_layers+1] + _i * 10 + _j);
float f32_val;
libxsmm_convert_bf16_f32( &val, &f32_val, 1 );
if (f32_val > max_val) {
max_id = _j;
max_val = f32_val;
}
}
/* Compare with true label */
if (max_id == label) {
hits++;
}
samples++;
}
}
#pragma omp barrier
}
}
}
printf("Accuracy is %f %% (%d test samples)\n", (1.0*hits)/(1.0*samples)*100.0, samples);
#endif
/* deallocate data */
if ( scratch != NULL ) {
libxsmm_free(scratch);
}
if ( prec_bf16 > 0 ) {
for ( i = 0; i < num_layers; ++i ) {
if ( i == 0 ) {
libxsmm_free(act_libxsmm_bf16[i]);
libxsmm_free(delact_libxsmm_bf16[i]);
}
libxsmm_free(act_libxsmm_bf16[i+1]);
libxsmm_free(delact_libxsmm_bf16[i+1]);
libxsmm_free(fil_libxsmm_bf16[i]);
libxsmm_free(fil_libxsmm[i]);
libxsmm_free(delfil_libxsmm_bf16[i]);
libxsmm_free(bias_libxsmm_bf16[i]);
libxsmm_free(delbias_libxsmm_bf16[i]);
libxsmm_free(relumask_libxsmm[i]);
}
libxsmm_free(act_libxsmm_bf16[num_layers+1]);
libxsmm_free(label_libxsmm);
libxsmm_free(input_acts_bf16);
free( act_libxsmm_bf16 );
free( delact_libxsmm_bf16 );
free( fil_libxsmm_bf16 );
free( fil_libxsmm );
free( delfil_libxsmm_bf16 );
free( bias_libxsmm_bf16 );
free( delbias_libxsmm_bf16 );
free( relumask_libxsmm );
} else {
for ( i = 0; i < num_layers; ++i ) {
if ( i == 0 ) {
libxsmm_free(act_libxsmm[i]);
libxsmm_free(delact_libxsmm[i]);
}
libxsmm_free(act_libxsmm[i+1]);
libxsmm_free(delact_libxsmm[i+1]);
libxsmm_free(fil_libxsmm[i]);
libxsmm_free(delfil_libxsmm[i]);
libxsmm_free(bias_libxsmm[i]);
libxsmm_free(delbias_libxsmm[i]);
libxsmm_free(relumask_libxsmm[i]);
}
libxsmm_free(act_libxsmm[num_layers+1]);
libxsmm_free(label_libxsmm);
libxsmm_free(input_acts);
free( act_libxsmm );
free( delact_libxsmm );
free( fil_libxsmm );
free( delfil_libxsmm );
free( bias_libxsmm );
free( delbias_libxsmm );
free( relumask_libxsmm );
}
free( libxsmm_dnn_opt );
free( libxsmm_dnn_fc_fwd );
free( libxsmm_dnn_fc_bwd );
free( C );
/* some empty lines at the end */
printf("\n\n\n");
return 0;
}
|
GB_binop__bset_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_int64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_int64)
// C=scalar+B GB (_bind1st__bset_int64)
// C=scalar+B' GB (_bind1st_tran__bset_int64)
// C=A+scalar GB (_bind2nd__bset_int64)
// C=A'+scalar GB (_bind2nd_tran__bset_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = GB_BITSET (aij, bij, int64_t, 64)
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, int64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_INT64 || GxB_NO_BSET_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bset_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bset_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bset_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bset_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bset_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITSET (x, bij, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bset_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITSET (aij, y, int64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, int64_t, 64) ; \
}
GrB_Info GB (_bind1st_tran__bset_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, int64_t, 64) ; \
}
GrB_Info GB (_bind2nd_tran__bset_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__identity_int16_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_int16_int32
// op(A') function: GB_unop_tran__identity_int16_int32
// C type: int16_t
// A type: int32_t
// cast: int16_t cij = (int16_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int16_t z = (int16_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int16_t z = (int16_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT16 || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_int16_int32
(
int16_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int16_t z = (int16_t) aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_int16_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
qsort_arg_mt.c | /*
* Imported from PostgreSQL sources by Teodor Sigaev <teodor@sigaev.ru>, <sigaev@corp.mail.ru>
*/
/*
* qsort_arg.c: qsort with a passthrough "void *" argument
*
* Modifications from vanilla NetBSD source:
* Add do ... while() macro fix
* Remove __inline, _DIAGASSERTs, __P
* Remove ill-considered "swap_cnt" switch to insertion sort,
* in favor of a simple check for presorted input.
*
* CAUTION: if you change this file, see also qsort.c
*
* $PostgreSQL: pgsql/src/port/qsort_arg.c,v 1.4 2007/03/18 05:36:50 neilc Exp $
*/
/* $NetBSD: qsort.c,v 1.13 2003/08/07 16:43:42 agc Exp $ */
/*-
* Copyright (c) 1992, 1993
* The Regents of the University of California. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of the University nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <third_party/qsort_arg.h>
#include <stdint.h>
#if defined(__cplusplus)
extern "C" {
#endif /* defined(__cplusplus) */
#define min(a, b) ((a) < (b) ? (a) : (b))
static char *med3(char *a, char *b, char *c,
int (*cmp)(const void *a, const void *b, void *arg), void *arg);
static void swapfunc(char *, char *, size_t, int);
/**
* @brief Reduce the current number of threads in the thread pool to the
* bare minimum. Doesn't prevent the pool from spawning new threads later
* if demand mounts.
*/
static void
thread_pool_trim()
{
/*
 * Trim the OpenMP thread pool.
 * Though we lack direct control, the workaround below works for the
 * GNU OpenMP library: it stops surplus threads on entering a parallel
 * region. We can't go below 2 threads due to an implementation quirk.
*/
#pragma omp parallel num_threads(2)
;
}
/*
* Qsort routine based on J. L. Bentley and M. D. McIlroy,
* "Engineering a sort function",
* Software--Practice and Experience 23 (1993) 1249-1265.
* We have modified their original by adding a check for already-sorted input,
* which seems to be a win per discussions on pgsql-hackers around 2006-03-21.
*/
#define swapcode(TYPE, parmi, parmj, n) \
do { \
size_t i = (n) / sizeof (TYPE); \
TYPE *pi = (TYPE *)(void *)(parmi); \
TYPE *pj = (TYPE *)(void *)(parmj); \
do { \
TYPE t = *pi; \
*pi++ = *pj; \
*pj++ = t; \
} while (--i > 0); \
} while (0)
#define SWAPINIT(a, es) swaptype = ((char *)(a) - (char *)0) % sizeof(long) || \
(es) % sizeof(long) ? 2 : (es) == sizeof(long)? 0 : 1;
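/*
 * swaptype selects the swap strategy: 0 = the element is a single
 * long-aligned long (swapped inline by the swap() macro), 1 = the element
 * size is a multiple of sizeof(long) (swapped long by long), 2 = unaligned
 * pointer or odd element size (swapped byte by byte).
 */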
static void
swapfunc(char *a, char *b, size_t n, int swaptype)
{
if (swaptype <= 1)
swapcode(long, a, b, n);
else
swapcode(char, a, b, n);
}
#define swap(a, b) \
if (swaptype == 0) { \
long t = *(long *)(void *)(a); \
*(long *)(void *)(a) = *(long *)(void *)(b); \
*(long *)(void *)(b) = t; \
} else \
swapfunc(a, b, es, swaptype)
#define vecswap(a, b, n) if ((n) > 0) swapfunc((a), (b), (size_t)(n), swaptype)
static char *
med3(char *a, char *b, char *c, int (*cmp)(const void *a, const void *b, void *arg), void *arg)
{
return cmp(a, b, arg) < 0 ?
(cmp(b, c, arg) < 0 ? b : (cmp(a, c, arg) < 0 ? c : a))
: (cmp(b, c, arg) > 0 ? b : (cmp(a, c, arg) < 0 ? a : c));
}
static void
qsort_arg_mt_internal(void *a, size_t n, size_t es,
int (*cmp)(const void *a, const void *b, void *arg), void *arg)
{
char *pa, *pb, *pc, *pd, *pl, *pm, *pn;
intptr_t d, r, swaptype, presorted;
loop:SWAPINIT(a, es);
if (n < 7)
{
for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
for (pl = pm; pl > (char *) a && cmp(pl - es, pl, arg) > 0;
pl -= es)
swap(pl, pl - es);
return;
}
presorted = 1;
for (pm = (char *) a + es; pm < (char *) a + n * es; pm += es)
{
if (cmp(pm - es, pm, arg) > 0)
{
presorted = 0;
break;
}
}
if (presorted)
return;
pm = (char *) a + (n / 2) * es;
if (n > 7)
{
pl = (char *) a;
pn = (char *) a + (n - 1) * es;
if (n > 40)
{
d = (n / 8) * es;
pl = med3(pl, pl + d, pl + 2 * d, cmp, arg);
pm = med3(pm - d, pm, pm + d, cmp, arg);
pn = med3(pn - 2 * d, pn - d, pn, cmp, arg);
}
pm = med3(pl, pm, pn, cmp, arg);
}
swap((char*)a, pm);
pa = pb = (char *) a + es;
pc = pd = (char *) a + (n - 1) * es;
for (;;)
{
while (pb <= pc && (r = cmp(pb, a, arg)) <= 0)
{
if (r == 0)
{
swap(pa, pb);
pa += es;
}
pb += es;
}
while (pb <= pc && (r = cmp(pc, a, arg)) >= 0)
{
if (r == 0)
{
swap(pc, pd);
pd -= es;
}
pc -= es;
}
if (pb > pc)
break;
swap(pb, pc);
pb += es;
pc -= es;
}
pn = (char *) a + n * es;
r = min(pa - (char *) a, pb - pa);
vecswap((char*)a, pb - r, r);
r = min(pd - pc, pn - pd - es);
vecswap(pb, pn - r, r);
if ((r = pb - pa) > es) {
#pragma omp task
qsort_arg_mt_internal(a, r / es, es, cmp, arg);
}
if ((r = pd - pc) > es)
{
/* Iterate rather than recurse to save stack space */
a = pn - r;
n = r / es;
goto loop;
}
}
void
qsort_arg(void *a, size_t n, size_t es,
int (*cmp)(const void *a, const void *b, void *arg), void *arg)
{
#pragma omp parallel
{
#pragma omp single
qsort_arg_mt_internal(a, n, es, cmp, arg);
}
thread_pool_trim();
}
#if defined(__cplusplus)
}
#endif /* defined(__cplusplus) */
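/*
 * Hypothetical usage sketch (not part of the imported PostgreSQL source):
 * sort an int array, using the passthrough argument to pick the direction.
 * Compile with -DQSORT_ARG_DEMO and -fopenmp to try it.
 */
#ifdef QSORT_ARG_DEMO
#include <stdio.h>
static int
int_cmp_dir(const void *pa, const void *pb, void *arg)
{
	int a = *(const int *) pa;
	int b = *(const int *) pb;
	int dir = *(int *) arg;	/* +1 = ascending, -1 = descending */
	return dir * ((a > b) - (a < b));
}
int
main(void)
{
	int v[] = {5, 1, 4, 2, 3};
	int dir = 1;
	qsort_arg(v, 5, sizeof(int), int_cmp_dir, &dir);
	for (int i = 0; i < 5; i++)
		printf("%d ", v[i]);
	printf("\n");
	return 0;
}
#endif /* QSORT_ARG_DEMO */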
|
omp_lock.c | #include <omp.h>
#include <stdio.h>
#define THREADS 512
#define WARPSIZE 64
#pragma omp declare target
omp_lock_t lock;
#pragma omp end declare target
int main() {
int error = 0;
unsigned count = 0; // incremented within target region
unsigned expected_count = 0; // incremented on host
#pragma omp target
omp_init_lock(&lock);
  // The lock implementation picks a single thread from the warp to avoid
  // the deadlock that results if multiple threads try to CAS-loop at once.
  // The lower/upper bounds below exercise various active-warp patterns.
const int edges[] = {0, 1, 32, 62, 63};
const int N = sizeof(edges) / sizeof(edges[0]);
for (int l = 0; l < N; l++) {
for (int u = 0; u < N; u++) {
int lower = edges[l];
int upper = edges[u];
if (lower > upper)
continue;
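      // Only the lane with lane_id == lower increments 'count', so each
      // target launch contributes exactly one increment per warp,
      // i.e. THREADS / WARPSIZE in total.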
expected_count += THREADS / WARPSIZE;
#pragma omp target parallel num_threads(THREADS) map(tofrom : error, count)
{
int lane_id = omp_ext_get_lane_id();
if (lane_id >= lower && lane_id <= upper) {
omp_set_lock(&lock); // mutex acts on a per warp basis
if (omp_ext_get_lane_id() == lower) {
// Increment once per warp
count++;
}
if (!omp_test_lock(&lock)) {
error = 1;
}
omp_unset_lock(&lock);
}
}
}
}
#pragma omp target
omp_destroy_lock(&lock);
if (count != expected_count) {
error = 1;
}
return error;
}
|
GB_unop__identity_uint8_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint8_fc64
// op(A') function: GB_unop_tran__identity_uint8_fc64
// C type: uint8_t
// A type: GxB_FC64_t
// cast: uint8_t cij = GB_cast_to_uint8_t (creal (aij))
// unaryop: cij = aij
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint8_fc64
(
uint8_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t (creal (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_uint8_fc64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
mixed_tentusscher_myo_epi_2004_S2_12.c | // Scenario 2 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium)
// (AP + max:dvdt)
#include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004_S2_12.h"
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
// Initial conditions for TenTusscher epicardium
else
{
// Default initial conditions
/*
sv[0] = INITIAL_V; // V; millivolt
sv[1] = 0.f; //M
sv[2] = 0.75; //H
sv[3] = 0.75f; //J
sv[4] = 0.f; //Xr1
sv[5] = 1.f; //Xr2
sv[6] = 0.f; //Xs
sv[7] = 1.f; //S
sv[8] = 0.f; //R
sv[9] = 0.f; //D
sv[10] = 1.f; //F
sv[11] = 1.f; //FCa
sv[12] = 1.f; //G
sv[13] = 0.0002; //Cai
sv[14] = 0.2f; //CaSR
sv[15] = 11.6f; //Nai
sv[16] = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.5384082987608,0.00129831811982894,0.778973907375400,0.778770359064450,0.000175617603671969,0.484779426120966,0.00294656185560798,0.999998338163124,1.94314968247144e-08,1.89860853576860e-05,0.999768890841901,1.00699067411443,0.999993647054558,4.76253466507928e-05,1.07498477804450,9.16427826493556,140.147002852970};
for (uint32_t i = 0; i < NEQ; i++)
sv[i] = sv_sst[i];
}
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_myo(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
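	// Free Ca2+ is recovered from total Ca2+ analytically: the rapid-buffering
	// assumption yields a quadratic Ca^2 + b*Ca - c = 0 per pool, and the
	// sqrt expressions below take its positive root (for CaSR and Cai).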
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
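	// Gates are advanced with the Rush-Larsen scheme,
	// y(t+dt) = y_inf - (y_inf - y(t))*exp(-dt/tau), which integrates the
	// linear gate ODE dy/dt = (y_inf - y)/tau exactly over the step for
	// frozen y_inf and tau.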
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_epi(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real parameters []={13.6678942151156,0.000256568439836354,0.000168998708141810,0.000629430277377532,0.292179614925966,0.159025382216859,0.219408745068892,4.13026404897143,0.0228716387184266,2.64548400409568,1084.92169869363,0.000464164881016237,0.0900142389778237,0.0125301647159537,0.00898985291692117,1.90563632103728e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
//TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
mixed_tentusscher_myo_epi_2004.c | #include <stdio.h>
#include "mixed_tentusscher_myo_epi_2004.h"
GET_CELL_MODEL_DATA(init_cell_model_data)
{
if(get_initial_v)
cell_model->initial_v = INITIAL_V;
if(get_neq)
cell_model->number_of_ode_equations = NEQ;
}
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu)
{
static bool first_call = true;
if(first_call)
{
print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n");
first_call = false;
}
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
// Initial conditions for TenTusscher myocardium
if (mapping[sv_id] == 0)
{
/// initial condition
real sv11[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633};
sv[0] = sv11[0]; // V; millivolt
sv[1] = sv11[1]; //M
sv[2] = sv11[2]; //H
sv[3] = sv11[3]; //J
sv[4] = sv11[4]; //Xr1
sv[5] = sv11[5]; //Xr2
sv[6] = sv11[6]; //Xs
sv[7] = sv11[7]; //S
sv[8] = sv11[8]; //R
sv[9] = sv11[9]; //D
sv[10] = sv11[10]; //F
sv[11] = sv11[11]; //FCa
sv[12] = sv11[12]; //G
sv[13] = sv11[13]; //Cai
sv[14] = sv11[14]; //CaSR
sv[15] = sv11[15]; //Nai
sv[16] = sv11[16]; //Ki
// sv[0] = INITIAL_V; // V; millivolt
// sv[1] = 0.f; //M
// sv[2] = 0.75; //H
// sv[3] = 0.75f; //J
// sv[4] = 0.f; //Xr1
// sv[5] = 1.f; //Xr2
// sv[6] = 0.f; //Xs
// sv[7] = 1.f; //S
// sv[8] = 0.f; //R
// sv[9] = 0.f; //D
// sv[10] = 1.f; //F
// sv[11] = 1.f; //FCa
// sv[12] = 1.f; //G
// sv[13] = 0.0002; //Cai
// sv[14] = 0.2f; //CaSR
// sv[15] = 11.6f; //Nai
// sv[16] = 138.3f; //Ki
}
// Initial conditions for TenTusscher epicardium
else
{
// sv[0] = INITIAL_V; // V; millivolt
// sv[1] = 0.f; //M
// sv[2] = 0.75; //H
// sv[3] = 0.75f; //J
// sv[4] = 0.f; //Xr1
// sv[5] = 1.f; //Xr2
// sv[6] = 0.f; //Xs
// sv[7] = 1.f; //S
// sv[8] = 0.f; //R
// sv[9] = 0.f; //D
// sv[10] = 1.f; //F
// sv[11] = 1.f; //FCa
// sv[12] = 1.f; //G
// sv[13] = 0.0002; //Cai
// sv[14] = 0.2f; //CaSR
// sv[15] = 11.6f; //Nai
// sv[16] = 138.3f; //Ki
/// initial condition
real sv11[]={-86.4172552153702,0.00133233093318418,0.775980725003160,0.775871451583533,0.000178484465968596,0.483518904573916,0.00297208335439809,0.999998297825169,1.98274727808946e-08,1.92952362196655e-05,0.999768268008847,1.00667048889468,0.999984854519288,5.50424977684767e-05,0.352485262813812,10.8673127043200,138.860197273148};
sv[0] = sv11[0]; // V; millivolt
sv[1] = sv11[1]; //M
sv[2] = sv11[2]; //H
sv[3] = sv11[3]; //J
sv[4] = sv11[4]; //Xr1
sv[5] = sv11[5]; //Xr2
sv[6] = sv11[6]; //Xs
sv[7] = sv11[7]; //S
sv[8] = sv11[8]; //R
sv[9] = sv11[9]; //D
sv[10] = sv11[10]; //F
sv[11] = sv11[11]; //FCa
sv[12] = sv11[12]; //G
sv[13] = sv11[13]; //Cai
sv[14] = sv11[14]; //CaSR
sv[15] = sv11[15]; //Nai
sv[16] = sv11[16]; //Ki
}
}
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu)
{
// Get the mapping array
uint32_t *mapping = NULL;
if(extra_data)
{
mapping = (uint32_t*)extra_data;
}
else
{
print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n");
}
uint32_t sv_id;
int i;
#pragma omp parallel for private(sv_id)
for (i = 0; i < num_cells_to_solve; i++)
{
if(cells_to_solve)
sv_id = cells_to_solve[i];
else
sv_id = (uint32_t )i;
for (int j = 0; j < num_steps; ++j)
{
if (mapping[i] == 0)
solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]);
else
solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]);
}
}
}
void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_myo(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Myocardium cell
real Gks=0.062;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Myocardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
// [!] Myocardium cell
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current)
{
real rY[NEQ], rDY[NEQ];
for(int i = 0; i < NEQ; i++)
rY[i] = sv[i];
RHS_cpu_epi(rY, rDY, stim_current, dt);
for(int i = 0; i < NEQ; i++)
sv[i] = rDY[i];
}
void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt)
{
// State variables
real svolt = sv[0];
real sm = sv[1];
real sh = sv[2];
real sj = sv[3];
real sxr1 = sv[4];
real sxr2 = sv[5];
real sxs = sv[6];
real ss = sv[7];
real sr = sv[8];
real sd = sv[9];
real sf = sv[10];
real sfca = sv[11];
real sg = sv[12];
real Cai = sv[13];
real CaSR = sv[14];
real Nai = sv[15];
real Ki = sv[16];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
// [!] Epicardium cell
real Gks=0.245;
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
// [!] Epicardium cell
real Gto=0.294;
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
Irel=A*sd*sg;
Ileak=0.00008f*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37.0)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37.0)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
trsm_x_csr_n_lo_row.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#include <memory.h>
#include <stdlib.h>
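/*
 * Lower-triangular solve with a non-unit diagonal and multiple right-hand
 * sides: each column is computed by forward substitution,
 *   y[r] = (alpha * x[r] - sum_{c < r} A[r][c] * y[c]) / A[r][r],
 * after a first parallel pass has cached the diagonal entries.
 */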
alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_CSR *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy)
{
ALPHA_INT m = A->rows;
    // Cache the diagonal on the heap; a variable-length array of size m
    // could overflow the stack for large matrices.
    ALPHA_Number *diag = (ALPHA_Number *)malloc((size_t)m * sizeof(ALPHA_Number));
    memset(diag, '\0', (size_t)m * sizeof(ALPHA_Number));
int num_thread = alpha_get_thread_num();
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for (ALPHA_INT r = 0; r < m; r++)
{
for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++)
{
ALPHA_INT ac = A->col_indx[ai];
if (ac == r)
{
diag[r] = A->values[ai];
}
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(num_thread)
#endif
for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++)
{
for (ALPHA_INT r = 0; r < m; r++)
{
ALPHA_Number temp;
alpha_setzero(temp);
for (ALPHA_INT ai = A->rows_start[r]; ai < A->rows_end[r]; ai++)
{
ALPHA_INT ac = A->col_indx[ai];
if (ac < r)
{
alpha_madde(temp, A->values[ai], y[ac * ldy + out_y_col]);
}
}
ALPHA_Number t;
alpha_setzero(t);
alpha_mul(t, alpha, x[r * ldx + out_y_col]);
alpha_sube(t, temp);
alpha_div(y[r * ldy + out_y_col], t, diag[r]);
}
}
    free(diag);
    return ALPHA_SPARSE_STATUS_SUCCESS;
}
|
questao03.c | #include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "omp.h"
int main() {
long int **matriz;
long int sum_s = 0, sum_p = 0;
int entrada, i, j;
double t1_s, t2_s, t1_p, t2_p;
srand(time(0));
printf("Informe um numero inteiro positivo: ");
scanf("%d", &entrada);
    if(entrada <= 0) {
        printf("\nInvalid number");
        return 1;
    }
    // Indices run from 0 to entrada inclusive, so entrada + 1 rows and
    // columns are needed; the row array must hold pointers, not long ints.
    matriz = (long int**)malloc((entrada + 1) * sizeof(long int*));
    for(i = 0; i <= entrada; i++) {
        matriz[i] = (long int*)malloc((entrada + 1) * sizeof(long int));
    }
for(i = 0; i <= entrada; i++) {
matriz[i][0] = abs(rand());
matriz[0][i] = abs(rand());
}
for(i = 1; i <= entrada; i++) {
for(j = 1; j <= entrada; j++) {
matriz[i][j] = matriz[i - 1][j] + matriz[i][j - 1];
}
}
// for(i = 0; i <= entrada; i++) {
// for(j = 0; j <= entrada; j++) {
// printf("%ld ", matriz[i][j]);
// }
// printf("\n");
// }
t1_s = omp_get_wtime();
for(i = 1; i <= entrada; i++) {
for(j = 1; j <= entrada; j++) {
sum_s += matriz[i][j];
}
}
t2_s = omp_get_wtime();
t1_p = omp_get_wtime();
    // j must be private: sharing it across threads is a data race. The
    // bounds match the sequential sum (1..entrada) so both results agree.
    #pragma omp parallel private(j)
    {
        long int local_sum = 0;
        #pragma omp for
        for(i = 1; i <= entrada; i++) {
            for(j = 1; j <= entrada; j++) {
local_sum += matriz[i][j];
}
}
#pragma omp critical
{
sum_p += local_sum;
}
}
t2_p = omp_get_wtime();
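    // An equivalent, more idiomatic form of the parallel sum uses an OpenMP
    // reduction clause instead of the manual critical section:
    //   #pragma omp parallel for private(j) reduction(+ : sum_p)
    //   for(i = 1; i <= entrada; i++)
    //       for(j = 1; j <= entrada; j++)
    //           sum_p += matriz[i][j];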
printf("\nSequencial");
printf("\nSoma : %ld", sum_s);
printf("\nTempo: %lf\n", t2_s - t1_s);
printf("\nParalelo");
printf("\nSoma : %ld", sum_p);
printf("\nTempo: %lf\n", t2_p - t1_p);
for(i = 0; i <= entrada; i++) {
free(matriz[i]);
}
free(matriz);
return 0;
}
|
re_model_template.h | /*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_RE_MODEL_TEMPLATE_H_
#define GPB_RE_MODEL_TEMPLATE_H_
#include <GPBoost/log.h>
#include <GPBoost/type_defs.h>
#include <GPBoost/re_comp.h>
#include <GPBoost/sparse_matrix_utils.h>
#include <GPBoost/Vecchia_utils.h>
#include <GPBoost/GP_utils.h>
//#include <Eigen/src/misc/lapack.h>
#include <memory>
#include <mutex>
#include <vector>
#include <algorithm> // std::shuffle
#include <random> // std::default_random_engine
//#include <typeinfo> // Only needed for debugging
//#include <chrono> // Only needed for debugging
//#include <thread> // Only needed for debugging
//Log::Info("Fine here ");// Only for debugging
//std::this_thread::sleep_for(std::chrono::milliseconds(20));
namespace GPBoost {
/*!
* \brief Template class used in the wrapper class REModel
* The template parameters T1 and T2 can either be <sp_mat_t, chol_sp_mat_t> or <den_mat_t, chol_den_mat_t>
*/
template<typename T1, typename T2>
class REModelTemplate {
public:
/*! \brief Null constructor */
REModelTemplate();
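// Minimal usage sketch (hypothetical; den_mat_t / chol_den_mat_t come
// from GPBoost/type_defs.h): one Gaussian process with an exponential
// covariance on 2-dimensional coordinates, all other effects disabled.
//   REModelTemplate<den_mat_t, chol_den_mat_t> model(
//       num_data, nullptr, nullptr, 0, nullptr, nullptr, 0,
//       1 /*num_gp*/, gp_coords, 2 /*dim_gp_coords*/, nullptr, 0,
//       "exponential");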
/*!
* \brief Costructor
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of random effects / Gaussian processes (same values = same process realization)
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param num_re_group Number of grouped (intercept) random effects
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1.
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param num_gp Number of (intercept) Gaussian processes
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian process. We follow the notation and parametrization of Diggle and Ribeiro (2007) except for the Matern covariance where we follow Rasmussen and Williams (2006)
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance, irrelevant for some covariance functions such as the exponential or Gaussian)
* \param vecchia_approx If true, the Vecchia approximation is used for the Gaussian process
* \param num_neighbors The number of neighbors used in the Vecchia approximation
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions
*/
REModelTemplate(data_size_t num_data, const gp_id_t* cluster_ids_data = nullptr, const char* re_group_data = nullptr,
data_size_t num_re_group = 0, const double* re_group_rand_coef_data = nullptr,
const int32_t* ind_effect_group_rand_coef = nullptr, data_size_t num_re_group_rand_coef = 0,
data_size_t num_gp = 0, const double* gp_coords_data = nullptr, int dim_gp_coords = 2,
const double* gp_rand_coef_data = nullptr, data_size_t num_gp_rand_coef = 0,
const char* cov_fct = nullptr, double cov_fct_shape = 0., bool vecchia_approx = false, int num_neighbors = 30,
const char* vecchia_ordering = nullptr, const char* vecchia_pred_type = nullptr, int num_neighbors_pred = 30) {
num_cov_par_ = 1;
CHECK(num_data > 0);
num_data_ = num_data;
vecchia_approx_ = vecchia_approx;
//Set up GP IDs
SetUpGPIds(num_data_, cluster_ids_data, num_data_per_cluster_, data_indices_per_cluster_, unique_clusters_, num_clusters_);
//Indices of parameters of individual components in joint parameter vector
ind_par_.push_back(0);//0+1 is starting point of parameter for first component since the first parameter is the nugget effect variance
num_comps_total_ = 0;
//Do some checks for grouped RE components and set meta data (number of components etc.)
std::vector<std::vector<string_t>> re_group_levels;//Matrix with group levels for the grouped random effects (re_group_levels[j] contains the levels for RE number j)
if (num_re_group > 0) {
if (vecchia_approx) {
Log::Fatal("The Veccia approximation cannot be used when there are grouped random effects (in the current implementation).");
}
num_re_group_ = num_re_group;
CHECK(re_group_data != nullptr);
if (num_re_group_rand_coef > 0) {
num_re_group_rand_coef_ = num_re_group_rand_coef;
CHECK(re_group_rand_coef_data != nullptr);
CHECK(ind_effect_group_rand_coef != nullptr);
for (int j = 0; j < num_re_group_rand_coef_; ++j) {
CHECK(0 < ind_effect_group_rand_coef[j] && ind_effect_group_rand_coef[j] <= num_re_group_);
}
ind_effect_group_rand_coef_ = std::vector<int>(ind_effect_group_rand_coef, ind_effect_group_rand_coef + num_re_group_rand_coef_);
}
num_re_group_total_ = num_re_group_ + num_re_group_rand_coef_;
num_cov_par_ += num_re_group_total_;
num_comps_total_ += num_re_group_total_;
//Add indices of parameters of individual components in joint parameter vector
for (int j = 0; j < num_re_group_total_; ++j) {
ind_par_.push_back(1 + j);//end points of parameter indices of components
}
// Convert characters in 'const char* re_group_data' to matrix (num_re_group_ x num_data_) with strings of group labels
re_group_levels = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_));
if (num_re_group_ > 0) {
ConvertCharToStringGroupLevels(num_data_, num_re_group_, re_group_data, re_group_levels);
}
}
//Do some checks for GP components and set meta data (number of components etc.)
if (num_gp > 0) {
if (num_gp > 2) {
Log::Fatal("num_gp can only be either 0 or 1 in the current implementation");
}
num_gp_ = num_gp;
ind_intercept_gp_ = num_comps_total_;
CHECK(dim_gp_coords > 0);
CHECK(gp_coords_data != nullptr);
CHECK(cov_fct != nullptr);
dim_gp_coords_ = dim_gp_coords;
cov_fct_ = std::string(cov_fct);
cov_fct_shape_ = cov_fct_shape;
if (vecchia_approx) {
Log::Info("Starting nearest neighbor search for Vecchia approximation");
CHECK(num_neighbors > 0);
num_neighbors_ = num_neighbors;
CHECK(num_neighbors_pred > 0);
num_neighbors_pred_ = num_neighbors_pred;
if (vecchia_ordering == nullptr) {
vecchia_ordering_ = "none";
}
else {
vecchia_ordering_ = std::string(vecchia_ordering);
CHECK(vecchia_ordering_ == "none" || vecchia_ordering_ == "random");
}
if (vecchia_pred_type == nullptr) {
vecchia_pred_type_ = "order_obs_first_cond_obs_only";
}
else {
vecchia_pred_type_ = std::string(vecchia_pred_type);
if (SUPPORTED_VECCHIA_PRED_TYPES_.find(vecchia_pred_type_) == SUPPORTED_VECCHIA_PRED_TYPES_.end()) {
Log::Fatal("Prediction type '%s' is not supported for the Veccia approximation.", vecchia_pred_type_.c_str());
}
}
}
if (num_gp_rand_coef > 0) {//Random slopes
CHECK(gp_rand_coef_data != nullptr);
num_gp_rand_coef_ = num_gp_rand_coef;
}
num_gp_total_ = num_gp_ + num_gp_rand_coef_;
num_cov_par_ += (2 * num_gp_total_);
num_comps_total_ += num_gp_total_;
//Add indices of parameters of individual components in joint parameter vector
for (int j = 0; j < num_gp_total_; ++j) {
ind_par_.push_back(ind_par_.back() + 2);//end points of parameter indices of components
}
if (vecchia_approx) {
double num_mem_d = ((double)num_gp_total_) * ((double)num_data_) * ((double)num_neighbors_) * ((double)num_neighbors_);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
if (mem_size > 8000) {
Log::Warning("The current implementation of the Vecchia approximation is not optimized for memory usage. In your case (num. obs. = %d and num. neighbors = %d), at least approximately %d mb of memory is needed. If this is a problem, contact the developer of this package and ask to implement this feature.", num_data_, num_neighbors_, mem_size);
}
}
}
if (num_re_group_ > 0 && num_gp_total_ == 0) {
do_symbolic_decomposition_ = true;//Symbolic decomposition is only done if sparse matrices are used
}
else {
do_symbolic_decomposition_ = false;
}
//Create RE/GP component models
for (const auto& cluster_i : unique_clusters_) {
ConstructI<T1>(cluster_i);//Identity matrices needed for computing inverses of covariance matrices used in gradient descent
std::vector<std::shared_ptr<RECompBase<T1>>> re_comps_cluster_i;
if (vecchia_approx_) {
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
std::vector<Triplet_t> entries_init_B_cluster_i;
std::vector<Triplet_t> entries_init_B_grad_cluster_i;
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_[cluster_i]);
CreateREComponentsVecchia(num_data_, data_indices_per_cluster_, cluster_i, num_data_per_cluster_,
gp_coords_data, dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i, vecchia_ordering_, num_neighbors_);
nearest_neighbors_.insert({ cluster_i, nearest_neighbors_cluster_i });
dist_obs_neighbors_.insert({ cluster_i, dist_obs_neighbors_cluster_i });
dist_between_neighbors_.insert({ cluster_i, dist_between_neighbors_cluster_i });
entries_init_B_.insert({ cluster_i, entries_init_B_cluster_i });
entries_init_B_grad_.insert({ cluster_i, entries_init_B_grad_cluster_i });
z_outer_z_obs_neighbors_.insert({ cluster_i, z_outer_z_obs_neighbors_cluster_i });
Log::Info("Nearest neighbors for Vecchia approximation found");
}
else {
CreateREComponents(num_data_, num_re_group_, data_indices_per_cluster_, cluster_i, re_group_levels, num_data_per_cluster_,
num_re_group_rand_coef_, re_group_rand_coef_data, ind_effect_group_rand_coef_, num_gp_, gp_coords_data,
dim_gp_coords_, gp_rand_coef_data, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, ind_intercept_gp_, re_comps_cluster_i);
}
re_comps_.insert({ cluster_i, re_comps_cluster_i });
}
}
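//A minimal construction sketch (illustrative only; the concrete template arguments, typedefs,
//and data buffers below are assumptions for illustration, not part of the documented API):
//	int num_data = 1000;
//	std::vector<double> gp_coords(num_data * 2);//2-dimensional GP coordinates, filled by the caller
//	REModelTemplate<sp_mat_t, chol_sp_mat_t> model(num_data, nullptr, nullptr, 0, nullptr, nullptr, 0,
//		1, gp_coords.data(), 2, nullptr, 0, "exponential", 0., true, 30, "none",
//		"order_obs_first_cond_obs_only", 30);//one GP with a Vecchia approximation using 30 neighbors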
/*! \brief Destructor */
~REModelTemplate() {
}
/*! \brief Disable copy */
REModelTemplate& operator=(const REModelTemplate&) = delete;
/*! \brief Disable copy */
REModelTemplate(const REModelTemplate&) = delete;
/*!
* \brief Find parameters that minimize the negative log-likelihood (=MLE) using (Nesterov accelerated) gradient descent
* Note: You should pre-allocate memory for optim_cov_pars (length = number of covariance parameters)
* \param y_data Response variable data
* \param init_cov_pars Initial values for covariance parameters of RE components
* \param[out] optim_cov_pars Optimal covariance parameters
* \param[out] num_it Number of iterations
* \param lr Learning rate
* \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param max_iter Maximal number of iterations
* \param delta_rel_conv Convergence criterion: stop iteration if relative change in parameters is below this value
* \param optimizer Options: "gradient_descent" or "fisher_scoring"
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param[out] std_dev_cov_par Standard deviations for the covariance parameters
* \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information
* \param cov_pars_lag_1 Covariance parameters from previous iteration used for Nesterov step (on transformed scale). Default = nullptr
*/
void OptimCovPar(const double* y_data, double* init_cov_pars, double* optim_cov_pars,
int& num_it, double lr = 0.01, double acc_rate_cov = 0.5, int momentum_offset = 2,
int max_iter = 1000, double delta_rel_conv = 1.0e-6, string_t optimizer = "fisher_scoring",
bool use_nesterov_acc = true, int nesterov_schedule_version = 0,
double* std_dev_cov_par = nullptr, bool calc_std_dev = false, double* cov_pars_lag_1 = nullptr) {
if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer) == SUPPORTED_OPTIM_COV_PAR_.end()) {
Log::Fatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer.c_str());
}
SetY(y_data);
vec_t cov_pars = Eigen::Map<vec_t>(init_cov_pars, num_cov_par_);
vec_t cov_pars_lag1 = (cov_pars_lag_1 == nullptr) ? cov_pars : vec_t(Eigen::Map<vec_t>(cov_pars_lag_1, num_cov_par_));
num_it = max_iter;
Log::Debug("Initial covariance parameters");
for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %f", i, cov_pars[i]); }
for (int it = 0; it < max_iter; ++it) {
ApplyMomentumStep(it, cov_pars, cov_pars_lag1, use_nesterov_acc, acc_rate_cov, nesterov_schedule_version, true, momentum_offset);
SetCovParsComps(cov_pars);
CalcCovFactor(vecchia_approx_, true, 1., false);//Create covariance matrix and factorize it (and also calculate derivatives if Vecchia approximation is used)
CalcYAux();
if (optimizer == "gradient_descent") {//gradient descent
UpdateCovParGradOneIter(lr, cov_pars);
}
else if (optimizer == "fisher_scoring") {//Fisher scoring
UpdateCovParFisherScoringOneIter(cov_pars);
}
CheckNaN(cov_pars);
if (it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) {
Log::Debug("Covariance parameter estimation: iteration number %d", it + 1);
for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %f", i, cov_pars[i]); }
}
if ((cov_pars - cov_pars_lag1).norm() / cov_pars_lag1.norm() < delta_rel_conv) {
num_it = it + 1;
break;
}
}
if (num_it == max_iter) {
Log::Warning("Covariance parameter estimation: no convergence after the maximal number of iterations. If this is a problem, you might consider increasing the number of iterations or using a different learning rate.");
}
for (int i = 0; i < num_cov_par_; ++i) {
optim_cov_pars[i] = cov_pars[i];
}
if (calc_std_dev) {
vec_t std_dev_cov(num_cov_par_);
CalcStdDevCovPar(cov_pars, std_dev_cov);
for (int i = 0; i < num_cov_par_; ++i) {
std_dev_cov_par[i] = std_dev_cov[i];
}
}
has_covariates_ = false;
}
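//A usage sketch for covariance parameter estimation (illustrative only; 'model' and the
//response buffer 'y' are hypothetical objects on the caller's side):
//	std::vector<double> init_cov_pars(model.num_cov_par()), optim_cov_pars(model.num_cov_par());
//	model.FindInitCovPar(y.data(), init_cov_pars.data());
//	int num_it;
//	model.OptimCovPar(y.data(), init_cov_pars.data(), optim_cov_pars.data(), num_it,
//		0.01, 0.5, 2, 1000, 1.0e-6, "gradient_descent");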
/*!
* \brief Find linear regression coefficients and covariance parameters that minimize the negative log-likelihood (=MLE) using (Nesterov accelerated) gradient descent
* Note: You should pre-allocate memory for optim_cov_pars and optim_coef. Their lengths equal the number of covariance parameters and the number of regression coefficients, respectively
* If calc_std_dev=true, you also need to pre-allocate memory for std_dev_cov_par and std_dev_coef of the same length for the standard deviations
* \param y_data Response variable data
* \param covariate_data Covariate data (=independent variables, features)
* \param num_covariates Number of covariates
* \param[out] optim_cov_pars Optimal covariance parameters
* \param[out] optim_coef Optimal regression coefficients
* \param[out] num_it Number of iterations
* \param init_cov_pars Initial values for covariance parameters of RE components
* \param init_coef Initial values for the regression coefficients
* \param lr_coef Learning rate for fixed-effect linear coefficients
* \param lr_cov Learning rate for covariance parameters
* \param acc_rate_coef Acceleration rate for coefficients for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param acc_rate_cov Acceleration rate for covariance parameters for Nesterov acceleration (only relevant if nesterov_schedule_version == 0).
* \param momentum_offset Number of iterations for which no momentum is applied in the beginning
* \param max_iter Maximal number of iterations
* \param delta_rel_conv Convergence criterion: stop iteration if relative change in parameters is below this value
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param optimizer_cov Optimizer for covariance parameters. Options: "gradient_descent" or "fisher_scoring"
* \param optimizer_coef Optimizer for coefficients. Options: "gradient_descent" or "wls" (coordinate descent using weighted least squares)
* \param[out] std_dev_cov_par Standard deviations for the covariance parameters
* \param[out] std_dev_coef Standard deviations for the coefficients
* \param calc_std_dev If true, asymptotic standard deviations for the MLE of the covariance parameters are calculated as the diagonal of the inverse Fisher information
*/
void OptimLinRegrCoefCovPar(const double* y_data, const double* covariate_data, int num_covariates,
double* optim_cov_pars, double* optim_coef, int& num_it, double* init_cov_pars, double* init_coef = nullptr,
double lr_coef = 0.01, double lr_cov = 0.01, double acc_rate_coef = 0.1, double acc_rate_cov = 0.5, int momentum_offset = 2,
int max_iter = 1000, double delta_rel_conv = 1.0e-6, bool use_nesterov_acc = true, int nesterov_schedule_version = 0,
string_t optimizer_cov = "fisher_scoring", string_t optimizer_coef = "wls", double* std_dev_cov_par = nullptr,
double* std_dev_coef = nullptr, bool calc_std_dev = false) {
if (SUPPORTED_OPTIM_COV_PAR_.find(optimizer_cov) == SUPPORTED_OPTIM_COV_PAR_.end()) {
Log::Fatal("Optimizer option '%s' is not supported for covariance parameters.", optimizer_cov.c_str());
}
if (SUPPORTED_OPTIM_COEF_.find(optimizer_coef) == SUPPORTED_OPTIM_COEF_.end()) {
Log::Fatal("Optimizer option '%s' is not supported for regression coefficients.", optimizer_coef.c_str());
}
CHECK(covariate_data != nullptr);
has_covariates_ = true;
num_coef_ = num_covariates;
X_ = Eigen::Map<const den_mat_t>(covariate_data, num_data_, num_coef_);
//Check whether one of the columns contains only 1's and, if not, issue a warning
vec_t vec_ones(num_data_);
vec_ones.setOnes();
bool has_intercept = false;
for (int icol = 0; icol < num_coef_; ++icol) {
if ((X_.col(icol) - vec_ones).cwiseAbs().sum() < 0.001) {
has_intercept = true;
break;
}
}
if (!has_intercept) {
Log::Warning("The covariate data contains no column of ones. This means that there is no intercept included.");
}
y_vec_ = Eigen::Map<const vec_t>(y_data, num_data_);
vec_t cov_pars = Eigen::Map<const vec_t>(init_cov_pars, num_cov_par_);
vec_t cov_pars_lag1 = cov_pars;
vec_t beta(num_covariates);
if (init_coef == nullptr) {
beta.setZero();
}
else {
beta = Eigen::Map<const vec_t>(init_coef, num_covariates);
}
vec_t beta_lag1 = beta;
vec_t resid;
num_it = max_iter;
for (int it = 0; it < max_iter; ++it) {
if (it > 0) {
ApplyMomentumStep(it, cov_pars, cov_pars_lag1, use_nesterov_acc, acc_rate_cov, nesterov_schedule_version, true, momentum_offset);
if (optimizer_coef == "gradient_descent") {
ApplyMomentumStep(it, beta, beta_lag1, use_nesterov_acc, acc_rate_coef, nesterov_schedule_version, false, momentum_offset);
}
}
SetCovParsComps(cov_pars);
CalcCovFactor(vecchia_approx_, true, 1., false);
if (optimizer_coef == "gradient_descent") {//one step of gradient descent
resid = y_vec_ - (X_ * beta);
SetY(resid.data());
CalcYAux();
UpdateCoefGradOneIter(lr_coef, cov_pars[0], X_, beta);
}
else if (optimizer_coef == "wls") {//coordinate descent using generalized least squares
SetY(y_vec_.data());
CalcYAux();
beta_lag1 = beta;
UpdateCoefGLS(X_, beta);
}
resid = y_vec_ - (X_ * beta);
SetY(resid.data());
CalcYAux();
if (optimizer_cov == "gradient_descent") {//one step of gradient descent
UpdateCovParGradOneIter(lr_cov, cov_pars);
}
else if (optimizer_cov == "fisher_scoring") {//one step of Fisher scoring
UpdateCovParFisherScoringOneIter(cov_pars);
}
CheckNaN(cov_pars);
if (it < 10 || ((it + 1) % 10 == 0 && (it + 1) < 100) || ((it + 1) % 100 == 0 && (it + 1) < 1000) || ((it + 1) % 1000 == 0 && (it + 1) < 10000) || ((it + 1) % 10000 == 0)) {
Log::Debug("Gradient descent iteration number %d", it + 1);
for (int i = 0; i < (int)cov_pars.size(); ++i) { Log::Debug("cov_pars[%d]: %f", i, cov_pars[i]); }
for (int i = 0; i < std::min((int)beta.size(), 3); ++i) { Log::Debug("beta[%d]: %f", i, beta[i]); }
}
if (((beta - beta_lag1).norm() / beta_lag1.norm() < delta_rel_conv) && ((cov_pars - cov_pars_lag1).norm() / cov_pars_lag1.norm() < delta_rel_conv)) {
num_it = it + 1;
break;
}
}
if (num_it == max_iter) {
Log::Warning("Covariance parameter estimation: no convergence after the maximal number of iterations");
}
for (int i = 0; i < num_cov_par_; ++i) {
optim_cov_pars[i] = cov_pars[i];
}
if (calc_std_dev) {
vec_t std_dev_cov(num_cov_par_);
CalcStdDevCovPar(cov_pars, std_dev_cov);
for (int i = 0; i < num_cov_par_; ++i) {
std_dev_cov_par[i] = std_dev_cov[i];
}
}
for (int i = 0; i < num_covariates; ++i) {
optim_coef[i] = beta[i];
}
if (calc_std_dev) {
vec_t std_dev_beta(num_covariates);
CalcStdDevCoef(cov_pars, X_, std_dev_beta);
for (int i = 0; i < num_covariates; ++i) {
std_dev_coef[i] = std_dev_beta[i];
}
}
}
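//A usage sketch for joint estimation of linear regression coefficients and covariance
//parameters (illustrative only; 'X' is a hypothetical covariate data buffer in column-major
//format with num_covariates columns, including a column of ones for the intercept):
//	std::vector<double> optim_coef(num_covariates);
//	model.OptimLinRegrCoefCovPar(y.data(), X.data(), num_covariates, optim_cov_pars.data(),
//		optim_coef.data(), num_it, init_cov_pars.data(), nullptr, 0.01, 0.01, 0.1, 0.5, 2,
//		1000, 1.0e-6, true, 0, "fisher_scoring", "wls");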
/*!
* \brief Set the data used for making predictions (useful if the same data is used repeatedly, e.g., in validation of GPBoost)
* \param num_data_pred Number of data points for which predictions are made
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
*/
void SetPredictionData(int num_data_pred,
const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr,
const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr,
const double* gp_rand_coef_data_pred = nullptr, const double* covariate_data_pred = nullptr) {
if (cluster_ids_data_pred == nullptr) {
cluster_ids_data_pred_.clear();
}
else {
cluster_ids_data_pred_ = std::vector<gp_id_t>(cluster_ids_data_pred, cluster_ids_data_pred + num_data_pred);
}
if (re_group_data_pred == nullptr) {
re_group_levels_pred_.clear();
if (num_re_group_ > 0) {
Log::Fatal("No group data is provided for making predictions");
}
}
else {
//For grouped random effects: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_pred) with strings of group levels from characters in 'const char* re_group_data_pred'
re_group_levels_pred_ = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred));
ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred_);
}
if (re_group_rand_coef_data_pred == nullptr) {
re_group_rand_coef_data_pred_.clear();
}
else {
re_group_rand_coef_data_pred_ = std::vector<double>(re_group_rand_coef_data_pred, re_group_rand_coef_data_pred + num_data_pred * num_re_group_rand_coef_);
}
if (gp_coords_data_pred == nullptr) {
gp_coords_data_pred_.clear();
}
else {
gp_coords_data_pred_ = std::vector<double>(gp_coords_data_pred, gp_coords_data_pred + num_data_pred * dim_gp_coords_);
}
if (gp_rand_coef_data_pred == nullptr) {
gp_rand_coef_data_pred_.clear();
}
else {
gp_rand_coef_data_pred_ = std::vector<double>(gp_rand_coef_data_pred, gp_rand_coef_data_pred + num_data_pred * num_gp_rand_coef_);
}
if (covariate_data_pred == nullptr) {
covariate_data_pred_.clear();
}
else {
covariate_data_pred_ = std::vector<double>(covariate_data_pred, covariate_data_pred + num_data_pred * num_coef_);
}
}
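//A sketch of the intended pattern for repeated predictions on fixed data, e.g., during
//validation in boosting (illustrative only; all data pointers are hypothetical):
//	model.SetPredictionData(num_data_pred, cluster_ids_pred, nullptr, nullptr,
//		gp_coords_pred, nullptr, covariate_data_pred);
//	//... then, repeatedly with updated covariance parameters:
//	model.Predict(cov_pars, y_obs, num_data_pred, out_predict, false, nullptr, coef,
//		nullptr, nullptr, nullptr, nullptr, nullptr, true);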
/*!
* \brief Make predictions: calculate conditional mean and covariance matrix
* Note: You should pre-allocate memory for out_predict
* Its length is equal to num_data_pred if only the conditional mean is predicted (predict_cov_mat=false)
* or num_data_pred * (1 + num_data_pred) if both the conditional mean and covariance matrix are predicted (predict_cov_mat=true)
* \param cov_pars_pred Covariance parameters of components
* \param y_obs Response variable for observed data
* \param num_data_pred Number of data points for which predictions are made
* \param[out] out_predict Conditional mean at prediction points (="predicted value") followed by (if predict_cov_mat=true) the conditional covariance matrix in column-major format
* \param predict_cov_mat If true, the conditional covariance matrix is calculated (default=false)
* \param covariate_data_pred Covariate data (=independent variables, features) for prediction
* \param coef_pred Coefficients for linear covariates
* \param cluster_ids_data_pred IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization) for which predictions are to be made
* \param re_group_data_pred Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param re_group_rand_coef_data_pred Covariate data for grouped random coefficients
* \param gp_coords_data_pred Coordinates (features) for Gaussian process
* \param gp_rand_coef_data_pred Covariate data for Gaussian process random coefficients
* \param use_saved_data If true, saved data is used and some arguments are ignored
* \param vecchia_pred_type Type of Vecchia approximation for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first for making predictions, "latent_order_obs_first_cond_obs_only" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are only observed points, "latent_order_obs_first_cond_all" = Vecchia approximation for the latent process and observed data is ordered first and neighbors are selected among all points
* \param num_neighbors_pred The number of neighbors used in the Vecchia approximation for making predictions (-1 means that the value already set at initialization is used)
*/
void Predict(const double* cov_pars_pred, const double* y_obs, data_size_t num_data_pred,
double* out_predict, bool predict_cov_mat = false,
const double* covariate_data_pred = nullptr, const double* coef_pred = nullptr,
const gp_id_t* cluster_ids_data_pred = nullptr, const char* re_group_data_pred = nullptr,
const double* re_group_rand_coef_data_pred = nullptr, double* gp_coords_data_pred = nullptr,
const double* gp_rand_coef_data_pred = nullptr, bool use_saved_data = false,
const char* vecchia_pred_type = nullptr, int num_neighbors_pred = -1) {
//Should previously set data be used?
std::vector<std::vector<string_t>> re_group_levels_pred;//Matrix with group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
if (use_saved_data) {
re_group_levels_pred = re_group_levels_pred_;
if (cluster_ids_data_pred_.empty()) {
cluster_ids_data_pred = nullptr;
}
else {
cluster_ids_data_pred = cluster_ids_data_pred_.data();
}
if (re_group_rand_coef_data_pred_.empty()) {
re_group_rand_coef_data_pred = nullptr;
}
else {
re_group_rand_coef_data_pred = re_group_rand_coef_data_pred_.data();
}
if (gp_coords_data_pred_.empty()) {
gp_coords_data_pred = nullptr;
}
else {
gp_coords_data_pred = gp_coords_data_pred_.data();
}
if (gp_rand_coef_data_pred_.empty()) {
gp_rand_coef_data_pred = nullptr;
}
else {
gp_rand_coef_data_pred = gp_rand_coef_data_pred_.data();
}
if (covariate_data_pred_.empty()) {
covariate_data_pred = nullptr;
}
else {
covariate_data_pred = covariate_data_pred_.data();
}
}
else {
if (num_re_group_ > 0) {
if (re_group_data_pred == nullptr) {
Log::Fatal("No group data is provided for making predictions");
}
else {
//For grouped random effects: create matrix 're_group_levels_pred' (vector of vectors, dimension: num_re_group_ x num_data_pred) with strings of group levels from characters in 'const char* re_group_data_pred'
re_group_levels_pred = std::vector<std::vector<string_t>>(num_re_group_, std::vector<string_t>(num_data_pred));
ConvertCharToStringGroupLevels(num_data_pred, num_re_group_, re_group_data_pred, re_group_levels_pred);
}
}
}
//Some checks
CHECK(num_data_pred > 0);
if (has_covariates_) {
CHECK(covariate_data_pred != nullptr);
CHECK(coef_pred != nullptr);
}
if (y_obs == nullptr) {
if (y_.empty()) {
Log::Fatal("Observed data is not provided and has not been set before");
}
}
//Check whether some data is missing
if (re_group_rand_coef_data_pred == nullptr && num_re_group_rand_coef_ > 0) {
Log::Fatal("No covariate data for grouped random coefficients is provided for making predictions");
}
if (gp_coords_data_pred == nullptr && num_gp_ > 0) {
Log::Warning("No coordinate data for the Gaussian process is provided for making predictions");
}
if (gp_rand_coef_data_pred == nullptr && num_gp_rand_coef_ > 0) {
Log::Warning("No covariate data for Gaussian process random coefficients is provided for making predictions");
}
if (num_data_pred > 10000 && predict_cov_mat) {
double num_mem_d = ((double)num_data_pred) * ((double)num_data_pred);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
Log::Warning("The covariance matrix can be very large for large sample sizes which might lead to memory limitations. In your case (n = %d), the covariance needs at least approximately %d mb of memory. If you only need variances or covariances for linear combinations, contact the developer of this package and ask to implement this feature.", num_data_pred, mem_size);
}
if (vecchia_approx_) {
if (vecchia_pred_type != nullptr) {
string_t vecchia_pred_type_S = std::string(vecchia_pred_type);
CHECK(vecchia_pred_type_S == "order_obs_first_cond_obs_only" ||
vecchia_pred_type_S == "order_obs_first_cond_all" ||
vecchia_pred_type_S == "order_pred_first" ||
vecchia_pred_type_S == "latent_order_obs_first_cond_obs_only" ||
vecchia_pred_type_S == "latent_order_obs_first_cond_all");
vecchia_pred_type_ = vecchia_pred_type_S;
}
if (num_neighbors_pred > 0) {
num_neighbors_pred_ = num_neighbors_pred;
}
}
vec_t coef;
if (has_covariates_) {
coef = Eigen::Map<const vec_t>(coef_pred, num_coef_);
den_mat_t X_pred = Eigen::Map<const den_mat_t>(covariate_data_pred, num_data_pred, num_coef_);
vec_t mu = X_pred * coef;
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_pred; ++i) {
out_predict[i] = mu[i];
}
}
vec_t cov_pars = Eigen::Map<const vec_t>(cov_pars_pred, num_cov_par_);
//Set up cluster IDs
std::map<gp_id_t, int> num_data_per_cluster_pred;
std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_pred;
std::vector<gp_id_t> unique_clusters_pred;
data_size_t num_clusters_pred;
SetUpGPIds(num_data_pred, cluster_ids_data_pred, num_data_per_cluster_pred,
data_indices_per_cluster_pred, unique_clusters_pred, num_clusters_pred);
//Check whether predictions are made for existing clusters or only for new independent clusters
bool pred_for_observed_data = false;
for (const auto& cluster_i : unique_clusters_pred) {
if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) != unique_clusters_.end()) {
pred_for_observed_data = true;
break;
}
}
//Factorize covariance matrix and calculate Psi^{-1}y_obs (if required for prediction)
if (pred_for_observed_data) {//TODO: this actually needs to be done only for the GP realizations for which predictions are made (currently it is done for all of them in unique_clusters_pred)
if (has_covariates_) {
vec_t resid;
if (y_obs != nullptr) {
vec_t y = Eigen::Map<const vec_t>(y_obs, num_data_);
resid = y - (X_ * coef);
}
else {
resid = y_vec_ - (X_ * coef);
}
SetY(resid.data());
}
else {
if (y_obs != nullptr) {
SetY(y_obs);
}
}
SetCovParsComps(cov_pars);
if (!vecchia_approx_) {
CalcCovFactor(false, true, 1., false);//no need to do this for the Vecchia approximation, is done in the prediction steps
CalcYAux();
}
}//end if(pred_for_observed_data)
//Initialize covariance matrix
if (predict_cov_mat) {//TODO: avoid unnecessary initialization (only set to 0 for covariances across different realizations of GPs)
#pragma omp parallel for schedule(static)
for (int i = 0; i < (num_data_pred * num_data_pred); ++i) {
out_predict[i + num_data_pred] = 0.;
}
}
for (const auto& cluster_i : unique_clusters_pred) {
//no data observed for this Gaussian process with ID 'cluster_i'. Thus use prior mean (0) and prior covariance matrix
if (std::find(unique_clusters_.begin(), unique_clusters_.end(), cluster_i) == unique_clusters_.end()) {
if (!has_covariates_) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] = 0.;
}
}
if (predict_cov_mat) {
T1 psi;
std::vector<std::shared_ptr<RECompBase<T1>>> re_comps_cluster_i;
if (vecchia_approx_) {
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
std::vector<Triplet_t> entries_init_B_cluster_i;
std::vector<Triplet_t> entries_init_B_grad_cluster_i;
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_per_cluster_pred[cluster_i]);
CreateREComponentsVecchia(num_data_pred, data_indices_per_cluster_pred, cluster_i, num_data_per_cluster_pred,
gp_coords_data_pred, dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i, "none", num_neighbors_pred_);//TODO: maybe also use ordering for making predictions? (need to check that there are not errors)
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]);
re_comps_cluster_i[j]->SetCovPars(pars);
}
sp_mat_t B_cluster_i;
sp_mat_t D_inv_cluster_i;
std::vector<sp_mat_t> B_grad_cluster_i;//not used, but needs to be passed to function
std::vector<sp_mat_t> D_grad_cluster_i;//not used, but needs to be passed to function
CalcCovFactorVecchia(num_data_per_cluster_pred[cluster_i], false, re_comps_cluster_i,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i,
entries_init_B_cluster_i, entries_init_B_grad_cluster_i,
z_outer_z_obs_neighbors_cluster_i,
B_cluster_i, D_inv_cluster_i, B_grad_cluster_i, D_grad_cluster_i);
//Calculate Psi (= B^{-1} D B^{-T} in the Vecchia approximation)
sp_mat_t D_sqrt(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
D_sqrt.setIdentity();
D_sqrt.diagonal().array() = D_inv_cluster_i.diagonal().array().pow(-0.5);
sp_mat_t B_inv_D_sqrt;
eigen_sp_Lower_sp_RHS_cs_solve(B_cluster_i, D_sqrt, B_inv_D_sqrt, true);
psi = B_inv_D_sqrt * B_inv_D_sqrt.transpose();
}//end Vecchia
else {
psi.resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
psi.setIdentity();
CreateREComponents(num_data_pred, num_re_group_, data_indices_per_cluster_pred, cluster_i, re_group_levels_pred, num_data_per_cluster_pred,
num_re_group_rand_coef_, re_group_rand_coef_data_pred, ind_effect_group_rand_coef_, num_gp_, gp_coords_data_pred,
dim_gp_coords_, gp_rand_coef_data_pred, num_gp_rand_coef_, cov_fct_, cov_fct_shape_, ind_intercept_gp_, re_comps_cluster_i);
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]);
re_comps_cluster_i[j]->SetCovPars(pars);
re_comps_cluster_i[j]->CalcSigma();
psi += (*(re_comps_cluster_i[j]->GetZSigmaZt().get()));
}
}//end not Vecchia
psi *= cov_pars[0];
//write to output
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = psi.coeff(j, i);
}
}
}//end predict_cov_mat
}//end cluster_i with no observed data
else {//there exists observed data for this cluster_i (= typical case)
den_mat_t gp_coords_mat_pred;
if (num_gp_ > 0) {
std::vector<double> gp_coords_pred;
for (int j = 0; j < dim_gp_coords_; ++j) {
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
gp_coords_pred.push_back(gp_coords_data_pred[j * num_data_pred + id]);
}
}
gp_coords_mat_pred = Eigen::Map<den_mat_t>(gp_coords_pred.data(), num_data_per_cluster_pred[cluster_i], dim_gp_coords_);
}
vec_t mean_pred_id(num_data_per_cluster_pred[cluster_i]);
T1 cov_mat_pred_id;
if (predict_cov_mat) {
cov_mat_pred_id = T1(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
}
if (vecchia_approx_) {
std::shared_ptr<RECompGP<T1>> re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][ind_intercept_gp_]);
int num_data_tot = num_data_per_cluster_[cluster_i] + num_data_per_cluster_pred[cluster_i];
double num_mem_d = ((double)num_neighbors_pred_) * ((double)num_neighbors_pred_) * ((double)num_data_tot) + ((double)num_neighbors_pred_) * ((double)num_data_tot);
int mem_size = (int)(num_mem_d * 8. / 1000000.);
if (mem_size > 4000) {
Log::Warning("The current implementation of the Vecchia approximation needs a lot of memory if the number of neighbors is large. In your case (nb. of neighbors = %d, nb. of observations = %d, nb. of predictions = %d), this needs at least approximately %d mb of memory. If this is a problem for you, contact the developer of this package and ask to change this.", num_neighbors_pred_, num_data_per_cluster_[cluster_i], num_data_per_cluster_pred[cluster_i], mem_size);
}
if (vecchia_pred_type_ == "order_obs_first_cond_obs_only") {
CalcPredVecchiaObservedFirstOrder(true, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "order_obs_first_cond_all") {
CalcPredVecchiaObservedFirstOrder(false, cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "order_pred_first") {
CalcPredVecchiaPredictedFirstOrder(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "latent_order_obs_first_cond_obs_only") {
CalcPredVecchiaLatentObservedFirstOrder(true, cluster_i, num_data_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
else if (vecchia_pred_type_ == "latent_order_obs_first_cond_all") {
CalcPredVecchiaLatentObservedFirstOrder(false, cluster_i, num_data_per_cluster_pred,
re_comp->coords_, gp_coords_mat_pred, predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}
}//end Vecchia approximation
else {
CalcPred(cluster_i, num_data_pred, num_data_per_cluster_pred, data_indices_per_cluster_pred,
re_group_levels_pred, re_group_rand_coef_data_pred, gp_coords_mat_pred, gp_rand_coef_data_pred,
predict_cov_mat, mean_pred_id, cov_mat_pred_id);
}//end not Vecchia approximation
//write to output
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {
if (has_covariates_) {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] += mean_pred_id[i];
}
else {
out_predict[data_indices_per_cluster_pred[cluster_i][i]] = mean_pred_id[i];
}
}
if (predict_cov_mat) {
cov_mat_pred_id *= cov_pars[0];
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_pred[cluster_i]; ++i) {//column index
for (int j = 0; j < num_data_per_cluster_pred[cluster_i]; ++j) {//row index
out_predict[data_indices_per_cluster_pred[cluster_i][i] * num_data_pred + data_indices_per_cluster_pred[cluster_i][j] + num_data_pred] = cov_mat_pred_id.coeff(j, i);//cov_mat_pred_id_den(j, i);
}
}
}
}//end cluster_i with data
}//end loop over cluster
}
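//Layout of 'out_predict' as documented above: the first num_data_pred entries contain the
//conditional means; if predict_cov_mat=true, the next num_data_pred * num_data_pred entries
//contain the conditional covariance matrix in column-major format, i.e. (hypothetical indices i, j):
//	double mean_i = out_predict[i];
//	double cov_ij = out_predict[num_data_pred + j * num_data_pred + i];//covariance between predictions i (row) and j (column)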
/*!
* \brief Find "reasonable" default values for the intial values of the covariance parameters (on transformed scale)
* Note: You should pre-allocate memory for optim_cov_pars (length = number of covariance parameters)
* \param y_data Response variable data
* \param[out] init_cov_pars Initial values for covariance parameters of RE components
*/
void FindInitCovPar(const double* y_data, double* init_cov_pars) {
double mean = 0;
for (int i = 0; i < num_data_; ++i) {
mean += y_data[i];
}
mean /= num_data_;
double var = 0;
for (int i = 0; i < num_data_; ++i) {
var += (y_data[i] - mean) * (y_data[i] - mean);
}
var /= (num_data_ - 1);
init_cov_pars[0] = var;
int ind_par = 1;
for (int j = 0; j < num_comps_total_; ++j) {
int num_par_j = ind_par_[j + 1] - ind_par_[j];
vec_t pars = vec_t(num_par_j);
re_comps_[unique_clusters_[0]][j]->FindInitCovPar(pars);
for (int jj = 0; jj < num_par_j; ++jj) {
init_cov_pars[ind_par] = pars[jj];
ind_par++;
}
}
}
int num_cov_par() {
return(num_cov_par_);
}
//void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
// if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
// if (vecchia_approx_) {
// den_mat_t BX = B_[unique_clusters_[0]] * X;
// XT_psi_inv_X = BX.transpose() * D_inv_[unique_clusters_[0]] * BX;
// }
// else {
// XT_psi_inv_X = X.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(X);
// }
// }
// else {
// XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
// XT_psi_inv_X.setZero();
// den_mat_t BX;
// for (const auto& cluster_i : unique_clusters_) {
// if (vecchia_approx_) {
// BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
// XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
// }
// else {
// XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * chol_facts_solve_[cluster_i].solve((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all));
// }
// }
// }
//}
/*!
* \brief Calculate the leaf values when performing a Newton update step after the tree structure has been found in tree-boosting
* Note: only used in GPBoost for tree-boosting (this is called from regression_objective). It is assumed that 'CalcYAux' has been called before.
* \param data_leaf_index Leaf index for every data point (array of size num_data)
* \param num_leaves Number of leaves
* \param[out] leaf_values Leaf values when performing a Newton update step (array of size num_leaves)
* \param marg_variance The marginal variance. Default = 1. Values can be multiplied by it since Newton updates do not depend on it, but 'CalcYAux' might have been called with marg_variance != 1.
*/
void NewtonUpdateLeafValues(const int* data_leaf_index,
const int num_leaves, double* leaf_values, double marg_variance = 1.) {
CHECK(y_aux_has_been_calculated_);
den_mat_t HTPsiInvH(num_leaves, num_leaves);
vec_t HTYAux(num_leaves);
HTPsiInvH.setZero();
HTYAux.setZero();
for (const auto& cluster_i : unique_clusters_) {
//Entries for matrix H_cluster_i = incidence matrix H that relates tree leaves to observations for cluster_i
std::vector<Triplet_t> entries_H_cluster_i(num_data_per_cluster_[cluster_i]);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_per_cluster_[cluster_i]; ++i) {
entries_H_cluster_i[i] = Triplet_t(i, data_leaf_index[data_indices_per_cluster_[cluster_i][i]], 1.);
}
if (vecchia_approx_) {
sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);//row major format is needed for Vecchia approx.
H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
sp_mat_t BH = B_[cluster_i] * H_cluster_i;
den_mat_t HTPsiInvH_cluster_i = den_mat_t(BH.transpose() * D_inv_[cluster_i] * BH);
HTPsiInvH += HTPsiInvH_cluster_i;
}
else {
sp_mat_t H_cluster_i(num_data_per_cluster_[cluster_i], num_leaves);
H_cluster_i.setFromTriplets(entries_H_cluster_i.begin(), entries_H_cluster_i.end());
HTYAux -= H_cluster_i.transpose() * y_aux_[cluster_i];//minus sign since y_aux_ has been calculated on the gradient = F-y (and not y-F)
T1 PsiInvSqrtH;
CalcPsiInvSqrtH(PsiInvSqrtH, H_cluster_i, cluster_i);
den_mat_t HTPsiInvH_cluster_i = PsiInvSqrtH.transpose() * PsiInvSqrtH;
HTPsiInvH += HTPsiInvH_cluster_i;
/* Log::Info("H_cluster_i[:,0] = %f, %f, %f, %f, %f, %f", H_cluster_i.coeffRef(0, 0), H_cluster_i.coeffRef(1, 0), H_cluster_i.coeffRef(2, 0), H_cluster_i.coeffRef(3, 0), H_cluster_i.coeffRef(4, 0), H_cluster_i.coeffRef(5, 0));
Log::Info("H_cluster_i[:,1] = %f, %f, %f, %f, %f, %f", H_cluster_i.coeffRef(0, 1), H_cluster_i.coeffRef(1, 1), H_cluster_i.coeffRef(2, 1), H_cluster_i.coeffRef(3, 1), H_cluster_i.coeffRef(4, 1), H_cluster_i.coeffRef(5, 1));*/
}
}
//Log::Info("marg_variance: %f", marg_variance);
HTYAux *= marg_variance;
vec_t new_leaf_values = HTPsiInvH.llt().solve(HTYAux);
for (int i = 0; i < num_leaves; ++i) {
leaf_values[i] = new_leaf_values[i];
}
//Log::Info("HTYAux[:] = %f, %f", HTYAux(0), HTYAux(1));
//Log::Info("HTPsiInvH[0,:] = %f, %f", HTPsiInvH(0, 0), HTPsiInvH(0, 1));
//Log::Info("HTPsiInvH[1,:] = %f, %f", HTPsiInvH(1, 0), HTPsiInvH(1, 1));
}
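//In matrix notation, the loop above assembles the generalized least squares normal equations
//of a Newton step: with the incidence matrix H relating tree leaves to observations, the new
//leaf values b solve (H^T * Psi^(-1) * H) * b = H^T * Psi^(-1) * (y - F), where the right-hand
//side enters through y_aux_ with a sign flip since y_aux_ was calculated on the gradient F - y.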
private:
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points */
std::map<gp_id_t, std::vector<int>> data_indices_per_cluster_;
/*! \brief Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization */
std::map<gp_id_t, int> num_data_per_cluster_;
/*! \brief Number of independent realizations of the REs/GPs */
data_size_t num_clusters_;
/*! \brief Unique labels of independent realizations */
std::vector<gp_id_t> unique_clusters_;
/*! \brief Number of grouped (intercept) random effects */
data_size_t num_re_group_ = 0;
/*! \brief Number of grouped random coefficients */
data_size_t num_re_group_rand_coef_ = 0;
/*! \brief Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1 (and ends at the number of base intercept random effects). Length of vector = num_re_group_rand_coef_. */
std::vector<int> ind_effect_group_rand_coef_;
/*! \brief Total number of grouped random effects (random intercepts plus random coefficients (slopes)) */
data_size_t num_re_group_total_ = 0;
/*! \brief 1 if there is a Gaussian process, 0 otherwise */
data_size_t num_gp_ = 0;
/*! \brief Type of GP. 0 = classical (spatial) GP, 1 = spatio-temporal GP */ //TODO: remove?
int8_t GP_type_ = 0;
/*! \brief Number of random coefficient GPs */
data_size_t num_gp_rand_coef_ = 0;
/*! \brief Total number of GPs (random intercepts plus random coefficients) */
data_size_t num_gp_total_ = 0;
/*! \brief Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs */
int ind_intercept_gp_;
/*! \brief Dimension of the coordinates (=number of features) for Gaussian process */
int dim_gp_coords_ = 2;//required to save since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Type of covariance (kernel) function for Gaussian processes */
string_t cov_fct_ = "exponential";//required to also save here since it is needed in the Predict() function when predictions are made for new independent realizations of GPs
/*! \brief Shape parameter of covariance function (=smoothness parameter for Matern covariance) */
double cov_fct_shape_ = 0.;
/*! \brief Keys: labels of independent realizations of REs/GPs, values: vectors with individual RE/GP components */
std::map<gp_id_t, std::vector<std::shared_ptr<RECompBase<T1>>>> re_comps_;
/*! \brief Indices of parameters of RE components in global parameter vector cov_pars. ind_par_[i] + 1 and ind_par_[i+1] are the indices of the first and last parameter of component number i */
std::vector<data_size_t> ind_par_;
/*! \brief Number of covariance parameters */
data_size_t num_cov_par_;
/*! \brief Total number of random effect components (grouped REs plus other GPs) */
data_size_t num_comps_total_ = 0;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Symbolic Cholesky decomposition of Psi matrices */
std::map<gp_id_t, T2> chol_facts_solve_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Cholesky factors of Psi matrices */ //TODO: above needed or can pattern be saved somewhere else?
std::map<gp_id_t, T1> chol_facts_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: **** */ //TODO: remove?
std::map<gp_id_t, T1> Id_;
/*! \brief Key: labels of independent realizations of REs/GPs, values: Identity matrices used for calculation of inverse covariance matrix */
std::map<gp_id_t, cs> Id_cs_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: data y */
std::map<gp_id_t, vec_t> y_;
/*! \brief Key: labels of independent realizations of REs/GPs, value: Psi^-1*y_ (used for various computations) */
std::map<gp_id_t, vec_t> y_aux_;
/*! \brief Indicates whether y_aux_ has been calculated */
bool y_aux_has_been_calculated_ = false;
/*! \brief Copy of response data (used only in case there are also linear covariates since then y_ is modified during the algorithm) */
vec_t y_vec_;
/*! \brief If true, a symbolic decomposition is done when factorizing the covariance matrix (only relevant if sparse matrices are used) */
bool do_symbolic_decomposition_ = true;
/*! \brief If true, the model includes linear covariates */
bool has_covariates_ = false;
/*! \brief Number of covariates */
int num_coef_;
/*! \brief Covariate data */
den_mat_t X_;
/*! \brief List of supported optimizers for covariance parameters */
const std::set<string_t> SUPPORTED_OPTIM_COV_PAR_{ "gradient_descent", "fisher_scoring" };
/*! \brief List of supported optimizers for regression coefficients */
const std::set<string_t> SUPPORTED_OPTIM_COEF_{ "gradient_descent", "wls" };
/*! \brief If true, the Vecchia approximation is used for the Gaussian process */
bool vecchia_approx_ = false;
/*! \brief The number of neighbors used in the Vecchia approximation */
int num_neighbors_;
/*! \brief Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering */
string_t vecchia_ordering_ = "none";
/*! \brief The number of neighbors used in the Vecchia approximation for making predictions */
int num_neighbors_pred_;
/*! \brief Type of Vecchia approximation used for making predictions. "order_obs_first_cond_obs_only" = observed data is ordered first and neighbors are only observed points, "order_obs_first_cond_all" = observed data is ordered first and neighbors are selected among all points (observed + predicted), "order_pred_first" = predicted data is ordered first, "latent_order_obs_first_cond_obs_only" and "latent_order_obs_first_cond_all" = corresponding versions of a Vecchia approximation for the latent process */
string_t vecchia_pred_type_ = "order_obs_first_cond_obs_only";//This is saved here and not simply set in the prediction function since it needs to be used repeatedly in the GPBoost algorithm when making predictions in "regression_metric.hpp" and the way predictions are done for the Vecchia approximation should be decoupled from the boosting algorithm
/*! \brief List of supported prediction types for the Vecchia approximation */
const std::set<string_t> SUPPORTED_VECCHIA_PRED_TYPES_{ "order_obs_first_cond_obs_only",
"order_obs_first_cond_all", "order_pred_first",
"latent_order_obs_first_cond_obs_only", "latent_order_obs_first_cond_all" };
/*! \brief Collects indices of nearest neighbors (used for Vecchia approximation) */
std::map<gp_id_t, std::vector<std::vector<int>>> nearest_neighbors_;
/*! \brief Distances between locations and their nearest neighbors (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_obs_neighbors_;
/*! \brief Distances between nearest neighbors for all locations (this is used only if the Vecchia approximation is used, otherwise the distances are saved directly in the base GP component) */
std::map<gp_id_t, std::vector<den_mat_t>> dist_between_neighbors_;//TODO: this contains duplicate information (i.e. distances might be saved redundantly several times). But there is a trade-off between storage and computational speed. I currently don't see a way of saving unique distances without copying them when using them.
/*! \brief Outer product of the covariate vector at observations and neighbors with itself. First index = cluster, second index = data point i, third index = GP number j (this is used only if the Vecchia approximation is used; otherwise this is saved directly in the GP component using Z_) */
std::map<gp_id_t, std::vector<std::vector<den_mat_t>>> z_outer_z_obs_neighbors_;
/*! \brief Collects matrices B = I - A (=Cholesky factor of inverse covariance) for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> B_;
/*! \brief Collects diagonal matrices D^-1 for Vecchia approximation */
std::map<gp_id_t, sp_mat_t> D_inv_;
/*! \brief Collects derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> B_grad_;
/*! \brief Collects derivatives of matrices D for Vecchia approximation */
std::map<gp_id_t, std::vector<sp_mat_t>> D_grad_;
/*! \brief Triplets for initializing the matrices B */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_;
/*! \brief Triplets for initializing the matrices B_grad */
std::map<gp_id_t, std::vector<Triplet_t>> entries_init_B_grad_;
/*! \brief Variance of idiosyncratic error term (nugget effect) */
double sigma2_;
/*! \brief Cluster IDs for prediction */
std::vector<gp_id_t> cluster_ids_data_pred_;
/*! \brief Levels of grouped RE for prediction */
std::vector<std::vector<string_t>> re_group_levels_pred_;
/*! \brief Covariate data for grouped random coefficients for prediction */
std::vector<double> re_group_rand_coef_data_pred_;
/*! \brief Coordinates for GP for prediction */
std::vector<double> gp_coords_data_pred_;
/*! \brief Covariate data for Gaussian process random coefficients for prediction */
std::vector<double> gp_rand_coef_data_pred_;
/*! \brief Covariate data for linear regression term */
std::vector<double> covariate_data_pred_;
/*! \brief Nesterov schedule */
double NesterovSchedule(int iter, int momentum_schedule_version = 0,
double nesterov_acc_rate = 0.5, int momentum_offset = 2) {
if (iter < momentum_offset) {
return(0.);
}
else {
if (momentum_schedule_version == 0) {
return(nesterov_acc_rate);
}
else if (momentum_schedule_version == 1) {
return(1. - (3. / (6. + iter)));
}
else {
return(0.);
}
}
}
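//Illustrative momentum values for schedule version 1 with momentum_offset = 2: the rate
//1 - 3 / (6 + iter) increases towards 1, e.g.,
//	iter:     0    1    2      5       10      100
//	momentum: 0.   0.   0.625  0.7273  0.8125  0.9717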
/*! \brief Mutex for thread-safe calls */
std::mutex mutex_;
/*! \brief Constructs identity matrices if sparse matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
T3 I(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);//identity matrix for calculating precision matrix
I.setIdentity();
Id_.insert({ cluster_i, I });
cs Id_cs = cs();//same for the cs type //TODO: construct this independently of Id_, but then care needs to be taken when deleting the pointer objects.
Id_cs.nzmax = num_data_per_cluster_[cluster_i];
Id_cs.m = num_data_per_cluster_[cluster_i];
Id_cs.n = num_data_per_cluster_[cluster_i];
Id_[cluster_i].makeCompressed();
Id_cs.p = reinterpret_cast<csi*>(Id_[cluster_i].outerIndexPtr());
Id_cs.i = reinterpret_cast<csi*>(Id_[cluster_i].innerIndexPtr());
Id_cs.x = Id_[cluster_i].valuePtr();
Id_cs.nz = -1;
Id_cs_.insert({ cluster_i, Id_cs });
}
/*! \brief Constructs identity matrices if dense matrices are used (used for calculating inverse covariance matrix) */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void ConstructI(gp_id_t cluster_i) {
T3 I(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);//identity matrix for calculating precision matrix
I.setIdentity();
Id_.insert({ cluster_i, I });
}
/*!
* \brief Set response variable data (y_) for RE model
* \param y_data Response variable data
*/
void SetY(const double* y_data) {
if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
y_[unique_clusters_[0]] = Eigen::Map<const vec_t>(y_data, num_data_);
//y_[unique_clusters_[0]] = vec_t(num_data_);
//y_[unique_clusters_[0]].setZero();
}
else {
for (const auto& cluster_i : unique_clusters_) {
y_[cluster_i] = vec_t(num_data_per_cluster_[cluster_i]);//TODO: Is there a more efficient way that avoids copying?
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_[cluster_i][j] = y_data[data_indices_per_cluster_[cluster_i][j]];
}
}
}
}
/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). This array needs to be pre-allocated with length num_data_
*/
void GetYAux(double* y_aux) {
CHECK(y_aux_has_been_calculated_);
if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
for (int j = 0; j < num_data_; ++j) {
y_aux[j] = y_aux_[unique_clusters_[0]][j];
}
}
else {
for (const auto& cluster_i : unique_clusters_) {
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
y_aux[data_indices_per_cluster_[cluster_i][j]] = y_aux_[cluster_i][j];
}
}
}
}
/*!
* \brief Get y_aux = Psi^-1*y
* \param[out] y_aux Psi^-1*y (=y_aux_). This vector needs to be pre-allocated with length num_data_
*/
void GetYAux(vec_t& y_aux) {
CHECK(y_aux_has_been_calculated_);
if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
y_aux = y_aux_[unique_clusters_[0]];
}
else {
for (const auto& cluster_i : unique_clusters_) {
y_aux(data_indices_per_cluster_[cluster_i]) = y_aux_[cluster_i];
}
}
}
/*! \brief Do Cholesky decomposition if sparse matrices are used */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
if (analyze_pattern) {
chol_facts_solve_[cluster_i].analyzePattern(psi);
}
chol_facts_solve_[cluster_i].factorize(psi);
chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
chol_facts_[cluster_i].makeCompressed();
}
/*! \brief Do Cholesky decomposition if dense matrices are used */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcChol(T3& psi, gp_id_t cluster_i, bool analyze_pattern) {
if (analyze_pattern) {
Log::Warning("Pattern of Cholesky factor is not analyzed when dense matrices are used.");
}
chol_facts_solve_[cluster_i].compute(psi);
chol_facts_[cluster_i] = chol_facts_solve_[cluster_i].matrixL();
}
/*! \brief Calculate Psi^(-1) if sparse matrices are used */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
//Using CSparse function 'cs_spsolve'
cs L_cs = cs();//Prepare LHS
L_cs.nzmax = (int)chol_facts_[cluster_i].nonZeros();
L_cs.m = num_data_per_cluster_[cluster_i];
L_cs.n = num_data_per_cluster_[cluster_i];
L_cs.p = reinterpret_cast<csi*>(chol_facts_[cluster_i].outerIndexPtr());
L_cs.i = reinterpret_cast<csi*>(chol_facts_[cluster_i].innerIndexPtr());
L_cs.x = chol_facts_[cluster_i].valuePtr();
L_cs.nz = -1;
sp_mat_t L_inv;
sp_Lower_sp_RHS_cs_solve(&L_cs, &Id_cs_[cluster_i], L_inv, true);
psi_inv = L_inv.transpose() * L_inv;
////Version 2: doing sparse solving "by hand" but ignoring sparse RHS
//const double* val = chol_facts_[cluster_i].valuePtr();
//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
//den_mat_t L_inv_dens = den_mat_t(Id_[cluster_i]);
//for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], L_inv_dens.data() + j * num_data_per_cluster_[cluster_i]);
//}
//const sp_mat_t L_inv = L_inv_dens.sparseView();
//psi_inv = L_inv.transpose() * L_inv;
////Version 1
//cpsi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
}
/*! \brief Calculate Psi^(-1) if dense matrices are used */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInv(T3& psi_inv, gp_id_t cluster_i) {
////Version 1
//psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
//Version 2: solving by hand
T3 L_inv = Id_[cluster_i];
#pragma omp parallel for schedule(static)//TODO: maybe sometimes faster without parallelization?
for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], L_inv.data() + j * num_data_per_cluster_[cluster_i]);
}
//chol_facts_[cluster_i].triangularView<Eigen::Lower>().solveInPlace(L_inv); //slower
psi_inv = L_inv.transpose() * L_inv;
// Using dpotri from LAPACK does not work since LAPACK is not installed
//int info = 0;
//int n = num_data_per_cluster_[cluster_i];
//int lda = num_data_per_cluster_[cluster_i];
//char* uplo = "L";
//den_mat_t M = chol_facts_[cluster_i];
//BLASFUNC(dpotri)(uplo, &n, M.data(), &lda, &info);
}
/*! \brief Calculate Psi^(-0.5)H if dense matrices are used. Used in 'NewtonUpdateLeafValues' */
template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(T3& PsiInvSqrtH, sp_mat_t& H, gp_id_t cluster_i) {
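//Computes L^-1 * H column by column via forward substitution, where L is the lower-triangular
//Cholesky factor of Psi (Psi = L * L^T). Note that (L^-1 * H)^T * (L^-1 * H) = H^T * Psi^-1 * H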
PsiInvSqrtH = den_mat_t(H);
#pragma omp parallel for schedule(static)
for (int j = 0; j < H.cols(); ++j) {
L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], PsiInvSqrtH.data() + j * num_data_per_cluster_[cluster_i]);
}
}
/*! \brief Calculate Psi^(-0.5)H if sparse matrices are used. Used in 'NewtonUpdateLeafValues' */
template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
void CalcPsiInvSqrtH(T3& PsiInvSqrtH, sp_mat_t& H, gp_id_t cluster_i) {
//Using CSparse function 'cs_spsolve'
eigen_sp_Lower_sp_RHS_cs_solve(chol_facts_[cluster_i], H, PsiInvSqrtH, true);
}
///*!
//* \brief Calculate X^TPsi^(-1)X
//* \param X Covariate data matrix X
//* \param[out] XT_psi_inv_X X^TPsi^(-1)X
//*/
// template <class T3, typename std::enable_if< std::is_same<den_mat_t, T3>::value>::type * = nullptr >
// void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
// den_mat_t BX;
// if (num_clusters_ == 1) {
// gp_id_t cluster0 = unique_clusters_[0];
// if (vecchia_approx_) {
// BX = B_[cluster0] * X;
// XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
// }
// else {
// BX = X;
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
// L_solve(chol_facts_[cluster0].data(), num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
// }
// XT_psi_inv_X = BX.transpose() * BX;
// }
// }
// else {
// XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
// XT_psi_inv_X.setZero();
// for (const auto& cluster_i : unique_clusters_) {
// if (vecchia_approx_) {
// BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
// XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
// }
// else {
// BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// L_solve(chol_facts_[cluster_i].data(), num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
// }
// XT_psi_inv_X += (BX.transpose() * BX);
// }
// }
// }
// }
// //same for sparse matrices
// template <class T3, typename std::enable_if< std::is_same<sp_mat_t, T3>::value>::type * = nullptr >
// void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
// den_mat_t BX;
// if (num_clusters_ == 1) {
// gp_id_t cluster0 = unique_clusters_[0];
// if (vecchia_approx_) {
// BX = B_[cluster0] * X;
// XT_psi_inv_X = BX.transpose() * D_inv_[cluster0] * BX;
// }
// else {
// BX = X;
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster0]; ++j) {
// sp_L_solve(chol_facts_[cluster0].valuePtr(), chol_facts_[cluster0].innerIndexPtr(), chol_facts_[cluster0].outerIndexPtr(),
// num_data_per_cluster_[cluster0], BX.data() + j * num_data_per_cluster_[cluster0]);
// }
// XT_psi_inv_X = BX.transpose() * BX;
// }
// }
// else {
// XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
// XT_psi_inv_X.setZero();
// for (const auto& cluster_i : unique_clusters_) {
// if (vecchia_approx_) {
// BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
// XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
// }
// else {
// BX = X(data_indices_per_cluster_[cluster_i], Eigen::all);
// #pragma omp parallel for schedule(static)
// for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
// sp_L_solve(chol_facts_[cluster_i].valuePtr(), chol_facts_[cluster_i].innerIndexPtr(), chol_facts_[cluster_i].outerIndexPtr(),
// num_data_per_cluster_[cluster_i], BX.data() + j * num_data_per_cluster_[cluster_i]);
// }
// XT_psi_inv_X += (BX.transpose() * BX);
// }
// }
// }
// }
/*!
* \brief Calculate X^TPsi^(-1)X
* \param X Covariate data matrix X
* \param[out] XT_psi_inv_X X^TPsi^(-1)X
*/
void CalcXTPsiInvX(const den_mat_t& X, den_mat_t& XT_psi_inv_X) {
if (num_clusters_ == 1 && vecchia_ordering_ == "none") {
if (vecchia_approx_) {
den_mat_t BX = B_[unique_clusters_[0]] * X;
XT_psi_inv_X = BX.transpose() * D_inv_[unique_clusters_[0]] * BX;
}
else {
XT_psi_inv_X = X.transpose() * chol_facts_solve_[unique_clusters_[0]].solve(X);
}
}
else {
XT_psi_inv_X = den_mat_t(X.cols(), X.cols());
XT_psi_inv_X.setZero();
den_mat_t BX;
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
BX = B_[cluster_i] * X(data_indices_per_cluster_[cluster_i], Eigen::all);
XT_psi_inv_X += BX.transpose() * D_inv_[cluster_i] * BX;
}
else {
XT_psi_inv_X += ((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all)).transpose() * chol_facts_solve_[cluster_i].solve((den_mat_t)X(data_indices_per_cluster_[cluster_i], Eigen::all));
}
}
}
}
/*!
* \brief Initialize data structures for handling independent realizations of the Gaussian processes. Results are written to the output arguments.
* \param num_data Number of data points
* \param cluster_ids_data IDs / labels indicating independent realizations of Gaussian processes (same values = same process realization)
* \param[out] num_data_per_cluster Keys: labels of independent clusters, values: number of data points per independent realization
* \param[out] data_indices_per_cluster Keys: labels of independent clusters, values: vectors with indices for the data points that belong to each cluster
* \param[out] unique_clusters Unique labels of independent realizations
* \param[out] num_clusters Number of independent clusters
*/
void SetUpGPIds(data_size_t num_data, const gp_id_t* cluster_ids_data,
std::map<gp_id_t, int>& num_data_per_cluster, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster,
std::vector<gp_id_t>& unique_clusters, data_size_t& num_clusters) {
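//Example (hypothetical input): cluster_ids_data = {10, 20, 10, 20} with num_data = 4 yields
//unique_clusters = {10, 20}, num_data_per_cluster = {10: 2, 20: 2},
//data_indices_per_cluster = {10: {0, 2}, 20: {1, 3}}, and num_clusters = 2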
if (cluster_ids_data != nullptr) {
for (int i = 0; i < num_data; ++i) {
if (num_data_per_cluster.find(cluster_ids_data[i]) == num_data_per_cluster.end()) {//first occurrence of cluster_ids_data[i]
unique_clusters.push_back(cluster_ids_data[i]);
num_data_per_cluster.insert({ cluster_ids_data[i], 1 });
std::vector<int> id;
id.push_back(i);
data_indices_per_cluster.insert({ cluster_ids_data[i], id });
}
else {
num_data_per_cluster[cluster_ids_data[i]] += 1;
data_indices_per_cluster[cluster_ids_data[i]].push_back(i);
}
}
num_clusters = (data_size_t)unique_clusters.size();
}
else {
unique_clusters.push_back(0);
num_data_per_cluster.insert({ 0, num_data });
num_clusters = 1;
std::vector<int> gp_id_vec(num_data);
for (int i = 0; i < num_data; ++i) {
gp_id_vec[i] = i;
}
data_indices_per_cluster.insert({ 0, gp_id_vec });
}
}
/*!
* \brief Convert the characters in 'const char* re_group_data' to a matrix (num_re_group x num_data) of strings with group labels
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param re_group_data Labels of group levels for the grouped random effects in column-major format (i.e. first the levels for the first effect, then for the second, etc.). Every group label needs to end with the null character '\0'
* \param[out] re_group_levels Matrix of dimension num_re_group x num_data with strings of group labels for the levels of the grouped random effects
*/
void ConvertCharToStringGroupLevels(data_size_t num_data, data_size_t num_re_group,
const char* re_group_data, std::vector<std::vector<string_t>>& re_group_levels) {
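//Example (hypothetical input): for num_re_group = 1 and num_data = 3, re_group_data = "a\0b\0a\0"
//yields re_group_levels[0] = {"a", "b", "a"}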
int char_start = 0;
for (int ire = 0; ire < num_re_group; ++ire) {//TODO: catch / report potential error if format of re_group_data is not correct
for (int id = 0; id < num_data; ++id) {
int number_chars = 0;
while (re_group_data[char_start + number_chars] != '\0') {
number_chars++;
}
re_group_levels[ire][id] = std::string(re_group_data + char_start);
char_start += number_chars + 1;
}
}
}
/*!
* \brief Initialize individual component models and collect them in a container
* \param num_data Number of data points
* \param num_re_group Number of grouped random effects
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param re_group_levels Group levels for every grouped random effect
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param num_re_group_rand_coef Number of grouped random coefficients
* \param re_group_rand_coef_data Covariate data for grouped random coefficients
* \param ind_effect_group_rand_coef Indices that relate every random coefficient to a "base" intercept grouped random effect. Counting starts at 1.
* \param num_gp Number of Gaussian processes (intercepts only, not counting random coefficients)
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param ind_intercept_gp Index in the vector of random effect components (in the values of 're_comps_') of the intercept GP associated with the random coefficient GPs
* \param[out] re_comps_cluster_i Container that collects the individual component models
*/
void CreateREComponents(data_size_t num_data, data_size_t num_re_group, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster, gp_id_t cluster_i,
std::vector<std::vector<string_t>>& re_group_levels, std::map<gp_id_t, int>& num_data_per_cluster, data_size_t num_re_group_rand_coef,
const double* re_group_rand_coef_data, std::vector<int>& ind_effect_group_rand_coef, data_size_t num_gp, const double* gp_coords_data, int dim_gp_coords,
const double* gp_rand_coef_data, data_size_t num_gp_rand_coef, const string_t cov_fct, double cov_fct_shape, int ind_intercept_gp,
std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i) {
//Grouped REs
if (num_re_group > 0) {
for (int j = 0; j < num_re_group; ++j) {
std::vector<re_group_t> group_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
group_data.push_back(re_group_levels[j][id]);//group_data_.push_back(std::string(re_group_data[j * num_data_ + id]));
}
re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T1>>(new RECompGroup<T1>(group_data)));
}
//Random slopes
if (num_re_group_rand_coef > 0) {
for (int j = 0; j < num_re_group_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(re_group_rand_coef_data[j * num_data + id]);
}
std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_cluster_i[ind_effect_group_rand_coef[j] - 1]);//Subtract 1 since ind_effect_group_rand_coef[j] starts counting at 1, not 0
re_comps_cluster_i.push_back(std::shared_ptr<RECompGroup<T1>>(new RECompGroup<T1>(re_comp->group_data_, re_comp->map_group_label_index_, re_comp->num_group_, rand_coef_data)));
}
}
}
//GPs
if (num_gp > 0) {
std::vector<double> gp_coords;
for (int j = 0; j < dim_gp_coords; ++j) {
for (const auto& id : data_indices_per_cluster[cluster_i]) {
gp_coords.push_back(gp_coords_data[j * num_data + id]);
}
}
den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(gp_coords_mat, cov_fct, cov_fct_shape, true)));
//Random slopes
if (num_gp_rand_coef > 0) {
for (int j = 0; j < num_gp_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
}
std::shared_ptr<RECompGP<T1>> re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_cluster_i[ind_intercept_gp]);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(re_comp->dist_, re_comp->has_Z_,
&re_comp->Z_, rand_coef_data, cov_fct, cov_fct_shape)));
}
}
}
}
/*!
* \brief Initialize individual component models and collect them in a container when the Vecchia approximation is used
* \param num_data Number of data points
* \param data_indices_per_cluster Keys: Labels of independent realizations of REs/GPs, values: vectors with indices for data points
* \param cluster_i Index / label of the realization of the Gaussian process for which the components should be constructed
* \param num_data_per_cluster Keys: Labels of independent realizations of REs/GPs, values: number of data points per independent realization
* \param gp_coords_data Coordinates (features) for Gaussian process
* \param dim_gp_coords Dimension of the coordinates (=number of features) for Gaussian process
* \param gp_rand_coef_data Covariate data for Gaussian process random coefficients
* \param num_gp_rand_coef Number of Gaussian process random coefficients
* \param cov_fct Type of covariance (kernel) function for Gaussian processes
* \param cov_fct_shape Shape parameter of covariance function (=smoothness parameter for Matern covariance)
* \param[out] re_comps_cluster_i Container that collects the individual component models
* \param[out] nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param[out] dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param[out] dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param[out] entries_init_B_cluster_i Triplets for initializing the matrices B
* \param[out] entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param[out] z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param vecchia_ordering Ordering used in the Vecchia approximation. "none" = no ordering, "random" = random ordering
* \param num_neighbors The number of neighbors used in the Vecchia approximation
*/
void CreateREComponentsVecchia(data_size_t num_data, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster, gp_id_t cluster_i, std::map<gp_id_t, int>& num_data_per_cluster,
const double* gp_coords_data, int dim_gp_coords, const double* gp_rand_coef_data, data_size_t num_gp_rand_coef, const string_t cov_fct, double cov_fct_shape,
std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i, std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
std::vector<den_mat_t>& dist_obs_neighbors_cluster_i, std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
std::vector<Triplet_t >& entries_init_B_cluster_i, std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i, string_t vecchia_ordering = "none", int num_neighbors = 30) {
if (vecchia_ordering == "random") {
unsigned seed = 0;
std::shuffle(data_indices_per_cluster[cluster_i].begin(), data_indices_per_cluster[cluster_i].end(), std::default_random_engine(seed));
}
std::vector<double> gp_coords;
for (int j = 0; j < dim_gp_coords; ++j) {
for (const auto& id : data_indices_per_cluster[cluster_i]) {
gp_coords.push_back(gp_coords_data[j * num_data + id]);
}
}
den_mat_t gp_coords_mat = Eigen::Map<den_mat_t>(gp_coords.data(), num_data_per_cluster[cluster_i], dim_gp_coords);
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(gp_coords_mat, cov_fct, cov_fct_shape, false)));
find_nearest_neighbors_Veccia_fast(gp_coords_mat, num_data_per_cluster[cluster_i], num_neighbors,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
for (int j = 0; j < (int)nearest_neighbors_cluster_i[i].size(); ++j) {
entries_init_B_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
entries_init_B_grad_cluster_i.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][j], 0.));
}
entries_init_B_cluster_i.push_back(Triplet_t(i, i, 1.));//Put 1's on the diagonal since B = I - A
}
//Random coefficients
if (num_gp_rand_coef > 0) {
for (int j = 0; j < num_gp_rand_coef; ++j) {
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data[j * num_data + id]);
}
re_comps_cluster_i.push_back(std::shared_ptr<RECompGP<T1>>(new RECompGP<T1>(rand_coef_data, cov_fct, cov_fct_shape)));
//save random coefficient data in the form of outer product matrices
#pragma omp parallel for schedule(static)//orphaned 'omp for' without an enclosing parallel region would run serially
for (int i = 0; i < num_data_per_cluster[cluster_i]; ++i) {
if (j == 0) {//initialize only once; otherwise the entries for previously processed random coefficients would be overwritten
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef);
}
int dim_z = (i == 0) ? 1 : ((int)nearest_neighbors_cluster_i[i].size() + 1);
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[i];
if (i > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
//Log::Info("coef_vec * coef_vec.transpose(): %f", (coef_vec * coef_vec.transpose())(0,0));
//Log::Info("re_comps_[cluster_i] %s ", typeid(re_comps_[cluster_i]).name());
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
}
/*!
* \brief Set the covariance parameters of the components
* \param cov_pars Covariance parameters
*/
void SetCovParsComps(const vec_t& cov_pars) {
CHECK(cov_pars.size() == num_cov_par_);
sigma2_ = cov_pars[0];
for (const auto& cluster_i : unique_clusters_) {
for (int j = 0; j < num_comps_total_; ++j) {
//const std::vector<double> pars = std::vector<double>(cov_pars.begin() + ind_par_[j] + 1, cov_pars.begin() + ind_par_[j + 1] + 1);
const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]);
re_comps_[cluster_i][j]->SetCovPars(pars);
}
}
}
/*!
* \brief Transform the covariance parameters to the scale on which the MLE is found
* \param cov_pars Covariance parameters
* \param[out] cov_pars_trans Transformed covariance parameters
*/
void TransformCovPars(const vec_t& cov_pars, vec_t& cov_pars_trans) {
CHECK(cov_pars.size() == num_cov_par_);
cov_pars_trans = vec_t(num_cov_par_);
cov_pars_trans[0] = cov_pars[0];
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]);
vec_t pars_trans = pars;
re_comps_[unique_clusters_[0]][j]->TransformCovPars(cov_pars[0], pars, pars_trans);
cov_pars_trans.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]) = pars_trans;
}
}
/*!
* \brief Back-transform the covariance parameters to the original scale
* \param cov_pars Covariance parameters
* \param[out] cov_pars_orig Back-transformed, original covariance parameters
*/
void TransformBackCovPars(const vec_t& cov_pars, vec_t& cov_pars_orig) {
CHECK(cov_pars.size() == num_cov_par_);
cov_pars_orig = vec_t(num_cov_par_);
cov_pars_orig[0] = cov_pars[0];
for (int j = 0; j < num_comps_total_; ++j) {
const vec_t pars = cov_pars.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]);
vec_t pars_orig = pars;
re_comps_[unique_clusters_[0]][j]->TransformBackCovPars(cov_pars[0], pars, pars_orig);
cov_pars_orig.segment(ind_par_[j] + 1, ind_par_[j + 1] - ind_par_[j]) = pars_orig;
}
}
/*!
* \brief Calculate covariance matrices of the components
*/
void CalcSigmaComps() {
for (const auto& cluster_i : unique_clusters_) {
for (int j = 0; j < num_comps_total_; ++j) {
re_comps_[cluster_i][j]->CalcSigma();
}
}
}
/*!
* \brief Calculate the matrices B and D_inv as well as their derivatives for the Vecchia approximation for one cluster (independent realization of the GP)
* \param num_data_cluster_i Number of data points
* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param re_comps_cluster_i Container that collects the individual component models
* \param nearest_neighbors_cluster_i Collects indices of nearest neighbors
* \param dist_obs_neighbors_cluster_i Distances between locations and their nearest neighbors
* \param dist_between_neighbors_cluster_i Distances between nearest neighbors for all locations
* \param entries_init_B_cluster_i Triplets for initializing the matrices B
* \param entries_init_B_grad_cluster_i Triplets for initializing the matrices B_grad
* \param z_outer_z_obs_neighbors_cluster_i Outer product of covariate vector at observations and neighbors with itself for random coefficients. First index = data point i, second index = GP number j
* \param[out] B_cluster_i Matrix B = I - A (such that B^T * D^-1 * B is the inverse covariance) for the Vecchia approximation
* \param[out] D_inv_cluster_i Diagonal matrix D^-1 for the Vecchia approximation
* \param[out] B_grad_cluster_i Derivatives of the matrices B (= derivatives of -A) for the Vecchia approximation
* \param[out] D_grad_cluster_i Derivatives of matrices D for Vecchia approximation
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. Default = true
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance
*/
void CalcCovFactorVecchia(int num_data_cluster_i, bool calc_gradient,//TODO: make arguments const
std::vector<std::shared_ptr<RECompBase<T1>>>& re_comps_cluster_i, std::vector<std::vector<int>>& nearest_neighbors_cluster_i,
std::vector<den_mat_t>& dist_obs_neighbors_cluster_i, std::vector<den_mat_t>& dist_between_neighbors_cluster_i,
std::vector<Triplet_t >& entries_init_B_cluster_i, std::vector<Triplet_t >& entries_init_B_grad_cluster_i,
std::vector<std::vector<den_mat_t>>& z_outer_z_obs_neighbors_cluster_i,
sp_mat_t& B_cluster_i, sp_mat_t& D_inv_cluster_i, std::vector<sp_mat_t>& B_grad_cluster_i, std::vector<sp_mat_t>& D_grad_cluster_i,
bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
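//The Vecchia approximation factorizes the precision matrix as Psi^-1 = B^T * D^-1 * B, where
//B = I - A is lower-triangular with A containing the nearest-neighbor regression coefficients
//(the A_i computed below) and D is a diagonal matrix of conditional variances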
int num_par_comp = re_comps_cluster_i[ind_intercept_gp_]->num_cov_par_;
int num_par_gp = num_par_comp * num_gp_total_ + calc_gradient_nugget;
//Initialize the matrices B = I - A and D^-1 as well as their derivatives (so that the code below can be run in parallel)
B_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//B = I - A
B_cluster_i.setFromTriplets(entries_init_B_cluster_i.begin(), entries_init_B_cluster_i.end());//Note: 1's are put on the diagonal
D_inv_cluster_i = sp_mat_t(num_data_cluster_i, num_data_cluster_i);//D^-1. Note: we first calculate D, and then take the inverse below
D_inv_cluster_i.setIdentity();//Put 1's on the diagonal for the nugget effect (entries are not overridden but added to below)
if (!transf_scale) {
D_inv_cluster_i.diagonal().array() *= nugget_var;//nugget effect is not 1 if not on transformed scale
}
if (calc_gradient) {
B_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of B = derivative of (-A)
D_grad_cluster_i = std::vector<sp_mat_t>(num_par_gp);//derivative of D
for (int ipar = 0; ipar < num_par_gp; ++ipar) {
B_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
B_grad_cluster_i[ipar].setFromTriplets(entries_init_B_grad_cluster_i.begin(), entries_init_B_grad_cluster_i.end());
D_grad_cluster_i[ipar] = sp_mat_t(num_data_cluster_i, num_data_cluster_i);
D_grad_cluster_i[ipar].setIdentity();//Put 1's on the diagonal, but the entries are overridden below
}
}//end initialization
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_cluster_i; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//calculate covariance matrices between observations and neighbors and among neighbors as well as their derivatives
den_mat_t cov_mat_obs_neighbors(1, num_nn);
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);
std::vector<den_mat_t> cov_grad_mats_obs_neighbors(num_par_gp);//covariance matrix plus derivative wrt every parameter
std::vector<den_mat_t> cov_grad_mats_between_neighbors(num_par_gp);
if (i > 0) {
for (int j = 0; j < num_gp_total_; ++j) {
int ind_first_par = j * num_par_comp;//index of first parameter (variance) of component j in gradient vectors
if (j == 0) {
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);//write on matrices directly for first GP component
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors[ind_first_par], cov_grad_mats_obs_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
re_comps_cluster_i[ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors[ind_first_par], cov_grad_mats_between_neighbors[ind_first_par + 1],
calc_gradient, transf_scale, nugget_var);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();//cov_mat_obs_neighbors_j.cwiseProduct()
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
if (calc_gradient) {
cov_grad_mats_obs_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_grad_mats_obs_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_grad_mats_between_neighbors[ind_first_par].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_grad_mats_between_neighbors[ind_first_par + 1].array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
}
}
}//end loop over components j
}//end if (i > 0)
//Calculate matrices B and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii}) and its derivatives
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_cluster_i[ind_intercept_gp_ + j]->cov_pars_[0];
if (!transf_scale) {
d_comp_j *= nugget_var;
}
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
D_inv_cluster_i.coeffRef(i, i) += d_comp_j;
if (calc_gradient) {
if (transf_scale) {
D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = d_comp_j;//derivative of the covariance function wrt the variance; the derivative wrt the range is zero on the diagonal
}
else {
D_grad_cluster_i[j * num_par_comp].coeffRef(i, i) = 1.;//1's on the diagonal on the original scale
}
}
}
if (calc_gradient && calc_gradient_nugget) {
D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) = 1.;
}
//2. remaining terms
if (i > 0) {
if (transf_scale) {
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
}
else {
cov_mat_between_neighbors.diagonal().array() += nugget_var;
}
den_mat_t A_i(1, num_nn);
den_mat_t cov_mat_between_neighbors_inv;
den_mat_t A_i_grad_sigma2;
if (calc_gradient) {
den_mat_t I(num_nn, num_nn);
I.setIdentity();
cov_mat_between_neighbors_inv = cov_mat_between_neighbors.llt().solve(I);
A_i = cov_mat_obs_neighbors * cov_mat_between_neighbors_inv;
if (calc_gradient_nugget) {
A_i_grad_sigma2 = -A_i * cov_mat_between_neighbors_inv;
}
}
else {
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
}
for (int inn = 0; inn < num_nn; ++inn) {
B_cluster_i.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i(0, inn);
}
D_inv_cluster_i.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
if (calc_gradient) {
den_mat_t A_i_grad(1, num_nn);
for (int j = 0; j < num_gp_total_; ++j) {
int ind_first_par = j * num_par_comp;
for (int ipar = 0; ipar < num_par_comp; ++ipar) {
A_i_grad = (cov_grad_mats_obs_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv) -
(cov_mat_obs_neighbors * cov_mat_between_neighbors_inv *
cov_grad_mats_between_neighbors[ind_first_par + ipar] * cov_mat_between_neighbors_inv);
for (int inn = 0; inn < num_nn; ++inn) {
B_grad_cluster_i[ind_first_par + ipar].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad(0, inn);
}
if (ipar == 0) {
D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) -= ((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//add to derivative of diagonal elements for marginal variance
}
else {
D_grad_cluster_i[ind_first_par + ipar].coeffRef(i, i) = -((A_i_grad * cov_mat_obs_neighbors.transpose())(0, 0) +
(A_i * cov_grad_mats_obs_neighbors[ind_first_par + ipar].transpose())(0, 0));//don't add to existing values since derivative of diagonal is zero for range
}
}
}
if (calc_gradient_nugget) {
for (int inn = 0; inn < num_nn; ++inn) {
B_grad_cluster_i[num_par_gp - 1].coeffRef(i, nearest_neighbors_cluster_i[i][inn]) = -A_i_grad_sigma2(0, inn);
}
D_grad_cluster_i[num_par_gp - 1].coeffRef(i, i) -= (A_i_grad_sigma2 * cov_mat_obs_neighbors.transpose())(0, 0);
}
}//end calc_gradient
}//end if i > 0
D_inv_cluster_i.coeffRef(i, i) = 1. / D_inv_cluster_i.coeffRef(i, i);
}//end loop over data i
}
/*!
* \brief Create the covariance matrix Psi and factorize it (either calculate a Cholesky factor or the inverse covariance matrix)
* \param calc_gradient If true, the gradient is also calculated (only for Vecchia approximation)
* \param transf_scale If true, the derivatives are taken on the transformed scale otherwise on the original scale. Default = true (only for Vecchia approximation)
* \param nugget_var Nugget effect variance parameter sigma^2 (used only if transf_scale = false to transform back; normally this is equal to one, since the variance parameter is modelled separately and factored out)
* \param calc_gradient_nugget If true, derivatives are also taken with respect to the nugget / noise variance (only for Vecchia approximation)
*/
void CalcCovFactor(bool calc_gradient = false, bool transf_scale = true, double nugget_var = 1., bool calc_gradient_nugget = false) {
if (vecchia_approx_) {
for (const auto& cluster_i : unique_clusters_) {
int num_data_cl_i = num_data_per_cluster_[cluster_i];
CalcCovFactorVecchia(num_data_cl_i, calc_gradient, re_comps_[cluster_i], nearest_neighbors_[cluster_i],
dist_obs_neighbors_[cluster_i], dist_between_neighbors_[cluster_i],
entries_init_B_[cluster_i], entries_init_B_grad_[cluster_i], z_outer_z_obs_neighbors_[cluster_i],
B_[cluster_i], D_inv_[cluster_i], B_grad_[cluster_i], D_grad_[cluster_i], transf_scale, nugget_var, calc_gradient_nugget);
}
}
else {
CalcSigmaComps();
for (const auto& cluster_i : unique_clusters_) {
T1 psi;
psi.resize(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
psi.setIdentity();
for (int j = 0; j < num_comps_total_; ++j) {
psi += (*(re_comps_[cluster_i][j]->GetZSigmaZt()));
}
CalcChol<T1>(psi, cluster_i, do_symbolic_decomposition_);
}
do_symbolic_decomposition_ = false;//Symbolic decomposition done only once (if sparse matrices are used)
}
}
/*!
* \brief Calculate Psi^-1*y (=y_aux_) for RE model
* \param marg_variance The marginal variance. Default = 1.
*/
void CalcYAux(double marg_variance = 1.) {
for (const auto& cluster_i : unique_clusters_) {
if (y_.find(cluster_i) == y_.end()) {
Log::Fatal("Response variable data (y_) for random effects model has not been set. Call 'SetY' first.");
}
if (vecchia_approx_) {
if (B_.find(cluster_i) == B_.end()) {
Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
y_aux_[cluster_i] = B_[cluster_i].transpose() * D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];
}//end Vecchia
else {
if (chol_facts_.find(cluster_i) == chol_facts_.end()) {
Log::Fatal("Factorisation of covariance matrix has not been done. Call 'CalcCovFactor' first.");
}
//Version 1: let Eigen do the computation
y_aux_[cluster_i] = chol_facts_solve_[cluster_i].solve(y_[cluster_i]);
//// Version 2 'do-it-yourself'
//y_aux_[cluster_i] = y_[cluster_i];
//const double* val = chol_facts_[cluster_i].valuePtr();
//const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
//const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
//sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
//sp_L_t_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], y_aux_[cluster_i].data());
}//end non-Vecchia
if (marg_variance != 1.) {
y_aux_[cluster_i] /= marg_variance;
}
}
y_aux_has_been_calculated_ = true;
}
/*!
* \brief Calculate gradient for covariance parameters
* \return Gradient for covariance parameters
*/
vec_t GetCovParGrad() {
vec_t cov_grad = vec_t::Zero(num_cov_par_ - 1);
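//Gradient of the negative log-likelihood wrt a covariance parameter theta_k:
//-0.5 * y_aux^T * (dPsi/dtheta_k) * y_aux / sigma2 + 0.5 * tr(Psi^-1 * dPsi/dtheta_k),
//where the trace is evaluated below as the sum of an element-wise product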
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
vec_t u(num_data_per_cluster_[cluster_i]);
vec_t uk(num_data_per_cluster_[cluster_i]);
u = D_inv_[cluster_i] * B_[cluster_i] * y_[cluster_i];//TODO: this is already calculated in CalcYAux -> save it there and re-use here?
for (int j = 0; j < num_comps_total_; ++j) {
int num_par_comp = re_comps_[cluster_i][j]->num_cov_par_;
for (int ipar = 0; ipar < num_par_comp; ++ipar) {
uk = B_grad_[cluster_i][num_par_comp * j + ipar] * y_[cluster_i];
cov_grad[ind_par_[j] + ipar] += ((uk.dot(u) - 0.5 * u.dot(D_grad_[cluster_i][num_par_comp * j + ipar] * u)) / sigma2_ +
0.5 * (D_inv_[cluster_i].diagonal()).dot(D_grad_[cluster_i][num_par_comp * j + ipar].diagonal()));
}
}
}//end Vecchia
else {
T1 psi_inv;
CalcPsiInv(psi_inv, cluster_i);
//////Version 2: doing sparse solving but ignoring sparse RHS
////const double* val = chol_facts_[cluster_i].valuePtr();
////const int* row_idx = chol_facts_[cluster_i].innerIndexPtr();
////const int* col_ptr = chol_facts_[cluster_i].outerIndexPtr();
////den_mat_t L_inv_dens = den_mat_t(Id_[cluster_i]);
////for (int j = 0; j < num_data_per_cluster_[cluster_i]; ++j) {
//// sp_L_solve(val, row_idx, col_ptr, num_data_per_cluster_[cluster_i], L_inv_dens.data() + j * num_data_per_cluster_[cluster_i]);
////}
////const sp_mat_t L_inv = L_inv_dens.sparseView();
////const sp_mat_t psi_inv = L_inv.transpose() * L_inv;
//////Version 1: let Eigen do the solve
////const sp_mat_t psi_inv = chol_facts_solve_[cluster_i].solve(Id_[cluster_i]);
//sp_mat_t psi_inv = re_comps_[cluster_i][0]->GetZSigmaZt(cov_pars);
for (int j = 0; j < num_comps_total_; ++j) {
for (int ipar = 0; ipar < re_comps_[cluster_i][j]->num_cov_par_; ++ipar) {
std::shared_ptr<T1> gradPsi = re_comps_[cluster_i][j]->GetZSigmaZtGrad(ipar);
//if (ipar == 1) {
// for (int i = 0; i < 3; ++i) {
// for (int j = i; j < 3; ++j) {
// Log::Info("(*gradPsi)(%d,%d): %f", i, j, (*gradPsi).coeff(i, j));
// }
// }
//}
cov_grad[ind_par_[j] + ipar] += -1. * ((double)(y_aux_[cluster_i].transpose() * (*gradPsi) * y_aux_[cluster_i])) / sigma2_ / 2. +
((double)(((*gradPsi).cwiseProduct(psi_inv)).sum())) / 2.;
}
}
}//end standard (non-Vecchia) calculation
}// end loop over clusters
return(cov_grad);
}
/*!
* \brief Apply a momentum step
* \param it Iteration number
* \param[out] pars Parameters
* \param[out] pars_lag1 Parameters from last iteration
* \param use_nesterov_acc Indicates whether Nesterov acceleration is used in the gradient descent for finding the covariance parameters. Default = true
* \param nesterov_acc_rate Acceleration rate for Nesterov acceleration
* \param nesterov_schedule_version Which version of Nesterov schedule should be used. Default = 0
* \param exclude_first_log_scale If true, no momentum is applied to the first value and the momentum step is done on the log-scale for the other values. Default = true
* \param momentum_offset Number of iterations for which no momentum is applied at the beginning
*/
void ApplyMomentumStep(int it, vec_t& pars, vec_t& pars_lag1, bool use_nesterov_acc = true,
double nesterov_acc_rate = 0.5, int nesterov_schedule_version = 0, bool exclude_first_log_scale = true,
int momentum_offset = 2) {
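//Momentum step: pars_mom = (1 + mu) * pars - mu * pars_lag1; if exclude_first_log_scale is true,
//this step is applied on the log-scale to all parameters except the first one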
if (use_nesterov_acc) {
double mu = NesterovSchedule(it, nesterov_schedule_version, nesterov_acc_rate, momentum_offset);
int num_par = (int)pars.size();
vec_t pars_mom(num_par);//Covariance parameters plus a momentum step
if (exclude_first_log_scale) {
pars_mom.segment(1, num_par - 1) = ((mu + 1.) * (pars.segment(1, num_par - 1).array().log()) - mu * (pars_lag1.segment(1, num_par - 1).array().log())).exp().matrix();//Momentum is added on the log scale
pars_mom[0] = pars[0];
}
else {
pars_mom = (mu + 1) * pars - mu * pars_lag1;
}
pars_lag1 = pars;
pars = pars_mom;
}
else {
pars_lag1 = pars;
}
}
/*!
* \brief Update covariance parameters doing one gradient descent step (except for the marginal variance which is updated using an explicit solution)
* \param lr Learning rate
* \param[out] cov_pars Covariance parameters
*/
void UpdateCovParGradOneIter(double lr, vec_t& cov_pars) {
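//The marginal variance has the explicit (profiled-out) solution sigma2 = y^T * Psi^-1 * y / num_data_
//(note that y_aux_ = Psi^-1 * y); the remaining parameters are updated by gradient descent on the log-scale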
cov_pars[0] = 0.;
for (const auto& cluster_i : unique_clusters_) {
cov_pars[0] += (double)(y_[cluster_i].transpose() * y_aux_[cluster_i]);
}
cov_pars[0] /= num_data_;
sigma2_ = cov_pars[0];
vec_t grad = GetCovParGrad();
cov_pars.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - lr * grad.array()).exp().matrix();
//for (int i = 0; i < (int)grad.size(); ++i) { Log::Debug("grad[%d]: %f", i, grad[i]); }//For debugging only
}
/*!
* \brief Update covariance parameters doing one step of Fisher scoring (except for the marginal variance which is updated using an explicit solution)
* \param[out] cov_pars Covariance parameters
*/
void UpdateCovParFisherScoringOneIter(vec_t& cov_pars) {
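//Fisher scoring step on the log-scale: log(theta_new) = log(theta_old) - FI^-1 * grad, where FI is the
//Fisher information of the covariance parameters (excluding the marginal variance, which is profiled out)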
cov_pars[0] = 0.;
for (const auto& cluster_i : unique_clusters_) {
cov_pars[0] += (double)(y_[cluster_i].transpose() * y_aux_[cluster_i]);
}
cov_pars[0] /= num_data_;
sigma2_ = cov_pars[0];
vec_t grad = GetCovParGrad();
den_mat_t FI;
CalcFisherInformation(cov_pars, FI, true, false);
vec_t update = FI.llt().solve(grad);
cov_pars.segment(1, num_cov_par_ - 1) = (cov_pars.segment(1, num_cov_par_ - 1).array().log() - update.array()).exp().matrix();//make update on log-scale
}
/*!
* \brief Update linear fixed-effect coefficients doing one gradient descent step
* \param lr Learning rate
* \param marg_var Marginal variance parameters sigma^2
* \param X Covariate data for linear fixed-effect
* \param[out] beta Linear regression coefficients
*/
void UpdateCoefGradOneIter(double lr, double marg_var, den_mat_t& X, vec_t& beta) {
vec_t y_aux(num_data_);
GetYAux(y_aux);
beta += lr * (1. / marg_var) * (X.transpose()) * y_aux;
}
/*!
* \brief Update linear fixed-effect coefficients using generalized least squares (GLS)
* \param X Covariate data for linear fixed-effect
* \param[out] beta Linear regression coefficients
*/
void UpdateCoefGLS(den_mat_t& X, vec_t& beta) {
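//GLS estimator: beta = (X^T * Psi^-1 * X)^-1 * X^T * Psi^-1 * y, computed below via a Cholesky
//solve of the (X.cols() x X.cols()) normal equations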
vec_t y_aux(num_data_);
GetYAux(y_aux);
den_mat_t XT_psi_inv_X;
CalcXTPsiInvX(X, XT_psi_inv_X);
beta = XT_psi_inv_X.llt().solve(X.transpose() * y_aux);
}
/*!
* \brief Check whether NaNs are present
* \param par Vector of parameters that should be checked
*/
void CheckNaN(vec_t& par) {
if (std::isnan(par[0])) {
Log::Fatal("NaN occurred. (if gradient descent is used, consider using a smaller learning rate)");
}
}
/*!
* \brief Calculate the Fisher information for covariance parameters. Note: you need to call CalcCovFactor first
* \param cov_pars Covariance parameters
* \param[out] FI Fisher information
* \param transf_scale If true, the derivative is taken on the transformed scale otherwise on the original scale. Default = true
* \param include_marg_var If true, the marginal variance parameter is also included, otherwise not
*/
void CalcFisherInformation(const vec_t& cov_pars, den_mat_t& FI, bool transf_scale = true, bool include_marg_var = false) {
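//For Gaussian data, FI(i, j) = 0.5 * tr(Psi^-1 * (dPsi/dtheta_i) * Psi^-1 * (dPsi/dtheta_j));
//each trace tr(M_i * M_j) with M_k = Psi^-1 * dPsi/dtheta_k is evaluated below as sum(M_i^T .* M_j)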
if (include_marg_var) {
FI = den_mat_t(num_cov_par_, num_cov_par_);
}
else {
FI = den_mat_t(num_cov_par_ - 1, num_cov_par_ - 1);
}
FI.setZero();
for (const auto& cluster_i : unique_clusters_) {
if (vecchia_approx_) {
sp_mat_t Identity(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
Identity.setIdentity();
sp_mat_t B_inv;
eigen_sp_Lower_sp_RHS_cs_solve(B_[cluster_i], Identity, B_inv, true);
sp_mat_t D = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
D.setIdentity();
D.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(-1);
sp_mat_t D_inv_2 = sp_mat_t(num_data_per_cluster_[cluster_i], num_data_per_cluster_[cluster_i]);
D_inv_2.setIdentity();
D_inv_2.diagonal().array() = D_inv_[cluster_i].diagonal().array().pow(2);
//Counters for the covariance parameters
int par_i, par_j;
int start_cov_pars = include_marg_var ? 1 : 0;
sp_mat_t D_inv_B_grad_B_inv, B_grad_B_inv_D;
if (include_marg_var) {
//First for nugget effect / noise variance parameter
int ind_grad_nugget = re_comps_[cluster_i][ind_intercept_gp_]->num_cov_par_ * num_gp_total_;
D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_[cluster_i][ind_grad_nugget] * B_inv;
B_grad_B_inv_D = B_grad_[cluster_i][ind_grad_nugget] * B_inv * D;
double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array()).sum());
FI(0, 0) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
par_j = 0;
for (int j = 0; j < num_comps_total_; ++j) {
for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
B_grad_B_inv_D = B_grad_[cluster_i][par_j] * B_inv * D;
diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][ind_grad_nugget].diagonal().array() * D_grad_[cluster_i][par_j].diagonal().array()).sum());
FI(0, par_j + 1) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
par_j++;
}
}
}
par_i = 0;
//Remaining covariance parameters
for (int i = 0; i < num_comps_total_; ++i) {
for (int ipar = 0; ipar < re_comps_[cluster_i][i]->num_cov_par_; ++ipar) {
D_inv_B_grad_B_inv = D_inv_[cluster_i] * B_grad_[cluster_i][par_i] * B_inv;
par_j = 0;
for (int j = 0; j < num_comps_total_; ++j) {
for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
if (par_j >= par_i) {
B_grad_B_inv_D = B_grad_[cluster_i][par_j] * B_inv * D;
double diag = (double)((D_inv_2.diagonal().array() * D_grad_[cluster_i][par_i].diagonal().array() * D_grad_[cluster_i][par_j].diagonal().array()).sum());
FI(par_i + start_cov_pars, par_j + start_cov_pars) += ((double)(B_grad_B_inv_D.cwiseProduct(D_inv_B_grad_B_inv)).sum() + diag / 2.);
}
par_j++;
}
}
par_i++;
}
}//end loop over components
}//end Vecchia approximation
else {
T1 psi_inv;
CalcPsiInv(psi_inv, cluster_i);
if (!transf_scale) {
psi_inv /= cov_pars[0];//psi_inv was calculated with the transformed parametrization, so we divide by cov_pars[0] to obtain the inverse covariance matrix on the original scale
}
int par_i = 0;
int par_j;
if (include_marg_var) {
//First for nugget effect / noise variance parameter
T1 psi_inv_grad_psi_sigma2 = psi_inv;//The gradient for the nugget variance is the identity matrix.
FI(par_i, par_i) += ((double)(psi_inv_grad_psi_sigma2.cwiseProduct(psi_inv_grad_psi_sigma2)).sum()) / 2.;
par_j = 1;
for (int j = 0; j < num_comps_total_; ++j) {//there is currently no possibility to loop over the parameters directly
for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
T1 psi_inv_grad_psi_par_j = psi_inv * *(re_comps_[cluster_i][j]->GetZSigmaZtGrad(jpar, transf_scale, cov_pars[0]));
FI(par_i, par_j) += ((double)(psi_inv_grad_psi_sigma2.cwiseProduct(psi_inv_grad_psi_par_j)).sum()) / 2.;
par_j++;
}
}
par_i = 1;
}
//Remaining covariance parameters
for (int i = 0; i < num_comps_total_; ++i) {
for (int ipar = 0; ipar < re_comps_[cluster_i][i]->num_cov_par_; ++ipar) {
T1 psi_inv_grad_psi_par_i = psi_inv * *(re_comps_[cluster_i][i]->GetZSigmaZtGrad(ipar, transf_scale, cov_pars[0]));
T1 psi_inv_grad_psi_par_i_T = psi_inv_grad_psi_par_i.transpose();
FI(par_i, par_i) += ((double)(psi_inv_grad_psi_par_i_T.cwiseProduct(psi_inv_grad_psi_par_i)).sum()) / 2.;
psi_inv_grad_psi_par_i.resize(0, 0);//not needed anymore
if (include_marg_var) {
par_j = 1;
}
else {
par_j = 0;
}
for (int j = 0; j < num_comps_total_; ++j) {//there is currently no possibility to loop over the parameters directly
for (int jpar = 0; jpar < re_comps_[cluster_i][j]->num_cov_par_; ++jpar) {
if (par_j > par_i) {
T1 psi_inv_grad_psi_par_j = psi_inv * *(re_comps_[cluster_i][j]->GetZSigmaZtGrad(jpar, transf_scale, cov_pars[0]));
FI(par_i, par_j) += ((double)(psi_inv_grad_psi_par_i_T.cwiseProduct(psi_inv_grad_psi_par_j)).sum()) / 2.;
}
par_j++;
}
}
par_i++;
}
}//end loop over components
}//end non-Vecchia approximation
}//end loop over clusters
FI.triangularView<Eigen::StrictlyLower>() = FI.triangularView<Eigen::StrictlyUpper>().transpose();
//for (int i = 0; i < (int)FI.rows(); ++i) {
// for (int j = i; j < (int)FI.cols(); ++j) {
// Log::Info("FI(%d,%d) %f", i, j, FI(i, j));
// }
//}
}
/*!
* \brief Calculate the standard deviations for the MLE of the covariance parameters as the diagonal of the inverse Fisher information (on the original scale and not the transformed scale used in the optimization)
* \param cov_pars MLE of covariance parameters
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCovPar(const vec_t& cov_pars, vec_t& std_dev) {
SetCovParsComps(cov_pars);
CalcCovFactor(true, false, cov_pars[0], true);
den_mat_t FI;
CalcFisherInformation(cov_pars, FI, false, true);
std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
/*!
* \brief Calculate the standard deviations for the MLE of the regression coefficients as the diagonal of the inverse Fisher information
* \param cov_pars MLE of covariance parameters
* \param X Covariate data for linear fixed-effect
* \param[out] std_dev Standard deviations
*/
void CalcStdDevCoef(vec_t& cov_pars, const den_mat_t& X, vec_t& std_dev) {
if ((int)std_dev.size() >= num_data_) {
Log::Warning("Sample size too small to calculate standard deviations for coefficients");
for (int i = 0; i < (int)std_dev.size(); ++i) {
std_dev[i] = std::numeric_limits<double>::quiet_NaN();
}
}
else {
SetCovParsComps(cov_pars);
CalcCovFactor(false, true, 1., false);
den_mat_t FI((int)X.cols(), (int)X.cols());
CalcXTPsiInvX(X, FI);
FI /= cov_pars[0];
std_dev = FI.inverse().diagonal().array().sqrt().matrix();
}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) for one cluster
* \param cluster_i Cluster index for which predictions are made
* \param num_data_pred Number of prediction locations
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for the data points that belong to each cluster
* \param re_group_levels_pred Group levels for the grouped random effects (re_group_levels_pred[j] contains the levels for RE number j)
* \param re_group_rand_coef_data_pred Random coefficient data for grouped REs
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPred(gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const std::vector<std::vector<string_t>>& re_group_levels_pred, const double* re_group_rand_coef_data_pred,
const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
// Vector which contains covariance matrices needed for making predictions in the following order:
// 0. Ztilde*Sigma*Z^T, 1. Zstar*Sigmatilde^T*Z^T, 2. Ztilde*Sigma*Ztilde^T, 3. Ztilde*Sigmatilde*Zstar^T, 4. Zstar*Sigmastar*Zstar^T
std::vector<T1> pred_mats(5);
//Define which covariance matrices are zero ('false') or non-zero ('true')
std::vector<bool> active_mats{ false, false, false, false, false };
if (num_re_group_total_ > 0) {
active_mats[0] = true;
active_mats[2] = true;
active_mats[4] = true;
}
if (num_gp_total_ > 0) {
active_mats[1] = true;
active_mats[4] = true;
}
//Initialize covariance matrices
for (int i = 0; i < 2; ++i) {
if (active_mats[i]) {
pred_mats[i].resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_[cluster_i]);
pred_mats[i].setZero();
}
}
if (predict_cov_mat) {
for (int i = 2; i < 5; ++i) {
if (active_mats[i]) {
pred_mats[i].resize(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_pred[cluster_i]);
pred_mats[i].setZero();
}
}
}
//Calculate covariance matrices
int cn = 0;//component number
if (num_re_group_ > 0) {
//Grouped random effects
for (int j = 0; j < num_re_group_; ++j) {
std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][cn]);
std::vector<re_group_t> group_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
group_data.push_back(re_group_levels_pred[j][id]);
}
re_comp->AddPredCovMatrices(group_data, pred_mats, predict_cov_mat);
cn += 1;
}
if (num_re_group_rand_coef_ > 0) {
//Random coefficient grouped random effects
for (int j = 0; j < num_re_group_rand_coef_; ++j) {
std::shared_ptr<RECompGroup<T1>> re_comp = std::dynamic_pointer_cast<RECompGroup<T1>>(re_comps_[cluster_i][cn]);
std::vector<re_group_t> group_data;
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
rand_coef_data.push_back(re_group_rand_coef_data_pred[j * num_data_pred + id]);
group_data.push_back(re_group_levels_pred[ind_effect_group_rand_coef_[j] - 1][id]);//subtract 1 since counting starts at one for this index
}
re_comp->AddPredCovMatrices(group_data, pred_mats, predict_cov_mat, rand_coef_data.data());
cn += 1;
}
}
}
if (num_gp_ > 0) {
//Gaussian process
std::shared_ptr<RECompGP<T1>> re_comp_base = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][cn]);
re_comp_base->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, pred_mats, predict_cov_mat);
cn += 1;
if (num_gp_rand_coef_ > 0) {
std::shared_ptr<RECompGP<T1>> re_comp;
//Random coefficient Gaussian processes
for (int j = 0; j < num_gp_rand_coef_; ++j) {
re_comp = std::dynamic_pointer_cast<RECompGP<T1>>(re_comps_[cluster_i][cn]);
std::vector<double> rand_coef_data;
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {
rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
}
re_comp->AddPredCovMatrices(re_comp_base->coords_, gp_coords_mat_pred, pred_mats, predict_cov_mat, rand_coef_data.data());
cn += 1;
}
}
}
T1 M_aux(num_data_per_cluster_pred[cluster_i], num_data_per_cluster_[cluster_i]);//Ztilde*Sigma*Z^T + Zstar*Sigmatilde^T*Z^T
M_aux.setZero();
for (int i = 0; i < 2; ++i) {
if (active_mats[i]) {
M_aux += pred_mats[i];
}
}
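//Conditional mean: mean_pred = Cov(pred, obs) * Psi^-1 * y, where M_aux is the cross-covariance
//between prediction and observed points and y_aux_ = Psi^-1 * y has already been calculated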
mean_pred_id = M_aux * y_aux_[cluster_i];
if (predict_cov_mat) {
cov_mat_pred_id.setIdentity();
for (int i = 2; i < 5; ++i) {
if (active_mats[i]) {
cov_mat_pred_id += pred_mats[i];
if (i == 3) {//Ztilde*Sigmatilde*Zstar^T
cov_mat_pred_id += T1(pred_mats[i].transpose());
}
}
}
cov_mat_pred_id -= (M_aux * (chol_facts_solve_[cluster_i].solve(T1(M_aux.transpose()))));
}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which predictions are made
* \param num_data_pred Number of prediction locations
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for the data points that belong to each cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
//Find nearest neighbors
den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_pred_cli);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_pred_cli);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_pred_cli);
if (CondObsOnly) {
find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, num_data_cli - 1);
}
else {//find neighbors among both the observed and prediction locations
find_nearest_neighbors_Veccia_fast(coords_all, num_data_cli + num_data_pred_cli, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, num_data_cli, -1);
}
//Random coefficients
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_pred_cli);
if (num_gp_rand_coef_ > 0) {
for (int j = 0; j < num_gp_rand_coef_; ++j) {
std::vector<double> rand_coef_data = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_;//First entries are the observed data, then the predicted data
for (const auto& id : data_indices_per_cluster_pred[cluster_i]) {//TODO: maybe do the following in parallel? (see CalcPredVecchiaPredictedFirstOrder)
rand_coef_data.push_back(gp_rand_coef_data_pred[j * num_data_pred + id]);
}
#pragma omp parallel for schedule(static)//orphaned 'omp for' without an enclosing parallel region would run serially
for (int i = 0; i < num_data_pred_cli; ++i) {
if (j == 0) {//initialize only once; otherwise the entries for previously processed random coefficients would be overwritten
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
}
int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[num_data_cli + i];
if ((num_data_cli + i) > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
// Determine Triplet for initializing Bpo and Bp
std::vector<Triplet_t> entries_init_Bpo, entries_init_Bp;
for (int i = 0; i < num_data_pred_cli; ++i) {
entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
entries_init_Bpo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
else {//nearest neighbor belongs to predicted data
entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli, 0.));
}
}
}
sp_mat_t Bpo(num_data_pred_cli, num_data_cli);
sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
Bpo.setFromTriplets(entries_init_Bpo.begin(), entries_init_Bpo.end());//initialize matrices (so that the code below can be run in parallel)
Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
sp_mat_t Dp(num_data_pred_cli, num_data_pred_cli);
Dp.setIdentity();//Put 1 on the diagonal (for nugget effect)
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just mock arguments for the functions below
for (int j = 0; j < num_gp_total_; ++j) {
if (j == 0) {
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
}
}//end loop over components j
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
Dp.coeffRef(i, i) += d_comp_j;
}
//2. remaining terms
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
if (nearest_neighbors_cluster_i[i][inn] < num_data_cli) {//nearest neighbor belongs to observed data
Bpo.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn] - num_data_cli) -= A_i(0, inn);
}
}
Dp.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}//end loop over data i
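// Under the Vecchia approximation with observed-first ordering, the joint
// precision factorization gives y_p | y_o ~ N(-Bp^(-1)*Bpo*y_o, Bp^(-1)*Dp*Bp^(-T)).
// The lines below form -Bpo*y_o and, unless conditioning on observed data only
// (where Bp = I), apply the sparse triangular solve with Bp; the covariance
// is Dp itself or Bp^(-1)*Dp*Bp^(-T) accordingly.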
mean_pred_id = -Bpo * y_[cluster_i];
if (!CondObsOnly) {
sp_L_solve(Bp.valuePtr(), Bp.innerIndexPtr(), Bp.outerIndexPtr(), num_data_pred_cli, mean_pred_id.data());
}
if (predict_cov_mat) {
if (CondObsOnly) {
cov_mat_pred_id = Dp;
}
else {
sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
Identity.setIdentity();
sp_mat_t Bp_inv;
eigen_sp_Lower_sp_RHS_cs_solve(Bp, Identity, Bp_inv, true);
cov_mat_pred_id = T1(Bp_inv * Dp * Bp_inv.transpose());
}
}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the covariance matrix of the observable process when prediction locations appear first in the ordering
* \param cluster_i Cluster index for which predictions are made
* \param num_data_pred Number of prediction locations
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param data_indices_per_cluster_pred Keys: labels of independent clusters, values: vectors with indices for data points that belong to the respective cluster
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param gp_rand_coef_data_pred Random coefficient data for GPs
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaPredictedFirstOrder(gp_id_t cluster_i, int num_data_pred,
std::map<gp_id_t, int>& num_data_per_cluster_pred, std::map<gp_id_t, std::vector<int>>& data_indices_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred, const double* gp_rand_coef_data_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
int num_data_tot = num_data_cli + num_data_pred_cli;
//Find nearest neighbors
den_mat_t coords_all(num_data_tot, dim_gp_coords_);
coords_all << gp_coords_mat_pred, gp_coords_mat_obs;
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_data_tot);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_data_tot);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_data_tot);
find_nearest_neighbors_Veccia_fast(coords_all, num_data_tot, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
//Prepare data for random coefficients
std::vector<std::vector<den_mat_t>> z_outer_z_obs_neighbors_cluster_i(num_data_tot);
if (num_gp_rand_coef_ > 0) {
for (int j = 0; j < num_gp_rand_coef_; ++j) {
std::vector<double> rand_coef_data(num_data_tot);//First entries are the predicted data, then the observed data
#pragma omp for schedule(static)
for (int i = 0; i < num_data_pred_cli; ++i) {
rand_coef_data[i] = gp_rand_coef_data_pred[j * num_data_pred + data_indices_per_cluster_pred[cluster_i][i]];
}
#pragma omp for schedule(static)
for (int i = 0; i < num_data_cli; ++i) {
rand_coef_data[num_data_pred_cli + i] = re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_[i];
}
//re_comps_[cluster_i][ind_intercept_gp_ + j + 1]->rand_coef_data_
//for (int i = 0; i < rand_coef_data.size(); ++i) {
// Log::Info("rand_coef_data[%d]: %f", i, rand_coef_data[i]);
//}
#pragma omp for schedule(static)
for (int i = 0; i < num_data_tot; ++i) {
z_outer_z_obs_neighbors_cluster_i[i] = std::vector<den_mat_t>(num_gp_rand_coef_);
int dim_z = (int)nearest_neighbors_cluster_i[i].size() + 1;
vec_t coef_vec(dim_z);
coef_vec(0) = rand_coef_data[i];
if (i > 0) {
for (int ii = 1; ii < dim_z; ++ii) {
coef_vec(ii) = rand_coef_data[nearest_neighbors_cluster_i[i][ii - 1]];
}
}
z_outer_z_obs_neighbors_cluster_i[i][j] = coef_vec * coef_vec.transpose();
}
}
}
// Determine Triplet for initializing Bo, Bop, and Bp
std::vector<Triplet_t> entries_init_Bo, entries_init_Bop, entries_init_Bp;
for (int i = 0; i < num_data_pred_cli; ++i) {
entries_init_Bp.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
entries_init_Bp.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
}
for (int i = 0; i < num_data_cli; ++i) {
entries_init_Bo.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i + num_data_pred_cli].size(); ++inn) {
if (nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
entries_init_Bop.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn], 0.));
}
else {//nearest neighbor belongs to observed data
entries_init_Bo.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i + num_data_pred_cli][inn] - num_data_pred_cli, 0.));
}
}
}
sp_mat_t Bo(num_data_cli, num_data_cli);
sp_mat_t Bop(num_data_cli, num_data_pred_cli);
sp_mat_t Bp(num_data_pred_cli, num_data_pred_cli);
Bo.setFromTriplets(entries_init_Bo.begin(), entries_init_Bo.end());//initialize matrices (so that the code below can be run in parallel)
Bop.setFromTriplets(entries_init_Bop.begin(), entries_init_Bop.end());
Bp.setFromTriplets(entries_init_Bp.begin(), entries_init_Bp.end());
sp_mat_t Do_inv(num_data_cli, num_data_cli);
sp_mat_t Dp_inv(num_data_pred_cli, num_data_pred_cli);
Do_inv.setIdentity();//Put 1 on the diagonal (for nugget effect)
Dp_inv.setIdentity();
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data_tot; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just mock arguments for the functions below
if (i > 0) {
for (int j = 0; j < num_gp_total_; ++j) {
if (j == 0) {
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
else {//random coefficient GPs
den_mat_t cov_mat_obs_neighbors_j;
den_mat_t cov_mat_between_neighbors_j;
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors_j, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);
re_comps_[cluster_i][ind_intercept_gp_ + j]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors_j, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
//multiply by coefficient matrix
cov_mat_obs_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(0, 1, 1, num_nn)).array();
cov_mat_between_neighbors_j.array() *= (z_outer_z_obs_neighbors_cluster_i[i][j - 1].block(1, 1, num_nn, num_nn)).array();
cov_mat_obs_neighbors += cov_mat_obs_neighbors_j;
cov_mat_between_neighbors += cov_mat_between_neighbors_j;
}
}//end loop over components j
}
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
for (int j = 0; j < num_gp_total_; ++j) {
double d_comp_j = re_comps_[cluster_i][ind_intercept_gp_ + j]->cov_pars_[0];
if (j > 0) {//random coefficient
d_comp_j *= z_outer_z_obs_neighbors_cluster_i[i][j - 1](0, 0);
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) += d_comp_j;
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) += d_comp_j;
}
}
//2. remaining terms
if (i > 0) {
cov_mat_between_neighbors.diagonal().array() += 1.;//add nugget effect
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
if (i < num_data_pred_cli) {
Bp.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
if (nearest_neighbors_cluster_i[i][inn] < num_data_pred_cli) {//nearest neighbor belongs to predicted data
Bop.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
else {
Bo.coeffRef(i - num_data_pred_cli, nearest_neighbors_cluster_i[i][inn] - num_data_pred_cli) -= A_i(0, inn);
}
}
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
}
if (i < num_data_pred_cli) {
Dp_inv.coeffRef(i, i) = 1 / Dp_inv.coeffRef(i, i);
}
else {
Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli) = 1 / Do_inv.coeffRef(i - num_data_pred_cli, i - num_data_pred_cli);
}
}//end loop over data i
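// The conditional precision of the predicted process is
// Q_p = Bp^T*Dp^(-1)*Bp + Bop^T*Do^(-1)*Bop (note that Dp_inv and Do_inv hold
// the already-inverted diagonals). The predicted mean solves
// Q_p * mu_p = -Bop^T*Do^(-1)*Bo*y_o, and the predictive covariance Q_p^(-1)
// is recovered from the inverse of the sparse Cholesky factor when requested.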
sp_mat_t cond_prec = Bp.transpose() * Dp_inv * Bp + Bop.transpose() * Do_inv * Bop;
chol_sp_mat_t CholFact;
CholFact.compute(cond_prec);
if (predict_cov_mat) {
sp_mat_t Identity(num_data_pred_cli, num_data_pred_cli);
Identity.setIdentity();
sp_mat_t cond_prec_chol = CholFact.matrixL();
sp_mat_t cond_prec_chol_inv;
eigen_sp_Lower_sp_RHS_cs_solve(cond_prec_chol, Identity, cond_prec_chol_inv, true);
cov_mat_pred_id = T1(cond_prec_chol_inv.transpose() * cond_prec_chol_inv);
mean_pred_id = -cov_mat_pred_id * Bop.transpose() * Do_inv * Bo * y_[cluster_i];
}
else {
mean_pred_id = -CholFact.solve(Bop.transpose() * Do_inv * Bo * y_[cluster_i]);
}
}
/*!
* \brief Calculate predictions (conditional mean and covariance matrix) using the Vecchia approximation for the latent process when observed locations appear first in the ordering
* \param CondObsOnly If true, the nearest neighbors for the predictions are found only among the observed data
* \param cluster_i Cluster index for which predictions are made
* \param num_data_per_cluster_pred Keys: Labels of independent realizations of REs/GPs, values: number of prediction locations per independent realization
* \param gp_coords_mat_obs Coordinates for observed locations
* \param gp_coords_mat_pred Coordinates for prediction locations
* \param predict_cov_mat If true, the covariance matrix is also calculated
* \param[out] mean_pred_id Predicted mean
* \param[out] cov_mat_pred_id Predicted covariance matrix
*/
void CalcPredVecchiaLatentObservedFirstOrder(bool CondObsOnly, gp_id_t cluster_i,
std::map<gp_id_t, int>& num_data_per_cluster_pred,
const den_mat_t& gp_coords_mat_obs, const den_mat_t& gp_coords_mat_pred,
bool predict_cov_mat, vec_t& mean_pred_id, T1& cov_mat_pred_id) {
if (num_gp_rand_coef_ > 0) {
Log::Fatal("The Vecchia approximation for the latent process is currently not implemented when having random coefficients");
}
int num_data_cli = num_data_per_cluster_[cluster_i];
int num_data_pred_cli = num_data_per_cluster_pred[cluster_i];
int num_data_tot = num_data_cli + num_data_pred_cli;
//Find nearest neighbors
den_mat_t coords_all(num_data_cli + num_data_pred_cli, dim_gp_coords_);
coords_all << gp_coords_mat_obs, gp_coords_mat_pred;
//Determine number of unique observation locations
std::vector<int> uniques;//unique points
std::vector<int> unique_idx;//used for constructing incidence matrix Z_ if there are duplicates
DetermineUniqueDuplicateCoords(gp_coords_mat_obs, num_data_cli, uniques, unique_idx);
int num_coord_unique_obs = (int)uniques.size();
//Determine unique locations (observed and predicted)
DetermineUniqueDuplicateCoords(coords_all, num_data_tot, uniques, unique_idx);
int num_coord_unique = (int)uniques.size();
den_mat_t coords_all_unique;
if ((int)uniques.size() == num_data_tot) {//no multiple observations at the same locations -> no incidence matrix needed
coords_all_unique = coords_all;
}
else {
coords_all_unique = coords_all(uniques, Eigen::all);
}
//Determine incidence matrices
sp_mat_t Z_o = sp_mat_t(num_data_cli, uniques.size());
sp_mat_t Z_p = sp_mat_t(num_data_pred_cli, uniques.size());
for (int i = 0; i < num_data_tot; ++i) {
if (i < num_data_cli) {
Z_o.insert(i, unique_idx[i]) = 1.;
}
else {
Z_p.insert(i - num_data_cli, unique_idx[i]) = 1.;
}
}
std::vector<std::vector<int>> nearest_neighbors_cluster_i(num_coord_unique);
std::vector<den_mat_t> dist_obs_neighbors_cluster_i(num_coord_unique);
std::vector<den_mat_t> dist_between_neighbors_cluster_i(num_coord_unique);
if (CondObsOnly) {//find neighbors among the observed locations only
find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, num_coord_unique_obs - 1);
}
else {//find neighbors among both the observed and prediction locations
find_nearest_neighbors_Veccia_fast(coords_all_unique, num_coord_unique, num_neighbors_pred_,
nearest_neighbors_cluster_i, dist_obs_neighbors_cluster_i, dist_between_neighbors_cluster_i, 0, -1);
}
// Determine Triplet for initializing B
std::vector<Triplet_t> entries_init_B;
for (int i = 0; i < num_coord_unique; ++i) {
entries_init_B.push_back(Triplet_t(i, i, 1.));//Put 1 on the diagonal
for (int inn = 0; inn < (int)nearest_neighbors_cluster_i[i].size(); ++inn) {
entries_init_B.push_back(Triplet_t(i, nearest_neighbors_cluster_i[i][inn], 0.));
}
}
sp_mat_t B(num_coord_unique, num_coord_unique);
B.setFromTriplets(entries_init_B.begin(), entries_init_B.end());//initialize matrices (so that the code below can be run in parallel)
sp_mat_t D(num_coord_unique, num_coord_unique);
D.setIdentity();
D.diagonal().array() = 0.;
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_coord_unique; ++i) {
int num_nn = (int)nearest_neighbors_cluster_i[i].size();
//define covariance and gradient matrices
den_mat_t cov_mat_obs_neighbors(1, num_nn);//dim = 1 x nn
den_mat_t cov_mat_between_neighbors(num_nn, num_nn);//dim = nn x nn
den_mat_t cov_grad_mats_obs_neighbors, cov_grad_mats_between_neighbors; //not used, just mock arguments for the functions below
if (i > 0) {
re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_obs_neighbors_cluster_i[i],
cov_mat_obs_neighbors, cov_grad_mats_obs_neighbors, cov_grad_mats_obs_neighbors, false);//write on matrices directly for first GP component
re_comps_[cluster_i][ind_intercept_gp_]->CalcSigmaAndSigmaGrad(dist_between_neighbors_cluster_i[i],
cov_mat_between_neighbors, cov_grad_mats_between_neighbors, cov_grad_mats_between_neighbors, false);
}
//Calculate matrices A and D as well as their derivatives
//1. add first summand of matrix D (ZCZ^T_{ii})
D.coeffRef(i, i) = re_comps_[cluster_i][ind_intercept_gp_]->cov_pars_[0];
//2. remaining terms
if (i > 0) {
den_mat_t A_i(1, num_nn);//dim = 1 x nn
A_i = (cov_mat_between_neighbors.llt().solve(cov_mat_obs_neighbors.transpose())).transpose();
for (int inn = 0; inn < num_nn; ++inn) {
B.coeffRef(i, nearest_neighbors_cluster_i[i][inn]) -= A_i(0, inn);
}
D.coeffRef(i, i) -= (A_i * cov_mat_obs_neighbors.transpose())(0, 0);
}
}//end loop over data i
//Calculate D_inv and B_inv in order to calculate Sigma and Sigma^-1
sp_mat_t D_inv(num_coord_unique, num_coord_unique);
D_inv.setIdentity();
D_inv.diagonal().array() = D.diagonal().array().pow(-1);
sp_mat_t Identity_all(num_coord_unique, num_coord_unique);
Identity_all.setIdentity();
sp_mat_t B_inv;
eigen_sp_Lower_sp_RHS_cs_solve(B, Identity_all, B_inv, true);
//Calculate inverse of covariance matrix for observed data using the Woodbury identity
sp_mat_t Z_o_T = Z_o.transpose();
sp_mat_t M_aux_Woodbury = B.transpose() * D_inv * B + Z_o_T * Z_o;
chol_sp_mat_t CholFac_M_aux_Woodbury;
CholFac_M_aux_Woodbury.compute(M_aux_Woodbury);
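// Woodbury identity used below: with Sigma^(-1) = B^T*D^(-1)*B,
// (Z_o*Sigma*Z_o^T + I)^(-1) = I - Z_o*(Sigma^(-1) + Z_o^T*Z_o)^(-1)*Z_o^T,
// so only the sparse Cholesky factor of M_aux_Woodbury is needed instead of
// a dense num_data_cli x num_data_cli inverse.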
if (predict_cov_mat) {
//Using Eigen's solver
sp_mat_t M_aux_Woodbury2 = CholFac_M_aux_Woodbury.solve(Z_o_T);
sp_mat_t Identity_obs(num_data_cli, num_data_cli);
Identity_obs.setIdentity();
sp_mat_t ZoSigmaZoT_plusI_Inv = -Z_o * M_aux_Woodbury2 + Identity_obs;
sp_mat_t ZpSigmaZoT = Z_p * B_inv * D * B_inv.transpose() * Z_o_T;
sp_mat_t M_aux = ZpSigmaZoT * ZoSigmaZoT_plusI_Inv;
mean_pred_id = M_aux * y_[cluster_i];
sp_mat_t Identity_pred(num_data_pred_cli, num_data_pred_cli);
Identity_pred.setIdentity();
cov_mat_pred_id = T1(Z_p * B_inv * D * B_inv.transpose() * Z_p.transpose() + Identity_pred - M_aux * ZpSigmaZoT.transpose());
}
else {
vec_t resp_aux = Z_o_T * y_[cluster_i];
vec_t resp_aux2 = CholFac_M_aux_Woodbury.solve(resp_aux);
resp_aux = y_[cluster_i] - Z_o * resp_aux2;
mean_pred_id = Z_p * B_inv * D * B_inv.transpose() * Z_o_T * resp_aux;
}
}
friend class REModel;
};
} // namespace GPBoost
#endif // GPB_RE_MODEL_TEMPLATE_H_
|
build_search_model.c |
#include <omp.h>
#include <getopt.h>
#include "tldevel.h"
#include "tlmisc.h"
#include "tlrng.h"
#include "tlseqbuffer.h"
#include "randomkit.h"
#include "sequence_struct.h"
#include "sequence_alloc.h"
#include "sequence_io.h"
#include "pst_build.h"
#include "model_struct.h"
#include "model_io.h"
#include "model_alloc.h"
#include "bias_model.h"
#include "thread_data.h"
#include "hmm_conversion.h"
#include "finite_hmm_stats.h"
#include "finite_hmm_alloc.h"
#include "finite_hmm_io.h"
#include "finite_hmm_score.h"
#include "run_score.h"
struct parameters{
char* in_model;
char* out_model;
char* seq_db;
char* cmd_line;
unsigned long seed;
rk_state rndstate;
struct rng_state* rng;
int num_threads;
};
#define OPT_SEQDB 1
#define OPT_SEED 2
static int run_bsm(struct parameters* param);
static int calibrate_all(struct model_bag* mb,struct seqer_thread_data** td);
static void* do_calibrate_per_model(void* threadarg);
//static int find_best_model(struct model_bag*mb, struct seq_buffer* sb, int* best);
static int find_best_model(struct model_bag*mb, struct tl_seq_buffer* sb, int* best);
static int print_help(char **argv);
static void free_param(struct parameters* param);
int main(int argc, char *argv[])
{
struct parameters* param = NULL;
int c;
//print_program_header(argv, "Build HDPHMM model(s).");
MMALLOC(param, sizeof(struct parameters));
param->in_model = NULL;
param->out_model = NULL;
param->seq_db = NULL;
param->cmd_line = NULL;
param->seed = 0;
param->num_threads = 8;
param->rng = NULL;
while (1){
static struct option long_options[] ={
{"in",required_argument,0,'i'},
{"out",required_argument,0,'o'},
{"seqdb",required_argument,0,OPT_SEQDB},
{"seed",required_argument,0,OPT_SEED},
{"nthreads",required_argument,0,'t'},
{"help",0,0,'h'},
{0, 0, 0, 0}
};
int option_index = 0;
c = getopt_long_only (argc, argv,"i:o:t:h",long_options, &option_index);
if (c == -1){
break;
}
switch(c) {
case OPT_SEQDB:
param->seq_db = optarg;
break;
case OPT_SEED:
param->seed = atoi(optarg);
break;
case 'i':
param->in_model = optarg;
break;
case 'o':
param->out_model = optarg;
break;
case 't':
param->num_threads = atoi(optarg);
break;
case 'h':
RUN(print_help(argv));
MFREE(param);
exit(EXIT_SUCCESS);
break;
default:
ERROR_MSG("not recognized");
break;
}
}
LOG_MSG("Starting run");
if(!param->in_model){
RUN(print_help(argv));
ERROR_MSG("No input file! use --in <model.h5>");
}else{
if(!my_file_exists(param->in_model)){
ERROR_MSG("File %s does not exist.", param->in_model);
}
}
if(!param->out_model){
RUN(print_help(argv));
ERROR_MSG("No output file! use --out <searchmodel.h5>");
}else{
if(my_file_exists(param->out_model)){
ERROR_MSG("File %s already exists.", param->out_model);
}
}
if(!param->seq_db){
RUN(print_help(argv));
ERROR_MSG("No seqDB use --seqdb <blah.fa>");
}else{
if(!my_file_exists(param->seq_db)){
RUN(print_help(argv));
ERROR_MSG("The file <%s> does not exist.",param->seq_db);
}
}
if(param->seed){
RUNP(param->rng = init_rng(param->seed));
rk_seed(param->seed, &param->rndstate);
}else{
RUNP(param->rng = init_rng(0));
rk_randomseed(&param->rndstate);
}
RUN(make_cmd_line(&param->cmd_line,argc,argv));
RUN(run_bsm(param));
free_param(param);
return EXIT_SUCCESS;
ERROR:
free_param(param);
return EXIT_FAILURE;
}
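/* run_bsm pipeline (summary of the steps below):
 * 1. read the training sequences stored in the input model,
 * 2. train a PST model against the sequence database,
 * 3. read all models and convert them to finite HMMs,
 * 4. calibrate every model and score all training sequences,
 * 5. pick the best-scoring model and write the bias and search models. */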
int run_bsm(struct parameters* param)
{
struct model_bag* model_bag = NULL;
struct tl_seq_buffer* sb = NULL;
struct seqer_thread_data** td = NULL;
struct fhmm* bias = NULL;
double* s = NULL;
int i;
int best;
/* read sequences from in model */
RUNP(sb = get_sequences_from_hdf5_model(param->in_model, IHMM_SEQ_READ_ONLY_SEQ));
//RUN(convert_ihmm_seq_buf_into_tl_seq_buf(s, &sb));
/*LOG_MSG("%d",sb->L);
for(i = 0; i < sb->num_seq;i++){
LOG_MSG("%s",sb->sequences[i]->name);
}*/
/* train PST */
RUN(create_pst_model(param->rng,sb, NULL, param->seq_db, param->out_model,0.00001, 0.01, 20.0));
//sb = NULL;
/* read all models */
RUNP(model_bag = read_model_bag_hdf5(param->in_model ));
RUN(create_seqer_thread_data(&td,param->num_threads, 1024 , 128, &param->rndstate));
/* convert to fhmm */
RUN(convert_ihmm_to_fhmm_models(model_bag));
/* calibrate */
RUN(calibrate_all(model_bag, td));
/* WARNING NEED TO ADD STORAGE FOR SCORES !!!! */
//RUN(add_multi_model_label_and_u(s, model_bag->num_models));
for(i = 0; i < sb->num_seq;i++){
s = NULL;
MMALLOC(s, sizeof(double) * model_bag->num_models);
sb->sequences[i]->data = s;
}
/* score all training sequences */
RUN(run_score_sequences( model_bag->finite_models,sb, td, model_bag->num_models, FHMM_SCORE_P_LODD));
/* assign best */
RUN(find_best_model(model_bag, sb, &best));
LOG_MSG("Best model: %d",best);
//write
RUN(build_bias_model(model_bag->finite_models[best], &bias));
RUN(write_biashmm(param->out_model, bias));
RUN(write_searchfhmm(param->out_model, model_bag->finite_models[best]));
for(i = 0; i < sb->num_seq;i++){
MFREE(sb->sequences[i]->data);
sb->sequences[i]->data = NULL;
}
free_tl_seq_buffer(sb);
free_model_bag(model_bag);
free_seqer_thread_data(td);
free_fhmm(bias);
return OK;
ERROR:
return FAIL;
}
int find_best_model(struct model_bag*mb, struct tl_seq_buffer* sb, int* best)
{
double* total_e = NULL;
double* s;
int i,j;
double min;
RUN(galloc(&total_e, mb->num_models));
for(j = 0; j < mb->num_models;j++){
total_e[j] = 0.0;
}
for(i= 0 ;i < sb->num_seq;i++){
s = sb->sequences[i]->data;
for(j = 0; j < mb->num_models;j++){
total_e[j] += s[j];
//fprintf(stdout,"%f %f ", s->sequences[i]->score_arr[j], esl_exp_logsurv(s->sequences[i]->score_arr[j], mb->finite_models[j]->tau,mb->finite_models[j]->lambda));
}
//fprintf(stdout,"\n");
}
j = -1;
min = 1.0;
for(i = 0; i < mb->num_models;i++){
LOG_MSG(" Model %d: %d states: %f", i, mb->finite_models[i]->K, total_e[i]);
if(total_e[i] < min){
min = total_e[i];
j =i;
}
}
gfree(total_e);
*best = j;
return OK;
ERROR:
return FAIL;
}
/* calibrate all models */
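/* Models are handed out in batches of up to num_threads: each thread gets one
 * model per batch (td[i]->model_ID) and the batch is calibrated in parallel by
 * the OpenMP loop below; the final batch may be smaller (j < num_threads). */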
int calibrate_all(struct model_bag* mb,struct seqer_thread_data** td)
{
int i,j,c;
int num_threads = td[0]->num_threads;
int run;
ASSERT(mb != NULL, "No models");
c = 0;
for(run = 0; run < mb->num_models;run+= num_threads){
j = 0;
for(i = 0; i < num_threads;i++){
td[i]->thread_ID = i;
td[i]->model_ID = c;
td[i]->fhmm = mb->finite_models;
//td[i]->sb = sb;
//LOG_MSG("Cal %d",c);
j++;
c++;
if(c == mb->num_models){
break;
}
}
#ifdef HAVE_OPENMP
omp_set_num_threads( MACRO_MIN(num_threads,j));
#pragma omp parallel shared(td) private(i)
{
#pragma omp for schedule(dynamic) nowait
#endif
for(i = 0; i < j;i++){
do_calibrate_per_model(td[i]);
}
#ifdef HAVE_OPENMP
}
#endif
}
return OK;
ERROR:
return FAIL;
}
void* do_calibrate_per_model(void* threadarg)
{
struct seqer_thread_data *data;
data = (struct seqer_thread_data *) threadarg;
int r;
r = rk_random(&data->rndstate);
fhmm_calibrate(data->fhmm[data->model_ID], data->fmat, r);
//LOG_MSG("Model %d: %f %f", data->model_ID, data->fhmm[data->model_ID]->lambda,data->fhmm[data->model_ID]->tau);
return NULL;
}
int print_help(char **argv)
{
const char usage[] = " -i <ihmm model> -o <search model>";
char* tmp = NULL;
RUN(tlfilename(argv[0], &tmp));
fprintf(stdout,"\nUsage: %s [-options] %s\n\n",tmp,usage);
fprintf(stdout,"Options:\n\n");
fprintf(stdout,"%*s%-*s: %s %s\n",3,"",MESSAGE_MARGIN-3,"--seqdb","Reference database." ,"[8]" );
fprintf(stdout,"%*s%-*s: %s %s\n",3,"",MESSAGE_MARGIN-3,"--nthreads","Number of threads." ,"[8]" );
fprintf(stdout,"%*s%-*s: %s %s\n",3,"",MESSAGE_MARGIN-3,"--seed","Seed" ,"[NA]" );
MFREE(tmp);
return OK;
ERROR:
MFREE(tmp);
return FAIL;
}
void free_param(struct parameters* param)
{
if(param){
if(param->cmd_line){
gfree(param->cmd_line);
}
if(param->rng){
free_rng(param->rng);
}
MFREE(param);
}
}
|
GB_binop__land_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_08__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_02__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_04__land_int64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int64)
// A*D function (colscale): GB (_AxD__land_int64)
// D*A function (rowscale): GB (_DxB__land_int64)
// C+=B function (dense accum): GB (_Cdense_accumB__land_int64)
// C+=b function (dense accum): GB (_Cdense_accumb__land_int64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int64)
// C=scalar+B GB (_bind1st__land_int64)
// C=scalar+B' GB (_bind1st_tran__land_int64)
// C=A+scalar GB (_bind2nd__land_int64)
// C=A'+scalar GB (_bind2nd_tran__land_int64)
// C type: int64_t
// A type: int64_t
// B,b type: int64_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
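// Usage sketch (not part of the generated kernel; assumes the GxB_LAND_INT64
// operator shipped with SuiteSparse:GraphBLAS): these kernels are dispatched
// internally when user code applies logical AND to int64 matrices, e.g.
//
//      GrB_eWiseMult (C, NULL, NULL, GxB_LAND_INT64, A, B, NULL) ;
//
// which computes cij = ((aij != 0) && (bij != 0)) on the intersection pattern.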
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int64_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
int64_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
int64_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ((x != 0) && (y != 0)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT64 || GxB_NO_LAND_INT64)
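// If GB_DISABLE evaluates to true, every kernel below compiles to a stub that
// returns GrB_NO_VALUE, and GraphBLAS falls back to its generic
// (non-type-specialized) implementation at run time.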
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__land_int64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__land_int64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int64_t
int64_t bwork = (*((int64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__land_int64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *restrict Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__land_int64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__land_int64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__land_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__land_int64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__land_int64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__land_int64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int64_t *Bx = (int64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
int64_t bij = GBX (Bx, p, false) ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__land_int64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int64_t y = (*((int64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = GBX (Ax, p, false) ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB (_bind1st_tran__land_int64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB (_bind2nd_tran__land_int64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t y = (*((const int64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unaryop__one_bool_bool.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__one_bool_bool
// op(A') function: GB_tran__one_bool_bool
// C type: bool
// A type: bool
// cast: ;
// unaryop: cij = true
#define GB_ATYPE \
bool
#define GB_CTYPE \
bool
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = true ;
// casting
#define GB_CASTING(z, aij) \
; ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_BOOL)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__one_bool_bool
(
bool *Cx, // Cx and Ax may be aliased
bool *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__one_bool_bool
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
gemv_x_dia.c | #include "alphasparse/kernel.h"
#include "alphasparse/opt.h"
#include "alphasparse/util.h"
#include <string.h>
#ifdef _OPENMP
#include <omp.h>
#endif
static alphasparse_status_t ONAME_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
const ALPHA_INT m = A->rows;
const ALPHA_INT n = A->cols;
const ALPHA_INT thread_num = alpha_get_thread_num();
ALPHA_Number** tmp = (ALPHA_Number**)malloc(sizeof(ALPHA_Number*) * thread_num);
for(int i = 0; i < thread_num; ++i)
{
tmp[i] = malloc(sizeof(ALPHA_Number) * m);
memset(tmp[i], 0, sizeof(ALPHA_Number) * m);
}
const ALPHA_INT diags = A->ndiag;
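// DIA kernel: the diagonal with offset dis holds the entries A(r, r+dis), so
// each thread accumulates tmp[tid][r] += alpha * A(r, r+dis) * x[r+dis] for
// its diagonals. Per-thread buffers are needed because different diagonals
// update the same rows of y; they are reduced into y (scaled by beta) below.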
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for (ALPHA_INT i = 0; i < diags; ++i)
{
const ALPHA_INT threadId = alpha_get_thread_id();
const ALPHA_INT dis = A->distance[i];
const ALPHA_INT row_start = alpha_max(0, -dis);
const ALPHA_INT col_start = alpha_max(0, dis);
const ALPHA_INT nnz = (m - row_start)<(n - col_start)?(m - row_start):(n - col_start);
const ALPHA_INT start = i * A->lval;
for (ALPHA_INT j = 0; j < nnz; ++j)
{
ALPHA_Number v;
alpha_mul(v, alpha, A->values[start + row_start + j]);
alpha_madde(tmp[threadId][row_start + j], v, x[col_start + j]);
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(ALPHA_INT i = 0; i < m; ++i)
{
alpha_mul(y[i], y[i], beta);
for(ALPHA_INT j = 0; j < thread_num; ++j)
{
alpha_add(y[i], y[i], tmp[j][i]);
}
}
#ifdef _OPENMP
#pragma omp parallel for num_threads(thread_num)
#endif
for(int i = 0; i < thread_num; ++i)
{
alpha_free(tmp[i]);
}
alpha_free(tmp);
return ALPHA_SPARSE_STATUS_SUCCESS;
}
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_DIA* A,
const ALPHA_Number* x,
const ALPHA_Number beta,
ALPHA_Number* y)
{
return ONAME_omp(alpha, A, x, beta, y);
}
|
3d25pt_var.c | /*
* Order-1, 3D 25 point stencil with axis-symmetric variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
/* Defaults are an assumption added here; the original read Nx..Nt
 * uninitialized when command-line arguments were missing. */
int Nx = 100+8, Ny = 100+8, Nz = 100+8, Nt = 10;
if (argc > 3) {
Nx = atoi(argv[1])+8;
Ny = atoi(argv[2])+8;
Nz = atoi(argv[3])+8;
}
if (argc > 4)
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*13);
for(m=0; m<13;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 8;
tile_size[1] = 8;
tile_size[2] = 4;
tile_size[3] = 2048;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<13; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
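// Radius-4, axis-symmetric 25-point stencil: the center point plus 4 neighbor
// pairs along each axis, with one shared coefficient per symmetric pair
// (13 coefficient arrays in total). Time stepping double-buffers A through
// the (t+1)%2 / t%2 indices.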
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - per stencil point: Addition: 24 && Multiplication: 13
#pragma scop
for (t = 0; t < Nt; t++) {
for (i = 4; i < Nz-4; i++) {
for (j = 4; j < Ny-4; j++) {
for (k = 4; k < Nx-4; k++) {
A[(t+1)%2][i][j][k] =
coef[0][i][j][k] * A[(t)%2][i ][j ][k ] +
coef[1][i][j][k] * (A[(t)%2][i-1][j ][k ] + A[(t)%2][i+1][j ][k ]) +
coef[2][i][j][k] * (A[(t)%2][i ][j-1][k ] + A[(t)%2][i ][j+1][k ]) +
coef[3][i][j][k] * (A[(t)%2][i ][j ][k-1] + A[(t)%2][i ][j ][k+1]) +
coef[4][i][j][k] * (A[(t)%2][i-2][j ][k ] + A[(t)%2][i+2][j ][k ]) +
coef[5][i][j][k] * (A[(t)%2][i ][j-2][k ] + A[(t)%2][i ][j+2][k ]) +
coef[6][i][j][k] * (A[(t)%2][i ][j ][k-2] + A[(t)%2][i ][j ][k+2]) +
coef[7][i][j][k] * (A[(t)%2][i-3][j ][k ] + A[(t)%2][i+3][j ][k ]) +
coef[8][i][j][k] * (A[(t)%2][i ][j-3][k ] + A[(t)%2][i ][j+3][k ]) +
coef[9][i][j][k] * (A[(t)%2][i ][j ][k-3] + A[(t)%2][i ][j ][k+3]) +
coef[10][i][j][k]* (A[(t)%2][i-4][j ][k ] + A[(t)%2][i+4][j ][k ]) +
coef[11][i][j][k]* (A[(t)%2][i ][j-4][k ] + A[(t)%2][i ][j+4][k ]) +
coef[12][i][j][k]* (A[(t)%2][i ][j ][k-4] + A[(t)%2][i ][j ][k+4]) ;
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(4, "variable axis-symmetric")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<13;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
return 0;
}
|
openmp_common.c | // RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - %s
#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}
void foo() {
#pragma omp // expected-error {{expected an OpenMP directive}}
#pragma omp unknown_directive // expected-error {{expected an OpenMP directive}}
}
|
masked-spgemm.h | #ifndef SPGEMM_GENERIC_H
#define SPGEMM_GENERIC_H
#include <algorithm>
#include "../CSR.h"
#include "scan.h"
#include "util.h"
#include "hash/HashAccumulator.h"
#include "hash/MaskedHashAlgorithm.h"
#include "msa/MSAAlgorithm_old.h"
#include "msa/MSAAlgorithm.h"
#include "mca/MCAAlgorithm.h"
#include "heap/MaskedHeapAlgorithm.h"
#include "heap/MaskedHeapAlgorithmInspect.h"
template<template<class, class> class RowAlgorithm, class IT, class NT, class MultiplyOperation, class AddOperation>
void MaskedSpGEMM1p(const CSR<IT, NT> &A, const CSR<IT, NT> &B, CSR<IT, NT> &C, const CSR<IT, NT> &M,
MultiplyOperation multop, AddOperation addop, unsigned numThreads = 0) {
using RowAlg = RowAlgorithm<IT, NT>;
const bool Complemented = RowAlg::COMPLEMENTED;
// Calculate number of threads and init C
setNumThreads(numThreads);
verifyInputs(A, B, C, M);
// Estimate work
IT *flopsPerRow = my_malloc<IT>(A.rows, false);
IT flops = Complemented ? calculateFlops(A, B, flopsPerRow, numThreads)
: calculateFlops(A, B, M, flopsPerRow, numThreads);
// Calculate cumulative work
IT *cumulativeWork = my_malloc<IT>(A.rows, false);
exclusiveScan(flopsPerRow, A.rows, cumulativeWork, numThreads);
// Allocate memory for row sizes
IT *rowNvals = my_malloc<IT>(A.rows, false);
IT *threadsNvals = my_malloc<IT>(numThreads, false);
#pragma omp parallel num_threads(numThreads)
{
int thisThread = omp_get_thread_num();
// Distribute work
auto[rowBeginIdx, rowEndIdx] = distributeWork(flops, cumulativeWork, A.rows, numThreads, thisThread);
// Scan the input matrices
auto[upperBoundSizeC, maxRowSizeA, maxRowSizeM, maxRowFlops]
= scanInputs<Complemented,
true,
RowAlg::CALC_MAX_ROW_SIZE_A,
RowAlg::CALC_MAX_ROW_SIZE_M,
RowAlg::CALC_MAX_ROW_FLOPS>(rowBeginIdx, rowEndIdx, flopsPerRow, A, B, M);
// Initialize row algorithm
RowAlg alg{B.cols, maxRowSizeA, maxRowSizeM, maxRowFlops};
auto[bufferSize, bufferAlignment] = alg.getMemoryRequirement();
auto buffer = mallocAligned(bufferSize, bufferAlignment);
size_t dirty = bufferSize;
// Allocate temporary memory for C's column IDs and Values
IT *colIdsLocal = my_malloc<IT>(upperBoundSizeC, false);
NT *valuesLocal = my_malloc<NT>(upperBoundSizeC, false);
IT *currColId = colIdsLocal;
NT *currValue = valuesLocal;
// Numeric phase
alg.getNumericAccumulator().setBuffer(buffer, bufferSize, dirty);
for (IT row = rowBeginIdx; row < rowEndIdx; ++row) {
if (flopsPerRow[row]) {
auto rowColIdBegin = currColId;
alg.numericRow(A, B, M, multop, addop, row, currColId, currValue, flopsPerRow[row]);
rowNvals[row] = currColId - rowColIdBegin;
} else {
rowNvals[row] = 0;
}
}
threadsNvals[thisThread] = currColId - colIdsLocal;
dirty = alg.getNumericAccumulator().releaseBuffer();
#pragma omp barrier
#pragma omp master
{
initC(A, B, C, threadsNvals, numThreads);
}
#pragma omp barrier
setRowOffsets(C, threadsNvals, rowBeginIdx, rowEndIdx, rowNvals, numThreads, thisThread);
copyValuesToC(C, rowBeginIdx, colIdsLocal, valuesLocal, threadsNvals[thisThread]);
my_free(colIdsLocal, valuesLocal);
freeAligned(buffer);
}
my_free(flopsPerRow, cumulativeWork, rowNvals, threadsNvals);
}
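// MaskedSpGEMM1p above is single-phase: each thread emits rows into a
// thread-local buffer sized by the flop-based upper bound and copies them into
// C once exact row sizes are known. MaskedSpGEMM2p below is two-phase: a
// symbolic pass computes exact row sizes first, so the numeric pass writes
// directly into C without the temporary copy. A usage sketch (the row
// algorithm name MaskedHash is assumed from the includes above, as is
// <functional> for the operator objects):
//
//     MaskedSpGEMM1p<MaskedHash>(A, B, C, M,
//                                std::multiplies<NT>{}, std::plus<NT>{});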
template<template<class, class> class RowAlgorithm, class IT, class NT, class MultiplyOperation, class AddOperation>
void MaskedSpGEMM2p(const CSR<IT, NT> &A, const CSR<IT, NT> &B, CSR<IT, NT> &C, const CSR<IT, NT> &M,
MultiplyOperation multop, AddOperation addop, unsigned numThreads = 0) {
using RowAlg = RowAlgorithm<IT, NT>;
const bool Complemented = RowAlg::COMPLEMENTED;
CSR<IT, NT> R;
// Calculate number of threads and init C
setNumThreads(numThreads);
verifyInputs(A, B, C, M);
// Estimate work
IT *flopsPerRow = my_malloc<IT>(A.rows, false);
IT flops = Complemented ? calculateFlops(A, B, flopsPerRow, numThreads)
: calculateFlops(A, B, M, flopsPerRow, numThreads);
// Calculate cumulative work
IT *cumulativeWork = my_malloc<IT>(A.rows, false);
exclusiveScan(flopsPerRow, A.rows, cumulativeWork, numThreads);
// Allocate memory for row sizes
IT *rowNvals = my_malloc<IT>(A.rows, false);
IT *threadsNvals = my_malloc<IT>(numThreads, false);
#pragma omp parallel num_threads(numThreads)
{
int thisThread = omp_get_thread_num();
// Distribute work
auto[rowBeginIdx, rowEndIdx] = distributeWork(flops, cumulativeWork, A.rows, numThreads, thisThread);
// Scan the input matrices
auto[upperBoundSizeC, maxRowSizeA, maxRowSizeM, maxRowFlops]
= scanInputs<Complemented,
false,
RowAlg::CALC_MAX_ROW_SIZE_A,
RowAlg::CALC_MAX_ROW_SIZE_M,
RowAlg::CALC_MAX_ROW_FLOPS>(rowBeginIdx, rowEndIdx, flopsPerRow, A, B, M);
// Initialize row algorithm
RowAlg alg{B.cols, maxRowSizeA, maxRowSizeM, maxRowFlops};
auto[bufferSize, bufferAlignment] = alg.getMemoryRequirement();
auto buffer = mallocAligned(bufferSize, bufferAlignment);
size_t dirty = bufferSize;
// Symbolic phase
alg.getSymbolicAccumulator().setBuffer(buffer, bufferSize, dirty);
IT nvals = 0;
for (IT row = rowBeginIdx; row < rowEndIdx; row++) {
if (flopsPerRow[row]) {
alg.symbolicRow(A, B, M, row, rowNvals, flopsPerRow[row]);
} else {
rowNvals[row] = 0;
}
nvals += rowNvals[row];
}
threadsNvals[thisThread] = nvals;
dirty = alg.getSymbolicAccumulator().releaseBuffer();
// init C
#pragma omp barrier
#pragma omp master
{
initC(A, B, R, threadsNvals, numThreads);
}
#pragma omp barrier
setRowOffsets(R, threadsNvals, rowBeginIdx, rowEndIdx, rowNvals, numThreads, thisThread);
// Numeric phase
alg.getNumericAccumulator().setBuffer(buffer, bufferSize, dirty);
IT *currColId = &R.colids[R.rowptr[rowBeginIdx]];
NT *currValue = &R.values[R.rowptr[rowBeginIdx]];
for (IT row = rowBeginIdx; row < rowEndIdx; ++row) {
if (rowNvals[row] == 0) { continue; }
alg.numericRow(A, B, M, multop, addop, row, currColId, currValue, rowNvals[row]);
}
dirty = alg.getNumericAccumulator().releaseBuffer();
freeAligned(buffer);
}
my_free(flopsPerRow, cumulativeWork, rowNvals, threadsNvals);
// TODO: use a move constructor instead of this manual member transfer
C.make_empty();
C.rows = R.rows;
C.cols = R.cols;
C.nnz = R.nnz;
C.rowptr = R.rowptr;
C.colids = R.colids;
C.values = R.values;
R.rows = 0;
R.cols = 0;
R.nnz = 0;
}
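/* Illustration only (guarded out): a minimal sketch of how MaskedSpGEMM2p
 * might be invoked. "MaskedHashAlgorithm" is a placeholder for one of the
 * RowAlgorithm classes defined elsewhere in this project; the real names may
 * differ. */
#if 0
#include <functional>
template<class IT, class NT>
void exampleMaskedSpGEMM(const CSR<IT, NT> &A, const CSR<IT, NT> &B,
const CSR<IT, NT> &M, CSR<IT, NT> &C) {
// C = (A * B) .* M with the ordinary (*, +) semiring on 4 threads.
MaskedSpGEMM2p<MaskedHashAlgorithm>(A, B, C, M,
std::multiplies<NT>{}, std::plus<NT>{}, 4);
}
#endif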
#endif //SPGEMM_GENERIC_H
|
kernel1.h | /*
#include "worklist.h"
#include <vector>
#include <set>
using namespace std;
*/
#define MAXCOLOR 128
void FirstFit(int m, int nnz, int *csrRowPtr, int *csrColInd, Worklist &inwl, int *coloring)
{
unsigned start, end;
int ii;
start = inwl.start;
end = inwl.end;
#ifdef ENABLE_OPENMP
#pragma omp parallel for
#endif
for (ii = start; ii < end; ii++) {
int j, node, neighbors, neighbor_j;
node = inwl.getItem(ii);
int neighboroffset = csrRowPtr[node];
neighbors = csrRowPtr[node + 1] - neighboroffset;
// Bitmap of available colors: bit k set means color k is still free.
// Bit 0 is cleared because colors start at 1 (0 means "uncolored").
unsigned v[MAXCOLOR / 32];
v[0] = 0xfffffffe;
for (j = 1; j < MAXCOLOR / 32; j++)
v[j] = 0xffffffff;
// Clear the bit of every color already used by a neighbor.
for (j = 0; j < neighbors; j++) {
neighbor_j = csrColInd[neighboroffset + j];
int color_j = coloring[neighbor_j];
if (color_j)
v[color_j / 32] &= ~(1 << (color_j % 32));
}
// First fit: the smallest available color is the index of the lowest set
// bit in v. Isolate it with v & -v, then locate it with a branchless
// binary count; each empty word contributes 32 to the index.
int c = 32;
for (int i = 0; i < MAXCOLOR / 32; i++) {
if (v[i] != 0) {
v[i] &= -(signed)v[i]; // isolate lowest set bit
if (v[i]) c--;
if (v[i] & 0x0000ffff) c -= 16;
if (v[i] & 0x00ff00ff) c -= 8;
if (v[i] & 0x0f0f0f0f) c -= 4;
if (v[i] & 0x33333333) c -= 2;
if (v[i] & 0x55555555) c -= 1;
break;
}
else
c += 32;
}
coloring[node] = c;
}
}
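/* A minimal scalar sketch (illustration only, guarded out) of the bit trick
 * above: once the neighbors' colors have been cleared from the bitmap, the
 * first-fit color is the index of the lowest set bit. On GCC/Clang the same
 * value could be obtained with __builtin_ctz. */
#if 0
static int lowest_available_color(const unsigned *v, int nwords)
{
for (int i = 0; i < nwords; i++)
if (v[i])
return i * 32 + __builtin_ctz(v[i]); /* index of lowest set bit */
return nwords * 32; /* no free color in range */
}
#endif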
void conflictDetect(int m, int nnz, int *csrRowPtr, int *csrColInd, Worklist &inwl, Worklist &outwl, int *coloring)
{
unsigned start, end;
int ii;
//inwl.myItems(start, end);
start = inwl.start;
end = inwl.end;
//printf("inwl=%d, outwl=%d, start=%d, end=%d\n", inwl.getSize(), outwl.getSize(), start, end);
#ifdef ENABLE_OPENMP
#pragma omp parallel for
#endif
for (ii = start; ii < end; ii++) {
int j, node, neighbors, neighbor_j;
node = inwl.getItem(ii);
//if (node == -1)
//continue;
int neighboroffset = csrRowPtr[node];
neighbors = csrRowPtr[node + 1] - neighboroffset;
//neighbors = graph.noutgoing[node];
for (j = 0; j < neighbors; j++) {
//neighbor_j = graph.edgessrcdst[graph.psrc[node] + j];
neighbor_j = csrColInd[neighboroffset + j];
if (coloring[node] == coloring[neighbor_j] && node < neighbor_j) {
//printf("c[%d] = c[%d] = %d\n", node, neighbor_j, coloring[node]);
outwl.push(node);
break;
}
}
//if (j == neighbors)
//printf("%d ok\tcolor[%d]=%d\n", node, node, coloring[node]);
}
}
void findMax(int *coloring, int n, int *ncolors) {
int i;
for (i = 0; i < n; i++) {
//printf("coloring[%d]=%d\n", i, coloring[i]);
if (coloring[i] > *ncolors)
*ncolors = coloring[i];
}
}
void color(int m, int nnz, int *csrRowPtr, int *csrColInd, int *ncolors, int *coloring)
{
Worklist inwl, outwl, *inwlptr, *outwlptr, *tmp;
double starttime, endtime;
double runtime;
//int nnodes = graph.nnodes;
inwl.ensureSpace(m);
outwl.ensureSpace(m);
inwlptr = &inwl;
outwlptr = &outwl;
unsigned *range;
range = (unsigned *)malloc(m * sizeof(unsigned));
for (unsigned i = 0; i < m; i++)
range[i] = i;
//inwl.pushRange(graph.srcsrc, nnodes);
inwl.pushRange(range, m);
int iteration = 0;
unsigned wlsz = inwl.getSize();
//printf("wlsz=%d, outwl=%d\n", wlsz, outwl.getSize());
//printf("solving.\n");
starttime = rtclock();
#ifdef ENABLE_OPENMP
while (wlsz) {
++iteration;
#endif
//FirstFit(graph, *inwlptr, coloring);
FirstFit(m, nnz, csrRowPtr, csrColInd, *inwlptr, coloring);
#ifdef ENABLE_OPENMP
// No explicit synchronization is needed between the phases: __syncthreads()
// is a CUDA device intrinsic left over from the GPU version, and each
// "omp parallel for" already ends with an implicit barrier.
//printf("ok\n");
//conflictDetect(graph, *inwlptr, *outwlptr, coloring);
conflictDetect(m, nnz, csrRowPtr, csrColInd, *inwlptr, *outwlptr, coloring);
//printf("ok\n");
//printf("iteration %d:inwl=%d, outwl=%d\n", iteration, wlsz, outwlptr->getSize());
wlsz = outwlptr->getSize();
tmp = inwlptr; inwlptr = outwlptr; outwlptr = tmp;
outwlptr->clear();
}
#endif
endtime = rtclock();
//verify<<<(nnodes - 1) / 1024 + 1, 1024>>>(graph, coloring, correct);
//CUDA_SAFE_CALL(cudaDeviceSynchronize());
//if (*correct) {
//findMax<<<(nnodes - 1) / 1024 + 1, 1024>>>(coloring, nnodes, ncolors);
findMax(coloring, m, ncolors);
//CUDA_SAFE_CALL(cudaDeviceSynchronize());
//}
free(range); // safe once pushRange has copied the ids into the worklist
runtime = 1000.0 * (endtime - starttime); // milliseconds
printf("runtime=%f\tcolors=%d\t", runtime, *ncolors);
}
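/* Illustration only (guarded out): coloring a 3-node path graph 0-1-2 stored
 * in CSR form. Assumes the Worklist and rtclock definitions this header
 * relies on are visible. */
#if 0
static void example_color(void)
{
int rowPtr[4] = {0, 1, 3, 4}; /* node 0: {1}, node 1: {0,2}, node 2: {1} */
int colInd[4] = {1, 0, 2, 1};
int coloring[3] = {0, 0, 0};
int ncolors = 0;
color(3, 4, rowPtr, colInd, &ncolors, coloring);
/* adjacent nodes end up with different colors; ncolors should be 2 */
}
#endif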
|
elemwise_binary_op.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2016 by Contributors
* \file elemwise_binary_op.h
* \brief Function definition of elementwise binary operators
*/
#ifndef MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#define MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
#include <mxnet/operator_util.h>
#include <mxnet/op_attr_types.h>
#include <vector>
#include <string>
#include <utility>
#include <typeinfo>
#include <algorithm>
#include "../mxnet_op.h"
#include "../mshadow_op.h"
#include "../../engine/openmp.h"
#include "elemwise_unary_op.h"
#include "../../common/utils.h"
#include "./init_op.h"
namespace mxnet {
namespace op {
/*! Gather binary operator functions into ElemwiseBinaryOp class */
class ElemwiseBinaryOp : public OpBase {
public:
/*! \brief For sparse, assume missing rvalue is 0 */
template<typename OP, int Req>
struct MissingRValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *lhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(lhs[i], DType(0)));
}
};
/*! \brief For sparse, assume missing lvalue is 0 */
template<typename OP, int Req>
struct MissingLValueOp {
typedef OP Operation;
template<typename DType>
MSHADOW_XINLINE static void Map(int i, DType *out, const DType *rhs) {
KERNEL_ASSIGN(out[i], Req, OP::Map(DType(0), rhs[i]));
}
};
private:
/*!
* \brief CSR operation requires temp space
*/
enum ResourceRequestType {
kTempSpace
};
/*!
* \brief Fill contiguous dense output rows with value computed from 0 lhs and 0 rhs input
* CPU-Only version
*/
template<typename DType, typename OP, typename xpu>
static inline size_t FillDense(mshadow::Stream<xpu> *s,
const size_t idx_l,
const size_t idx_r,
const OpReqType req,
mshadow::Tensor<xpu, 2, DType> *out,
const size_t iter_out) {
const int index_out_min = static_cast<int>(std::min(idx_l, idx_r));
if (static_cast<size_t>(index_out_min) > iter_out) {
const DType zero_input_val = OP::Map(DType(0), DType(0));
#pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
for (int i = static_cast<int>(iter_out); i < index_out_min; ++i) {
Fill<false>(s, (*out)[i], req, zero_input_val);
}
}
return static_cast<size_t>(index_out_min); // MSVC wants OMP loops to always use 'int'
}
static inline bool IsSameArray(const NDArray& a1, const NDArray& a2) {
return a1.var() == a2.var();
}
/*! \brief Minimum of three */
static MSHADOW_XINLINE size_t minthree(const size_t a, const size_t b, const size_t c) {
return a < b ? (a < c ? a : c) : (b < c ? b : c);
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseNone_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
Stream<xpu> *s = ctx.get_stream<xpu>();
const int size = static_cast<int>((outputs[0].Size() + DataType<DType>::kLanes - 1)
/ DataType<DType>::kLanes);
const DType *ograd_dptr = inputs[0].dptr<DType>();
if (std::is_same<LOP, mshadow_op::identity>::value && req[0] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[0].dptr<DType>());
} else if (req[0] != kNullOp) {
DType *lgrad_dptr = outputs[0].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
Kernel<mxnet_op::op_with_req<LOP, Req>, xpu>::Launch(s, size, lgrad_dptr, ograd_dptr);
});
}
if (std::is_same<ROP, mshadow_op::identity>::value && req[1] == kWriteInplace) {
CHECK_EQ(ograd_dptr, outputs[1].dptr<DType>());
} else if (req[1] != kNullOp) {
DType *rgrad_dptr = outputs[1].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
Kernel<mxnet_op::op_with_req<ROP, Req>, xpu>::Launch(s, size, rgrad_dptr, ograd_dptr);
});
}
}
template<typename xpu, typename LOP, typename ROP, typename DType>
static void BackwardUseIn_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
DCHECK_EQ(outputs.size(), 2U);
DCHECK_EQ(inputs.size(), 3U);
mxnet_op::Stream<xpu> *s = ctx.get_stream<xpu>();
const DType *ograd_dptr = inputs[0].dptr<DType>();
const DType *lhs_dptr = inputs[1].dptr<DType>();
const DType *rhs_dptr = inputs[2].dptr<DType>();
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
const int size = static_cast<int>(
(outputs[0].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * lgrad_dptr = outputs[0].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<LOP>, Req>, xpu>::Launch(
s, size, lgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
MXNET_ASSIGN_REQ_SWITCH(req[1], Req, {
const int size = static_cast<int>(
(outputs[1].Size() + mxnet_op::DataType<DType>::kLanes - 1)
/ mxnet_op::DataType<DType>::kLanes);
DType * rgrad_dptr = outputs[1].dptr<DType>();
mxnet_op::Kernel<mxnet_op::op_with_req<mxnet_op::backward_grad_tuned<ROP>, Req>, xpu>::Launch(
s, size, rgrad_dptr, ograd_dptr, lhs_dptr, rhs_dptr);});
}
template<
typename xpu,
typename LOP,
typename ROP,
typename DType,
bool in0_ok_dense = false,
bool in1_ok_dense = false,
bool in2_ok_dense = false,
typename BackupCompute>
static inline void BackwardUseInEx_(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs,
BackupCompute backup_compute) {
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
// lhs grad
if (req[0] != kNullOp) {
// RspRspOp can handle dense outputs so long as OP(0, 0) == 0
MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, LOP>(
s, attrs, ctx, inputs[1], inputs[2], req[0], outputs[0],
false, false, false, false);
});
// lhs in-place
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, op::mshadow_op::mul>(
s, attrs, ctx, outputs[0], inputs[0], req[0], outputs[0],
false, false, true, false);
});
}
// rhs grad
if (req[1] != kNullOp) {
MSHADOW_IDX_TYPE_SWITCH(inputs[1].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, ROP>(
s, attrs, ctx, inputs[1], inputs[2], req[1], outputs[1],
false, false, false, false);
});
// rhs in-place
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, op::mshadow_op::mul>(
s, attrs, ctx, inputs[0], outputs[1], req[1], outputs[1],
false, false, true, false);
});
}
}
protected:
/*! \brief Binary op handling for lhs/rhs: RspDns, RspRsp, DnsRsp, or RspRsp->Dns result */
template<typename DType, typename IType, typename OP>
static void RspRspOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
bool lhs_may_be_dense,
bool rhs_may_be_dense,
bool allow_inplace,
bool scatter);
/*! \brief CSR -op- CSR binary operator for non-canonical NDArray */
template<typename DType, typename IType, typename CType, typename OP>
static inline void CsrCsrOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output);
/*! \brief DNS -op- CSR binary operator for non-canonical NDArray */
template<typename OP>
static inline void DnsCsrDnsOp(mshadow::Stream<cpu> *s,
const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const NDArray &lhs,
const NDArray &rhs,
OpReqType req,
const NDArray &output,
const bool reverse);
public:
/*!
* \brief Rsp-op-Rsp operation which produces a dense result
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool SparseSparseWithDenseResult(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
/*!
* \brief Allow one of the binary inputs to be dense and still produce a sparse output.
* Typically used for sparse * dense = sparse.
* Note: for csr inputs it dispatches to the fallback path, except for the csr, csr -> csr case
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool PreferSparseStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2U) << " in operator " << attrs.name;
CHECK_EQ(out_attrs->size(), 1U) << " in operator " << attrs.name;
const auto& lhs_stype = in_attrs->at(0);
const auto& rhs_stype = in_attrs->at(1);
auto& out_stype = out_attrs->at(0);
bool dispatched = false;
const bool invalid_ctx = dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns -> dns
dispatched = storage_type_assign(&out_stype, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr -> csr
dispatched = storage_type_assign(&out_stype, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched &&
((lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage))) {
// rsp, dns -> rsp
// dns, rsp -> rsp
dispatched = storage_type_assign(&out_stype, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
/*!
* \brief Allow one of the inputs to be dense and produce a dense output;
* rsp inputs are supported only when both inputs are rsp.
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
template<bool cpu_only, bool rsp, bool csr>
static bool PreferDenseStorageType(const nnvm::NodeAttrs& attrs,
const int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs) {
using namespace common;
CHECK_EQ(in_attrs->size(), 2);
CHECK_EQ(out_attrs->size(), 1);
const auto lhs_stype = (*in_attrs)[0];
const auto rhs_stype = (*in_attrs)[1];
bool dispatched = false;
const bool invalid_ctx = cpu_only && dev_mask != mshadow::cpu::kDevMask;
const auto dispatch_ex = invalid_ctx ? DispatchMode::kFComputeFallback :
DispatchMode::kFComputeEx;
if (!dispatched && ContainsOnlyStorage(*in_attrs, kDefaultStorage)) {
// dns, dns ... -> dns
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, DispatchMode::kFCompute);
}
if (!dispatched && rsp && ContainsOnlyStorage(*in_attrs, kRowSparseStorage)) {
// rsp, rsp, ... -> rsp
dispatched = storage_type_assign(out_attrs, kRowSparseStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && csr && ContainsOnlyStorage(*in_attrs, kCSRStorage)) {
// csr, csr, ... -> csr
dispatched = storage_type_assign(out_attrs, kCSRStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched && ((lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage) ||
(lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage))) {
// dense, csr -> dense / csr, dense -> dense
dispatched = storage_type_assign(out_attrs, kDefaultStorage,
dispatch_mode, dispatch_ex);
}
if (!dispatched) {
dispatched = dispatch_fallback(out_attrs, dispatch_mode);
}
return dispatched;
}
/*!
* \brief Backward pass computing input gradient using forward inputs
* \param attrs Attributes
* \param dev_mask Device mask
* \param dispatch_mode Dispatch Mode
* \param in_attrs Input storage attributes
* \param out_attrs Output storage attributes
* \return true if handled
*/
static bool BackwardUseInStorageType(const nnvm::NodeAttrs& attrs,
int dev_mask,
DispatchMode* dispatch_mode,
std::vector<int> *in_attrs,
std::vector<int> *out_attrs);
template<typename xpu, typename OP>
static void Compute(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
using namespace mxnet_op;
if (req[0] != kNullOp) {
Stream<xpu> *s = ctx.get_stream<xpu>();
CHECK_EQ(inputs.size(), 2U);
CHECK_EQ(outputs.size(), 1U);
MXNET_ASSIGN_REQ_SWITCH(req[0], Req, {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
const size_t size = (minthree(outputs[0].Size(), inputs[0].Size(), inputs[1].Size())
+ DataType<DType>::kLanes - 1) / DataType<DType>::kLanes;
Kernel<mxnet_op::op_with_req<OP, Req>, xpu>::Launch(s, size,
outputs[0].dptr<DType>(),
inputs[0].dptr<DType>(), inputs[1].dptr<DType>());
});
});
}
}
template<typename xpu, typename OP>
static void ComputeEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
if ((ContainsOnlyStorage(inputs, kRowSparseStorage)) &&
(out_stype == kRowSparseStorage || out_stype == kDefaultStorage)) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
const int rsp_input_idx = lhs_stype == kRowSparseStorage ? 0 : 1;
MSHADOW_IDX_TYPE_SWITCH(inputs[rsp_input_idx].aux_type(rowsparse::kIdx), IType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0], false, false, false, false);
});
});
} else if (ContainsOnlyStorage(inputs, kCSRStorage) && out_stype == kCSRStorage) {
// csr, csr -> csr
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIdx), IType, {
MSHADOW_IDX_TYPE_SWITCH(inputs[0].aux_type(csr::kIndPtr), CType, {
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
CsrCsrOp<DType, IType, CType, OP>(
s, attrs, ctx, inputs[0], inputs[1], req[0], outputs[0]);
});
});
});
} else if (((lhs_stype == kCSRStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kCSRStorage)) &&
out_stype == kDefaultStorage) {
const NDArray& dns = (lhs_stype == kDefaultStorage)? inputs[0] : inputs[1];
const NDArray& csr = (lhs_stype == kCSRStorage)? inputs[0] : inputs[1];
const bool reverse = (lhs_stype == kCSRStorage);
DnsCsrDnsOp<OP>(s, attrs, ctx, dns, csr, req[0], outputs[0], reverse);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
/*! \brief ComputeEx allowing dense lvalue and/or rvalue */
template<typename xpu, typename OP, bool lhs_may_be_dense, bool rhs_may_be_dense>
static void ComputeDnsLRValueEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(inputs.size(), 2);
CHECK_EQ(outputs.size(), 1);
if (req[0] == kNullOp) return;
const auto lhs_stype = inputs[0].storage_type();
const auto rhs_stype = inputs[1].storage_type();
const auto out_stype = outputs[0].storage_type();
if ((out_stype == kRowSparseStorage || out_stype == kDefaultStorage) &&
((lhs_stype == kRowSparseStorage && rhs_stype == kRowSparseStorage) ||
(lhs_stype == kRowSparseStorage && rhs_stype == kDefaultStorage) ||
(lhs_stype == kDefaultStorage && rhs_stype == kRowSparseStorage)) &&
lhs_may_be_dense && rhs_may_be_dense) {
// rsp, rsp -> rsp
// rsp, rsp -> dns
// rsp, dns -> rsp
// dns, rsp -> rsp
// More than one dense input is not allowed (this is checked in RspRspOp):
// rsp, dns -> dns <-- NOT ALLOWED
// dns, rsp -> dns <-- NOT ALLOWED
mshadow::Stream<xpu> *s = ctx.get_stream<xpu>();
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
MSHADOW_IDX_TYPE_SWITCH(outputs[0].aux_type(rowsparse::kIdx), IType, {
RspRspOp<DType, IType, OP>(
s, attrs, ctx, inputs[0], inputs[1],
req[0], outputs[0], lhs_may_be_dense, rhs_may_be_dense, false, false);
});
});
} else if (lhs_stype == kCSRStorage && rhs_stype == kCSRStorage) {
ComputeEx<xpu, OP>(attrs, ctx, inputs, req, outputs);
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNone(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseNone_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseNoneEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
CHECK_EQ(inputs.size(), 1U); // output grad
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto in_stype = inputs[0].storage_type();
const auto lhs_stype = outputs[0].storage_type();
const auto rhs_stype = outputs[1].storage_type();
// lhs grad
if (req[0] != kNullOp) {
if (in_stype == lhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> rsp, _. op requires 0-input returns 0-output
DCHECK_LT(fabs(static_cast<float>(LOP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, LOP>(attrs, ctx, inputs, req, {outputs[0]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
// rhs grad
if (req[1] != kNullOp) {
if (in_stype == rhs_stype && (in_stype == kRowSparseStorage || in_stype == kCSRStorage)) {
CHECK_EQ(outputs[0].storage_type(), in_stype);
// rsp -> _, rsp. op requires 0-input returns 0-output
DCHECK_LT(fabs(static_cast<float>(ROP::Map(0))), 1e-5f);
UnaryOp::ComputeEx<xpu, ROP>(attrs, ctx, inputs, req, {outputs[1]});
} else {
LogUnimplementedOp(attrs, ctx, inputs, req, outputs);
}
}
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseIn(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<typename xpu, typename LOP, typename ROP>
static inline void BackwardUseInWithHalf2(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<TBlob> &inputs,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &outputs) {
MSHADOW_TYPE_SWITCH_WITH_HALF2(outputs[0].type_flag_, DType, {
BackwardUseIn_<xpu, LOP, ROP, DType>(attrs, ctx, inputs, req, outputs);
});
}
template<
typename xpu, typename LOP, typename ROP,
bool in0_ok_dense = false, bool in1_ok_dense = false, bool in2_ok_dense = false>
static inline void BackwardUseInEx(const nnvm::NodeAttrs &attrs,
const OpContext &ctx,
const std::vector<NDArray> &inputs,
const std::vector<OpReqType> &req,
const std::vector<NDArray> &outputs) {
using namespace common;
CHECK_EQ(inputs.size(), 3U);
CHECK_EQ(outputs.size(), 2U); // lhs input grad, rhs input grad
const auto lhs_grad_stype = outputs[0].storage_type();
const auto rhs_grad_stype = outputs[1].storage_type();
if (ContainsOnlyStorage(inputs, kRowSparseStorage) &&
(lhs_grad_stype == kDefaultStorage || lhs_grad_stype == kRowSparseStorage) &&
(rhs_grad_stype == kDefaultStorage || rhs_grad_stype == kRowSparseStorage)) {
// rsp, rsp, rsp -> [dns, rsp], [dns, rsp]
MSHADOW_TYPE_SWITCH(outputs[0].dtype(), DType, {
BackwardUseInEx_<xpu, LOP, ROP, DType, in0_ok_dense, in1_ok_dense, in2_ok_dense>(
attrs, ctx, inputs, req, outputs, BackwardUseIn<xpu, LOP, ROP>);
});
}
}
}; // class ElemwiseBinaryOp
/*! \brief Binary launch */
#define MXNET_OPERATOR_REGISTER_BINARY(name) \
NNVM_REGISTER_OP(name) \
.set_num_inputs(2) \
.set_num_outputs(1) \
.set_attr<nnvm::FListInputNames>("FListInputNames", \
[](const NodeAttrs& attrs) { \
return std::vector<std::string>{"lhs", "rhs"}; \
}) \
.set_attr<nnvm::FInferShape>("FInferShape", ElemwiseShape<2, 1>) \
.set_attr<nnvm::FInferType>("FInferType", ElemwiseType<2, 1>) \
.set_attr<nnvm::FInplaceOption>("FInplaceOption", \
[](const NodeAttrs& attrs){ \
return std::vector<std::pair<int, int> >{{0, 0}, {1, 0}}; \
}) \
.add_argument("lhs", "NDArray-or-Symbol", "first input") \
.add_argument("rhs", "NDArray-or-Symbol", "second input")
/*! \brief Binary launch, with FComputeEx for csr and rsp available */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseStorageType<2, 1, true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, with FComputeEx for csr and rsp available.
When inputs contain both sparse and dense, sparse output is preferred. */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PS(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferSparseStorageType) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
/*! \brief Binary launch, dense result.
* FInferStorageType is set to SparseSparseWithDenseResult, so
* sparse, sparse inputs produce a dense output.
*/
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_DR(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::SparseSparseWithDenseResult) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>)
/*! \brief Binary launch, with FComputeEx for prefer dense */
#define MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU_PD(__name$, __kernel$) \
MXNET_OPERATOR_REGISTER_BINARY(__name$) \
.set_attr<FInferStorageType>("FInferStorageType", \
ElemwiseBinaryOp::PreferDenseStorageType<true, true, true>) \
.set_attr<FCompute>("FCompute<cpu>", ElemwiseBinaryOp::Compute<cpu, __kernel$>) \
.set_attr<FComputeEx>("FComputeEx<cpu>", ElemwiseBinaryOp::ComputeEx<cpu, __kernel$>) \
.set_attr<FResourceRequest>("FResourceRequest", /* For Sparse CSR */ \
[](const NodeAttrs& attrs) { \
return std::vector<ResourceRequest>{ResourceRequest::kTempSpace};})
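/* Illustration only (guarded out): a hypothetical registration using the
 * macros above. "my_elemwise_add" is a made-up operator name; the real
 * registrations live in the operator .cc files and typically attach more
 * attributes (gradients, aliases, documentation). */
#if 0
MXNET_OPERATOR_REGISTER_BINARY_WITH_SPARSE_CPU(my_elemwise_add, mshadow_op::plus)
.add_alias("_my_add");
#endif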
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_TENSOR_ELEMWISE_BINARY_OP_H_
|
sse.h | /* SPDX-License-Identifier: MIT
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Copyright:
* 2017-2020 Evan Nemerson <evan@nemerson.com>
* 2015-2017 John W. Ratcliff <jratcliffscarab@gmail.com>
* 2015 Brandon Rowlett <browlett@nvidia.com>
* 2015 Ken Fast <kfast@gdeb.com>
*/
#if !defined(SIMDE_X86_SSE_H)
#define SIMDE_X86_SSE_H
#include "mmx.h"
#if defined(_WIN32)
#include <windows.h>
#endif
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DISABLE_UNWANTED_DIAGNOSTICS
SIMDE_BEGIN_DECLS_
typedef union {
#if defined(SIMDE_VECTOR_SUBSCRIPT)
SIMDE_ALIGN(16) int8_t i8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int16_t i16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int32_t i32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int64_t i64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint8_t u8 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint16_t u16 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint32_t u32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint64_t u64 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN(16) simde_int128 i128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) simde_uint128 u128 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#endif
SIMDE_ALIGN(16) simde_float32 f32 SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) int_fast32_t i32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
SIMDE_ALIGN(16) uint_fast32_t u32f SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
SIMDE_ALIGN(16) int8_t i8[16];
SIMDE_ALIGN(16) int16_t i16[8];
SIMDE_ALIGN(16) int32_t i32[4];
SIMDE_ALIGN(16) int64_t i64[2];
SIMDE_ALIGN(16) uint8_t u8[16];
SIMDE_ALIGN(16) uint16_t u16[8];
SIMDE_ALIGN(16) uint32_t u32[4];
SIMDE_ALIGN(16) uint64_t u64[2];
#if defined(SIMDE_HAVE_INT128_)
SIMDE_ALIGN(16) simde_int128 i128[1];
SIMDE_ALIGN(16) simde_uint128 u128[1];
#endif
SIMDE_ALIGN(16) simde_float32 f32[4];
SIMDE_ALIGN(16) int_fast32_t i32f[16 / sizeof(int_fast32_t)];
SIMDE_ALIGN(16) uint_fast32_t u32f[16 / sizeof(uint_fast32_t)];
#endif
SIMDE_ALIGN(16) simde__m64_private m64_private[2];
SIMDE_ALIGN(16) simde__m64 m64[2];
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_ALIGN(16) __m128 n;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN(16) int8x16_t neon_i8;
SIMDE_ALIGN(16) int16x8_t neon_i16;
SIMDE_ALIGN(16) int32x4_t neon_i32;
SIMDE_ALIGN(16) int64x2_t neon_i64;
SIMDE_ALIGN(16) uint8x16_t neon_u8;
SIMDE_ALIGN(16) uint16x8_t neon_u16;
SIMDE_ALIGN(16) uint32x4_t neon_u32;
SIMDE_ALIGN(16) uint64x2_t neon_u64;
SIMDE_ALIGN(16) float32x4_t neon_f32;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_ALIGN(16) float64x2_t neon_f64;
#endif
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
SIMDE_ALIGN(16) v128_t wasm_v128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) altivec_u8;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned short) altivec_u16;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) altivec_u32;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed char) altivec_i8;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed short) altivec_i16;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed int) altivec_i32;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(float) altivec_f32;
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long) altivec_u64;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(signed long long) altivec_i64;
SIMDE_ALIGN(16) SIMDE_POWER_ALTIVEC_VECTOR(double) altivec_f64;
#endif
#endif
} simde__m128_private;
#if defined(SIMDE_X86_SSE_NATIVE)
typedef __m128 simde__m128;
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
typedef float32x4_t simde__m128;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
typedef v128_t simde__m128;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
typedef SIMDE_POWER_ALTIVEC_VECTOR(float) simde__m128;
#elif defined(SIMDE_VECTOR_SUBSCRIPT)
typedef simde_float32 simde__m128 SIMDE_ALIGN(16) SIMDE_VECTOR(16) SIMDE_MAY_ALIAS;
#else
typedef simde__m128_private simde__m128;
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
typedef simde__m128 __m128;
#endif
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128), "simde__m128 size incorrect");
HEDLEY_STATIC_ASSERT(16 == sizeof(simde__m128_private), "simde__m128_private size incorrect");
#if defined(SIMDE_CHECK_ALIGNMENT) && defined(SIMDE_ALIGN_OF)
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128) == 16, "simde__m128 is not 16-byte aligned");
HEDLEY_STATIC_ASSERT(SIMDE_ALIGN_OF(simde__m128_private) == 16, "simde__m128_private is not 16-byte aligned");
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_private(simde__m128_private v) {
simde__m128 r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128_private
simde__m128_to_private(simde__m128 v) {
simde__m128_private r;
simde_memcpy(&r, &v, sizeof(r));
return r;
}
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int8x16_t, neon, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int16x8_t, neon, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int32x4_t, neon, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, int64x2_t, neon, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint8x16_t, neon, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint16x8_t, neon, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint32x4_t, neon, u32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, uint64x2_t, neon, u64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float32x4_t, neon, f32)
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, float64x2_t, neon, f64)
#endif
#endif /* defined(SIMDE_ARM_NEON_A32V7_NATIVE) */
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed char), altivec, i8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed short), altivec, i16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed int), altivec, i32)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned char), altivec, u8)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned short), altivec, u16)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), altivec, u32)
#if defined(SIMDE_BUG_GCC_95782)
SIMDE_FUNCTION_ATTRIBUTES
SIMDE_POWER_ALTIVEC_VECTOR(float)
simde__m128_to_altivec_f32(simde__m128 value) {
simde__m128_private r_ = simde__m128_to_private(value);
return r_.altivec_f32;
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde__m128_from_altivec_f32(SIMDE_POWER_ALTIVEC_VECTOR(float) value) {
simde__m128_private r_;
r_.altivec_f32 = value;
return simde__m128_from_private(r_);
}
#else
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(float), altivec, f32)
#endif
#if defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(signed long long), altivec, i64)
SIMDE_X86_GENERATE_CONVERSION_FUNCTION(m128, SIMDE_POWER_ALTIVEC_VECTOR(unsigned long long), altivec, u64)
#endif
#endif /* defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) */
enum {
#if defined(SIMDE_X86_SSE_NATIVE)
SIMDE_MM_ROUND_NEAREST = _MM_ROUND_NEAREST,
SIMDE_MM_ROUND_DOWN = _MM_ROUND_DOWN,
SIMDE_MM_ROUND_UP = _MM_ROUND_UP,
SIMDE_MM_ROUND_TOWARD_ZERO = _MM_ROUND_TOWARD_ZERO
#else
SIMDE_MM_ROUND_NEAREST = 0x0000,
SIMDE_MM_ROUND_DOWN = 0x2000,
SIMDE_MM_ROUND_UP = 0x4000,
SIMDE_MM_ROUND_TOWARD_ZERO = 0x6000
#endif
};
#if defined(_MM_FROUND_TO_NEAREST_INT)
# define SIMDE_MM_FROUND_TO_NEAREST_INT _MM_FROUND_TO_NEAREST_INT
# define SIMDE_MM_FROUND_TO_NEG_INF _MM_FROUND_TO_NEG_INF
# define SIMDE_MM_FROUND_TO_POS_INF _MM_FROUND_TO_POS_INF
# define SIMDE_MM_FROUND_TO_ZERO _MM_FROUND_TO_ZERO
# define SIMDE_MM_FROUND_CUR_DIRECTION _MM_FROUND_CUR_DIRECTION
# define SIMDE_MM_FROUND_RAISE_EXC _MM_FROUND_RAISE_EXC
# define SIMDE_MM_FROUND_NO_EXC _MM_FROUND_NO_EXC
#else
# define SIMDE_MM_FROUND_TO_NEAREST_INT 0x00
# define SIMDE_MM_FROUND_TO_NEG_INF 0x01
# define SIMDE_MM_FROUND_TO_POS_INF 0x02
# define SIMDE_MM_FROUND_TO_ZERO 0x03
# define SIMDE_MM_FROUND_CUR_DIRECTION 0x04
# define SIMDE_MM_FROUND_RAISE_EXC 0x00
# define SIMDE_MM_FROUND_NO_EXC 0x08
#endif
#define SIMDE_MM_FROUND_NINT \
(SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_FLOOR \
(SIMDE_MM_FROUND_TO_NEG_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_CEIL \
(SIMDE_MM_FROUND_TO_POS_INF | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_TRUNC \
(SIMDE_MM_FROUND_TO_ZERO | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_RINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_RAISE_EXC)
#define SIMDE_MM_FROUND_NEARBYINT \
(SIMDE_MM_FROUND_CUR_DIRECTION | SIMDE_MM_FROUND_NO_EXC)
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES) && !defined(_MM_FROUND_TO_NEAREST_INT)
# define _MM_FROUND_TO_NEAREST_INT SIMDE_MM_FROUND_TO_NEAREST_INT
# define _MM_FROUND_TO_NEG_INF SIMDE_MM_FROUND_TO_NEG_INF
# define _MM_FROUND_TO_POS_INF SIMDE_MM_FROUND_TO_POS_INF
# define _MM_FROUND_TO_ZERO SIMDE_MM_FROUND_TO_ZERO
# define _MM_FROUND_CUR_DIRECTION SIMDE_MM_FROUND_CUR_DIRECTION
# define _MM_FROUND_RAISE_EXC SIMDE_MM_FROUND_RAISE_EXC
# define _MM_FROUND_NINT SIMDE_MM_FROUND_NINT
# define _MM_FROUND_FLOOR SIMDE_MM_FROUND_FLOOR
# define _MM_FROUND_CEIL SIMDE_MM_FROUND_CEIL
# define _MM_FROUND_TRUNC SIMDE_MM_FROUND_TRUNC
# define _MM_FROUND_RINT SIMDE_MM_FROUND_RINT
# define _MM_FROUND_NEARBYINT SIMDE_MM_FROUND_NEARBYINT
#endif
SIMDE_FUNCTION_ATTRIBUTES
unsigned int
SIMDE_MM_GET_ROUNDING_MODE(void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _MM_GET_ROUNDING_MODE();
#elif defined(SIMDE_HAVE_FENV_H)
unsigned int vfe_mode;
switch (fegetround()) {
#if defined(FE_TONEAREST)
case FE_TONEAREST:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case FE_TOWARDZERO:
vfe_mode = SIMDE_MM_ROUND_DOWN;
break;
#endif
#if defined(FE_UPWARD)
case FE_UPWARD:
vfe_mode = SIMDE_MM_ROUND_UP;
break;
#endif
#if defined(FE_DOWNWARD)
case FE_DOWNWARD:
vfe_mode = SIMDE_MM_ROUND_TOWARD_ZERO;
break;
#endif
default:
vfe_mode = SIMDE_MM_ROUND_NEAREST;
break;
}
return vfe_mode;
#else
return SIMDE_MM_ROUND_NEAREST;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_GET_ROUNDING_MODE() SIMDE_MM_GET_ROUNDING_MODE()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
SIMDE_MM_SET_ROUNDING_MODE(unsigned int a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_MM_SET_ROUNDING_MODE(a);
#elif defined(SIMDE_HAVE_FENV_H)
int fe_mode = FE_TONEAREST;
switch (a) {
#if defined(FE_TONEAREST)
case SIMDE_MM_ROUND_NEAREST:
fe_mode = FE_TONEAREST;
break;
#endif
#if defined(FE_TOWARDZERO)
case SIMDE_MM_ROUND_TOWARD_ZERO:
fe_mode = FE_TOWARDZERO;
break;
#endif
#if defined(FE_DOWNWARD)
case SIMDE_MM_ROUND_DOWN:
fe_mode = FE_DOWNWARD;
break;
#endif
#if defined(FE_UPWARD)
case SIMDE_MM_ROUND_UP:
fe_mode = FE_UPWARD;
break;
#endif
default:
return;
}
fesetround(fe_mode);
#else
(void) a;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _MM_SET_ROUNDING_MODE(a) SIMDE_MM_SET_ROUNDING_MODE(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
uint32_t
simde_mm_getcsr (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_getcsr();
#else
return SIMDE_MM_GET_ROUNDING_MODE();
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_getcsr() simde_mm_getcsr()
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_setcsr (uint32_t a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_setcsr(a);
#else
SIMDE_MM_SET_ROUNDING_MODE(HEDLEY_STATIC_CAST(unsigned int, a));
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
#define _mm_setcsr(a) simde_mm_setcsr(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_round_ps (simde__m128 a, int rounding)
SIMDE_REQUIRE_CONSTANT_RANGE(rounding, 0, 15) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
/* For architectures which lack a current direction SIMD instruction.
*
* Note that NEON actually has a current rounding mode instruction,
* but in ARMv8+ the rounding mode is ignored and nearest is always
* used, so we treat ARMv7 as having a rounding mode but ARMv8 as
* not. */
#if \
defined(SIMDE_POWER_ALTIVEC_P6_NATIVE) || \
defined(SIMDE_ARM_NEON_A32V8)
if ((rounding & 7) == SIMDE_MM_FROUND_CUR_DIRECTION)
rounding = HEDLEY_STATIC_CAST(int, SIMDE_MM_GET_ROUNDING_MODE()) >> 13; /* map _MM_ROUND_* (bits 13-14) down to _MM_FROUND_* (bits 0-1) */
#endif
switch (rounding & ~SIMDE_MM_FROUND_NO_EXC) {
case SIMDE_MM_FROUND_CUR_DIRECTION:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndiq_f32(a_.neon_f32);
#elif defined(simde_math_nearbyintf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_nearbyintf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEAREST_INT:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_round(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndaq_f32(a_.neon_f32);
#elif defined(simde_math_roundf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_roundf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_NEG_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_floor(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndmq_f32(a_.neon_f32);
#elif defined(simde_math_floorf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_floorf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_POS_INF:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_ceil(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndpq_f32(a_.neon_f32);
#elif defined(simde_math_ceilf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_ceilf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
case SIMDE_MM_FROUND_TO_ZERO:
#if defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_trunc(a_.altivec_f32));
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && 0
r_.neon_f32 = vrndq_f32(a_.neon_f32);
#elif defined(simde_math_truncf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_truncf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
#endif
break;
default:
HEDLEY_UNREACHABLE_RETURN(simde_mm_undefined_ps());
}
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE4_1_NATIVE)
#define simde_mm_round_ps(a, rounding) _mm_round_ps(a, rounding)
#endif
#if defined(SIMDE_X86_SSE4_1_ENABLE_NATIVE_ALIASES)
#define _mm_round_ps(a, rounding) simde_mm_round_ps(a, rounding)
#endif
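/* Illustration only (guarded out): rounding every lane to the nearest integer
 * without raising floating-point exceptions. Tie handling follows whichever
 * backend is selected above. */
#if 0
static simde__m128 example_round_nearest(simde__m128 v) {
return simde_mm_round_ps(v, SIMDE_MM_FROUND_TO_NEAREST_INT | SIMDE_MM_FROUND_NO_EXC);
}
#endif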
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps(e3, e2, e1, e0);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
SIMDE_ALIGN(16) simde_float32 data[4] = { e0, e1, e2, e3 };
r_.neon_f32 = vld1q_f32(data);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_make(e0, e1, e2, e3);
#else
r_.f32[0] = e0;
r_.f32[1] = e1;
r_.f32[2] = e2;
r_.f32[3] = e3;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps(e3, e2, e1, e0) simde_mm_set_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ps1 (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ps1(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(a);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
(void) a;
return vec_splats(a);
#else
return simde_mm_set_ps(a, a, a, a);
#endif
}
#define simde_mm_set1_ps(a) simde_mm_set_ps1(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ps1(a) simde_mm_set_ps1(a)
# define _mm_set1_ps(a) simde_mm_set1_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_move_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_move_ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(b_.neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
SIMDE_POWER_ALTIVEC_VECTOR(unsigned char) m = {
16, 17, 18, 19,
4, 5, 6, 7,
8, 9, 10, 11,
12, 13, 14, 15
};
r_.altivec_f32 = vec_perm(a_.altivec_f32, b_.altivec_f32, m);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v8x16_shuffle(b_.wasm_v128, a_.wasm_v128, 0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 4, 1, 2, 3);
#else
r_.f32[0] = b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_move_ss(a, b) simde_mm_move_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vaddq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_add(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_add(a_.altivec_f32, b_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 + b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] + b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ps(a, b) simde_mm_add_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_add_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_add_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_add_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t b0 = vgetq_lane_f32(b_.neon_f32, 0);
float32x4_t value = vsetq_lane_f32(b0, vdupq_n_f32(0), 0);
// the upper values in the result must be the remnants of <a>.
r_.neon_f32 = vaddq_f32(a_.neon_f32, value);
#else
r_.f32[0] = a_.f32[0] + b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_add_ss(a, b) simde_mm_add_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_and_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_and_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vandq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 & b_.i32;
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_and(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_and_ps(a, b) simde_mm_and_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_andnot_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_andnot_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbicq_s32(b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_andnot(b_.wasm_v128, a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_andc(b_.altivec_f32, a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32 & b_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]) & b_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_andnot_ps(a, b) simde_mm_andnot_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_xor_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_xor_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = veorq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_xor(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_xor(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f ^ b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] ^ b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_xor_ps(a, b) simde_mm_xor_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_or_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_or_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vorrq_s32(a_.neon_i32, b_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_or(a_.altivec_i32, b_.altivec_i32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32f = a_.i32f | b_.i32f;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i] | b_.u32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_or_ps(a, b) simde_mm_or_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_not_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE2_NATIVE)
/* Note: we use ints instead of floats because we don't want cmpeq
* to return false for (NaN, NaN) */
__m128i ai = _mm_castps_si128(a);
return _mm_castsi128_ps(_mm_andnot_si128(ai, _mm_cmpeq_epi32(ai, ai)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vmvnq_s32(a_.neon_i32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_nor(a_.altivec_i32, a_.altivec_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_not(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = ~a_.i32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = ~(a_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
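/* Why the SSE2 path above compares integers rather than floats: for any
 * 32-bit lane x, _mm_cmpeq_epi32(x, x) yields all-ones even when the bit
 * pattern encodes a NaN, whereas a float compare of NaN with itself is false.
 * A minimal scalar sketch (illustration only) of the resulting identity: */
#if 0
static uint32_t example_not_u32(uint32_t a) {
uint32_t all_ones = ~UINT32_C(0); /* what the integer self-compare produces */
return ~a & all_ones; /* andnot(a, all_ones) == ~a */
}
#endif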
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_select_ps(simde__m128 a, simde__m128 b, simde__m128 mask) {
/* This function is for when you want to blend two elements together
* according to a mask. It is similar to _mm_blendv_ps, except that
* it is undefined whether the blend is based on the highest bit in
* each lane (like blendv) or just bitwise operations. This allows
* us to implement the function efficiently everywhere.
*
* Basically, you promise that all the lanes in mask are either 0 or
* ~0. */
#if defined(SIMDE_X86_SSE4_1_NATIVE)
return _mm_blendv_ps(a, b, mask);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b),
mask_ = simde__m128_to_private(mask);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vbslq_s32(mask_.neon_u32, b_.neon_i32, a_.neon_i32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_bitselect(b_.wasm_v128, a_.wasm_v128, mask_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_i32 = vec_sel(a_.altivec_i32, b_.altivec_i32, mask_.altivec_u32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = a_.i32 ^ ((a_.i32 ^ b_.i32) & mask_.i32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i] ^ ((a_.i32[i] ^ b_.i32[i]) & mask_.i32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
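/* Illustration only (guarded out): producing the mask with a full-width
 * comparison guarantees every lane is 0 or ~0, as simde_x_mm_select_ps
 * requires. simde_mm_cmpeq_ps is defined later in this header. */
#if 0
static simde__m128 example_select(simde__m128 a, simde__m128 b, simde__m128 key) {
simde__m128 mask = simde_mm_cmpeq_ps(a, key); /* all-ones where a == key */
return simde_x_mm_select_ps(a, b, mask); /* b where mask set, else a */
}
#endif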
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u16 = vrhadd_u16(b_.neon_u16, a_.neon_u16);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
uint32_t wa SIMDE_VECTOR(16);
uint32_t wb SIMDE_VECTOR(16);
uint32_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u16);
SIMDE_CONVERT_VECTOR_(wb, b_.u16);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u16, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = (a_.u16[i] + b_.u16[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu16(a, b) simde_mm_avg_pu16(a, b)
# define _m_pavgw(a, b) simde_mm_avg_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_avg_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_avg_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vrhadd_u8(b_.neon_u8, a_.neon_u8);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS) && defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR) && defined(SIMDE_CONVERT_VECTOR_)
/* Same widening trick as simde_mm_avg_pu16: 16-bit intermediates cannot
* overflow when averaging 8-bit inputs. */
uint16_t wa SIMDE_VECTOR(16);
uint16_t wb SIMDE_VECTOR(16);
uint16_t wr SIMDE_VECTOR(16);
SIMDE_CONVERT_VECTOR_(wa, a_.u8);
SIMDE_CONVERT_VECTOR_(wb, b_.u8);
wr = (wa + wb + 1) >> 1;
SIMDE_CONVERT_VECTOR_(r_.u8, wr);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] + b_.u8[i] + 1) >> 1;
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_avg_pu8(a, b) simde_mm_avg_pu8(a, b)
# define _m_pavgb(a, b) simde_mm_avg_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_abs_ps(simde__m128 a) {
#if defined(SIMDE_X86_AVX512F_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(7,1,0))
return _mm512_castps512_ps128(_mm512_abs_ps(_mm512_castps128_ps512(a)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vabsq_f32(a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_abs(a_.altivec_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_abs(a_.wasm_v128);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_fabsf(a_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vceqq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_eq(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpeq(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), a_.f32 == b_.f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] == b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ps(a, b) simde_mm_cmpeq_ps((a), (b))
#endif
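/* Usage sketch (illustration only, not part of the SIMDe API; the function
* name is hypothetical): combining simde_x_mm_not_ps with simde_mm_cmpeq_ps
* yields the complement mask, i.e. a lane-wise "not equal or unordered"
* test equivalent to simde_mm_cmpneq_ps. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_example_cmpneq_via_not (simde__m128 a, simde__m128 b) {
/* cmpeq produces all-zero lanes for unequal or NaN inputs; inverting the
* bits turns exactly those lanes into all-ones. */
return simde_x_mm_not_ps(simde_mm_cmpeq_ps(a, b));
}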
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpeq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpeq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpeq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] == b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpeq_ss(a, b) simde_mm_cmpeq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpge_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgeq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ge(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpge(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 >= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] >= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ps(a, b) simde_mm_cmpge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpge_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpge_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpge_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] >= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpge_ss(a, b) simde_mm_cmpge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpgt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcgtq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_gt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpgt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 > b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] > b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ps(a, b) simde_mm_cmpgt_ps((a), (b))
#endif
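/* Usage sketch for simde_x_mm_select_ps (illustration only, hypothetical
* name): clamping each lane to an upper bound. A comparison result is
* always all-zeros or all-ones per lane, so it satisfies the mask
* requirement documented on simde_x_mm_select_ps above. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_example_clamp_upper (simde__m128 v, simde__m128 hi) {
/* m is ~0 where v > hi, so select returns hi there and v elsewhere. */
simde__m128 m = simde_mm_cmpgt_ps(v, hi);
return simde_x_mm_select_ps(v, hi, m);
}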
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpgt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpgt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpgt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] > b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpgt_ss(a, b) simde_mm_cmpgt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcleq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_le(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmple(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 <= b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] <= b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ps(a, b) simde_mm_cmple_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmple_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmple_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmple_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] <= b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmple_ss(a, b) simde_mm_cmple_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vcltq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmplt(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 < b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] < b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ps(a, b) simde_mm_cmplt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmplt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmplt_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmplt_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] < b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmplt_ss(a, b) simde_mm_cmplt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u32 = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_ne(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE) && SIMDE_ARCH_POWER_CHECK(900) && !defined(HEDLEY_IBM_VERSION)
/* vec_cmpne(SIMDE_POWER_ALTIVEC_VECTOR(float), SIMDE_POWER_ALTIVEC_VECTOR(float))
is missing from XL C/C++ v16.1.1,
though the documentation (table 89 on page 432 of the IBM XL C/C++ for
Linux Compiler Reference, Version 16.1.1) shows that it should be
present. Both GCC and clang support it. */
r_.altivec_f32 = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(float), vec_cmpne(a_.altivec_f32, b_.altivec_f32));
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.i32 = HEDLEY_STATIC_CAST(__typeof__(r_.i32), (a_.f32 != b_.f32));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (a_.f32[i] != b_.f32[i]) ? ~UINT32_C(0) : UINT32_C(0);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ps(a, b) simde_mm_cmpneq_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpneq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpneq_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpneq_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.u32[0] = (a_.f32[0] != b_.f32[0]) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpneq_ss(a, b) simde_mm_cmpneq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ps (simde__m128 a, simde__m128 b) {
/* Note: NOT(a >= b) equals a < b only for ordered inputs; the two differ
* when a lane is NaN (cmpnge is true on unordered input, cmplt is false).
* The same caveat applies to the cmpn* wrappers below. */
return simde_mm_cmplt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ps(a, b) simde_mm_cmpnge_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnge_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmplt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnge_ss(a, b) simde_mm_cmpnge_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ps(a, b) simde_mm_cmpngt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpngt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmple_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpngt_ss(a, b) simde_mm_cmpngt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ps(a, b) simde_mm_cmpnle_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnle_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpgt_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnle_ss(a, b) simde_mm_cmpnle_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ps (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ps(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ps(a, b) simde_mm_cmpnlt_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpnlt_ss (simde__m128 a, simde__m128 b) {
return simde_mm_cmpge_ss(a, b);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpnlt_ss(a, b) simde_mm_cmpnlt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_and(wasm_f32x4_eq(a, a), wasm_f32x4_eq(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
/* Note: NEON has no ordered-compare builtin. Compare a == a and
* b == b (false only for NaN lanes) and AND the results to get the
* ordered mask. */
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vandq_u32(ceqaa, ceqbb);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_and(wasm_f32x4_eq(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_eq(b_.wasm_v128, b_.wasm_v128));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? UINT32_C(0) : ~UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ps(a, b) simde_mm_cmpord_ps((a), (b))
#endif
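/* Usage sketch (illustration only, hypothetical name): comparing a value
* with itself under cmpord gives all-ones exactly where the lane is not
* NaN, which makes scrubbing NaNs out of a vector a single AND. Assumes
* simde_mm_and_ps, defined earlier in this header. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_example_zero_nan_lanes (simde__m128 v) {
/* NaN lanes compare unordered, so the mask is 0 there and the AND
* replaces them with +0.0f; every other lane passes through unchanged. */
return simde_mm_and_ps(v, simde_mm_cmpord_ps(v, v));
}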
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpunord_ps(a, b);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
return wasm_v128_or(wasm_f32x4_ne(a, a), wasm_f32x4_ne(b, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t ceqaa = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t ceqbb = vceqq_f32(b_.neon_f32, b_.neon_f32);
r_.neon_u32 = vmvnq_u32(vandq_u32(ceqaa, ceqbb));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_or(wasm_f32x4_ne(a_.wasm_v128, a_.wasm_v128), wasm_f32x4_ne(b_.wasm_v128, b_.wasm_v128));
#elif defined(simde_math_isnanf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = (simde_math_isnanf(a_.f32[i]) || simde_math_isnanf(b_.f32[i])) ? ~UINT32_C(0) : UINT32_C(0);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ps(a, b) simde_mm_cmpunord_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpunord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
return _mm_cmpunord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpunord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(a_.f32[0]) || simde_math_isnanf(b_.f32[0])) ? ~UINT32_C(0) : UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.u32) / sizeof(r_.u32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpunord_ss(a, b) simde_mm_cmpunord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#else
return a_.f32[0] == b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comieq_ss(a, b) simde_mm_comieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#else
return a_.f32[0] >= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comige_ss(a, b) simde_mm_comige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#else
return a_.f32[0] > b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comigt_ss(a, b) simde_mm_comigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#else
return a_.f32[0] <= b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comile_ss(a, b) simde_mm_comile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
return !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#else
return a_.f32[0] < b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comilt_ss(a, b) simde_mm_comilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_comineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_comineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
return !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#else
return a_.f32[0] != b_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_comineq_ss(a, b) simde_mm_comineq_ss((a), (b))
#endif
/* Lane-wise copysign: keeps the magnitude of each lane of dest and takes
* the sign of the corresponding lane of src. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_copysign_ps(simde__m128 dest, simde__m128 src) {
simde__m128_private
r_,
dest_ = simde__m128_to_private(dest),
src_ = simde__m128_to_private(src);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t sign_pos = vreinterpretq_u32_f32(vdupq_n_f32(-SIMDE_FLOAT32_C(0.0)));
r_.neon_u32 = vbslq_u32(sign_pos, src_.neon_u32, dest_.neon_u32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
const v128_t sign_pos = wasm_f32x4_splat(-0.0f);
r_.wasm_v128 = wasm_v128_bitselect(src_.wasm_v128, dest_.wasm_v128, sign_pos);
#elif defined(SIMDE_POWER_ALTIVEC_P9_NATIVE)
#if !defined(HEDLEY_IBM_VERSION)
r_.altivec_f32 = vec_cpsgn(dest_.altivec_f32, src_.altivec_f32);
#else
r_.altivec_f32 = vec_cpsgn(src_.altivec_f32, dest_.altivec_f32);
#endif
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
const SIMDE_POWER_ALTIVEC_VECTOR(unsigned int) sign_pos = HEDLEY_REINTERPRET_CAST(SIMDE_POWER_ALTIVEC_VECTOR(unsigned int), vec_splats(-0.0f));
r_.altivec_f32 = vec_sel(dest_.altivec_f32, src_.altivec_f32, sign_pos);
#elif defined(SIMDE_IEEE754_STORAGE)
/* Bit trick: r = dest ^ ((dest ^ src) & signmask) replaces dest's sign
* bit with src's and leaves all other bits of dest untouched. The
* private views are unused in this branch. */
(void) src_;
(void) dest_;
simde__m128 sign_pos = simde_mm_set1_ps(-0.0f);
r_ = simde__m128_to_private(simde_mm_xor_ps(dest, simde_mm_and_ps(simde_mm_xor_ps(dest, src), sign_pos)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = simde_math_copysignf(dest_.f32[i], src_.f32[i]);
}
#endif
return simde__m128_from_private(r_);
}
/* Lane-wise "apply the sign of": flips the sign of each lane of dest
* wherever the corresponding lane of src is negative. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_xorsign_ps(simde__m128 dest, simde__m128 src) {
return simde_mm_xor_ps(simde_mm_and_ps(simde_mm_set1_ps(-0.0f), src), dest);
}
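/* Usage sketch (illustration only, hypothetical name): xorsign flips the
* sign of each lane of its first argument wherever the second argument is
* negative, so for non-negative magnitudes it matches copysign while
* needing one operation fewer in the portable paths. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_example_apply_sign (simde__m128 magnitudes, simde__m128 signs) {
return simde_x_mm_xorsign_ps(magnitudes, signs);
}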
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_pi2ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_pi2ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_pi2ps(a, b) simde_mm_cvt_pi2ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_) && !defined(__clang__) && 0
/* Disabled (&& 0): as written this branch would read a_ before it is
* initialized and would truncate instead of rounding. */
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
a_ = simde__m128_to_private(a);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = HEDLEY_STATIC_CAST(int32_t, simde_math_nearbyintf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ps2pi(a) simde_mm_cvt_ps2pi((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvt_si2ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_si2ss(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
r_.i32[1] = a_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_si2ss(a, b) simde_mm_cvt_si2ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvt_ss2si(a);
#elif defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
return vgetq_lane_s32(vcvtnq_s32_f32(simde__m128_to_neon_f32(a)), 0);
#else
simde__m128_private a_ = simde__m128_to_private(simde_mm_round_ps(a, SIMDE_MM_FROUND_CUR_DIRECTION));
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvt_ss2si(a) simde_mm_cvt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE) && 0 /* TODO */
r_.neon_f32 = vmovl_s16(vget_low_s16(vuzp1q_s16(a_.neon_i16, vmovq_n_s16(0))));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
simde_float32 v = a_.i16[i];
r_.f32[i] = v;
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi16_ps(a) simde_mm_cvtpi16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32_ps (simde__m128 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
simde__m64_private b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vcvt_f32_s32(b_.neon_i32), vget_high_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, b_.i32);
r_.m64_private[1] = a_.m64_private[1];
#else
r_.f32[0] = (simde_float32) b_.i32[0];
r_.f32[1] = (simde_float32) b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32_ps(a, b) simde_mm_cvtpi32_ps((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi32x2_ps (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi32x2_ps(a, b);
#else
simde__m128_private r_;
simde__m64_private
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vcombine_s32(a_.neon_i32, b_.neon_i32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.m64_private[0].f32, a_.i32);
SIMDE_CONVERT_VECTOR_(r_.m64_private[1].f32, b_.i32);
#else
r_.f32[0] = (simde_float32) a_.i32[0];
r_.f32[1] = (simde_float32) a_.i32[1];
r_.f32[2] = (simde_float32) b_.i32[0];
r_.f32[3] = (simde_float32) b_.i32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi32x2_ps(a, b) simde_mm_cvtpi32x2_ps(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpi8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpi8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_s32(vmovl_s16(vget_low_s16(vmovl_s8(a_.neon_i8))));
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[0]);
r_.f32[1] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[1]);
r_.f32[2] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[2]);
r_.f32[3] = HEDLEY_STATIC_CAST(simde_float32, a_.i8[3]);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpi8_ps(a) simde_mm_cvtpi8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi16 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi16(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i16 = vmovn_s32(vcvtq_s32_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = SIMDE_CONVERT_FTOI(int16_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi16(a) simde_mm_cvtps_pi16((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi32(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95399)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(vrndiq_f32(a_.neon_f32)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, simde_math_roundf(a_.f32[i]));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi32(a) simde_mm_cvtps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtps_pi8 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtps_pi8(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V8_NATIVE) && !defined(SIMDE_BUG_GCC_95471)
/* Clamp the input to [INT8_MIN, INT8_MAX], round, convert to i32, narrow to
* i16, combine with an all-zero vector of i16 (which will become the upper
* half), narrow to i8. */
float32x4_t max = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MAX));
float32x4_t min = vdupq_n_f32(HEDLEY_STATIC_CAST(simde_float32, INT8_MIN));
float32x4_t values = vrndnq_f32(vmaxq_f32(vminq_f32(max, a_.neon_f32), min));
r_.neon_i8 = vmovn_s16(vcombine_s16(vmovn_s32(vcvtq_s32_f32(values)), vdup_n_s16(0)));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.f32) / sizeof(a_.f32[0])) ; i++) {
if (a_.f32[i] > HEDLEY_STATIC_CAST(simde_float32, INT8_MAX))
r_.i8[i] = INT8_MAX;
else if (a_.f32[i] < HEDLEY_STATIC_CAST(simde_float32, INT8_MIN))
r_.i8[i] = INT8_MIN;
else
r_.i8[i] = SIMDE_CONVERT_FTOI(int8_t, simde_math_roundf(a_.f32[i]));
}
/* Note: the upper half is undefined */
#endif
return simde__m64_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtps_pi8(a) simde_mm_cvtps_pi8((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu16_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu16_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(a_.neon_u16));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.f32, a_.u16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (simde_float32) a_.u16[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu16_ps(a) simde_mm_cvtpu16_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtpu8_ps (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtpu8_ps(a);
#else
simde__m128_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcvtq_f32_u32(vmovl_u16(vget_low_u16(vmovl_u8(a_.neon_u8))));
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = HEDLEY_STATIC_CAST(simde_float32, a_.u8[i]);
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtpu8_ps(a) simde_mm_cvtpu8_ps(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi32_ss (simde__m128 a, int32_t b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtsi32_ss(a, b);
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.i32) / sizeof(r_.i32[0])) ; i++) {
r_.i32[i] = a_.i32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi32_ss(a, b) simde_mm_cvtsi32_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cvtsi64_ss (simde__m128 a, int64_t b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtsi64_ss(a, b);
#else
return _mm_cvtsi64x_ss(a, b);
#endif
#else
simde__m128_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(HEDLEY_STATIC_CAST(float32_t, b), a_.neon_f32, 0);
#else
r_ = a_;
r_.f32[0] = HEDLEY_STATIC_CAST(simde_float32, b);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtsi64_ss(a, b) simde_mm_cvtsi64_ss((a), b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde_float32
simde_mm_cvtss_f32 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtss_f32(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vgetq_lane_f32(a_.neon_f32, 0);
#else
return a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_f32(a) simde_mm_cvtss_f32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtss_si32 (simde__m128 a) {
return simde_mm_cvt_ss2si(a);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si32(a) simde_mm_cvtss_si32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvtss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64)
#if !defined(__PGI)
return _mm_cvtss_si64(a);
#else
return _mm_cvtss_si64x(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(vgetq_lane_f32(a_.neon_f32, 0)));
#else
return SIMDE_CONVERT_FTOI(int64_t, simde_math_roundf(a_.f32[0]));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtss_si64(a) simde_mm_cvtss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_cvtt_ps2pi (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_cvtt_ps2pi(a);
#else
simde__m64_private r_;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i32 = vcvt_s32_f32(vget_low_f32(a_.neon_f32));
#elif defined(SIMDE_CONVERT_VECTOR_)
SIMDE_CONVERT_VECTOR_(r_.i32, a_.m64_private[0].f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.i32[i] = SIMDE_CONVERT_FTOI(int32_t, a_.f32[i]);
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_mm_cvttps_pi32(a) simde_mm_cvtt_ps2pi(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ps2pi(a) simde_mm_cvtt_ps2pi((a))
# define _mm_cvttps_pi32(a) simde_mm_cvttps_pi32((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int32_t
simde_mm_cvtt_ss2si (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cvtt_ss2si(a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int32_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int32_t, a_.f32[0]);
#endif
#endif
}
#define simde_mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvtt_ss2si(a) simde_mm_cvtt_ss2si((a))
# define _mm_cvttss_si32(a) simde_mm_cvtt_ss2si((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int64_t
simde_mm_cvttss_si64 (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_ARCH_AMD64) && !defined(_MSC_VER)
#if defined(__PGI)
return _mm_cvttss_si64x(a);
#else
return _mm_cvttss_si64(a);
#endif
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return SIMDE_CONVERT_FTOI(int64_t, vgetq_lane_f32(a_.neon_f32, 0));
#else
return SIMDE_CONVERT_FTOI(int64_t, a_.f32[0]);
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cvttss_si64(a) simde_mm_cvttss_si64((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_cmpord_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_cmpord_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_cmpord_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(simde_math_isnanf)
r_.u32[0] = (simde_math_isnanf(simde_mm_cvtss_f32(a)) || simde_math_isnanf(simde_mm_cvtss_f32(b))) ? UINT32_C(0) : ~UINT32_C(0);
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.u32[i] = a_.u32[i];
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_cmpord_ss(a, b) simde_mm_cmpord_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vdivq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip0 = vrecpeq_f32(b_.neon_f32);
float32x4_t recip1 = vmulq_f32(recip0, vrecpsq_f32(recip0, b_.neon_f32));
r_.neon_f32 = vmulq_f32(a_.neon_f32, recip1);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 / b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] / b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ps(a, b) simde_mm_div_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_div_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_div_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_div_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_div_ps(a, b)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = a_.f32[0] / b_.f32[0];
SIMDE_VECTORIZE
for (size_t i = 1 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_div_ss(a, b) simde_mm_div_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int16_t
simde_mm_extract_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private a_ = simde__m64_to_private(a);
return a_.i16[imm8];
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(HEDLEY_PGI_VERSION)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
# define simde_mm_extract_pi16(a, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16((a), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_extract_pi16(a, imm8) HEDLEY_STATIC_CAST(int16_t, _mm_extract_pi16(a, imm8))
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_extract_pi16(a, imm8) vget_lane_s16(simde__m64_to_private(a).neon_i16, imm8)
#endif
#define simde_m_pextrw(a, imm8) simde_mm_extract_pi16(a, imm8)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_extract_pi16(a, imm8) simde_mm_extract_pi16((a), (imm8))
# define _m_pextrw(a, imm8) simde_mm_extract_pi16((a), (imm8))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_insert_pi16 (simde__m64 a, int16_t i, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 3) {
simde__m64_private
r_,
a_ = simde__m64_to_private(a);
r_.i64[0] = a_.i64[0];
r_.i16[imm8] = i;
return simde__m64_from_private(r_);
}
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# if HEDLEY_HAS_WARNING("-Wvector-conversion")
/* https://bugs.llvm.org/show_bug.cgi?id=44589 */
# define simde_mm_insert_pi16(a, i, imm8) ( \
HEDLEY_DIAGNOSTIC_PUSH \
_Pragma("clang diagnostic ignored \"-Wvector-conversion\"") \
(_mm_insert_pi16((a), (i), (imm8))) \
HEDLEY_DIAGNOSTIC_POP \
)
# else
# define simde_mm_insert_pi16(a, i, imm8) _mm_insert_pi16(a, i, imm8)
# endif
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
# define simde_mm_insert_pi16(a, i, imm8) simde__m64_from_neon_i16(vset_lane_s16((i), simde__m64_to_neon_i16(a), (imm8)))
#endif
#define simde_m_pinsrw(a, i, imm8) (simde_mm_insert_pi16(a, i, imm8))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_insert_pi16(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
# define _m_pinsrw(a, i, imm8) simde_mm_insert_pi16(a, i, imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
r_.altivec_f32 = vec_vsx_ld(0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_ld(0, mem_addr);
#else
r_ = *SIMDE_ALIGN_CAST(simde__m128_private const*, mem_addr);
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps(mem_addr) simde_mm_load_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ps1 (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ps1(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_dup_f32(mem_addr);
#else
r_ = simde__m128_to_private(simde_mm_set1_ps(*mem_addr));
#endif
return simde__m128_from_private(r_);
#endif
}
#define simde_mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ps1(mem_addr) simde_mm_load_ps1(mem_addr)
# define _mm_load1_ps(mem_addr) simde_mm_load_ps1(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_load_ss (simde_float32 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_load_ss(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(*mem_addr, vdupq_n_f32(0), 0);
#else
r_.f32[0] = *mem_addr;
r_.i32[1] = 0;
r_.i32[2] = 0;
r_.i32[3] = 0;
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_load_ss(mem_addr) simde_mm_load_ss(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadh_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_loadh_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vget_low_f32(a_.neon_f32), vld1_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)));
#else
simde__m64_private b_ = *HEDLEY_REINTERPRET_CAST(simde__m64_private const*, mem_addr);
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadh_pi(a, mem_addr) simde_mm_loadh_pi((a), (simde__m64 const*) (mem_addr))
#endif
/* The SSE documentation says that there are no alignment requirements
for mem_addr. Unfortunately they used the __m64 type for the argument,
which is supposed to be 8-byte aligned, so some compilers (like clang
with -Wcast-align) will generate a warning if you try to cast, say,
a simde_float32* to a simde__m64* for this function.
I think the choice of argument type is unfortunate, but I do think we
need to stick to it here. If there is demand I can always add something
like simde_x_mm_loadl_f32(simde__m128, simde_float32 mem_addr[2]); a
sketch of such a helper follows the function below. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadl_pi (simde__m128 a, simde__m64 const* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadl_pi(a, HEDLEY_REINTERPRET_CAST(__m64 const*, mem_addr));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vcombine_f32(vld1_f32(
HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr)), vget_high_f32(a_.neon_f32));
#else
simde__m64_private b_;
simde_memcpy(&b_, mem_addr, sizeof(b_));
r_.i32[0] = b_.i32[0];
r_.i32[1] = b_.i32[1];
r_.i32[2] = a_.i32[2];
r_.i32[3] = a_.i32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadl_pi(a, mem_addr) simde_mm_loadl_pi((a), (simde__m64 const*) (mem_addr))
#endif
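/* Sketch of the helper suggested in the note above (illustration only,
* hypothetical name): bouncing through a local simde__m64 avoids casting a
* simde_float32* to simde__m64*, so -Wcast-align stays quiet and the load
* is well-defined for any alignment. */
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_example_loadl_f32 (simde__m128 a, simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(2)]) {
simde__m64 tmp;
/* Compilers typically lower this memcpy to a plain 8-byte load. */
simde_memcpy(&tmp, mem_addr, sizeof(tmp));
return simde_mm_loadl_pi(a, &tmp);
}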
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadr_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadr_ps(mem_addr);
#else
simde__m128_private
r_,
v_ = simde__m128_to_private(simde_mm_load_ps(mem_addr));
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrev64q_f32(v_.neon_f32);
r_.neon_f32 = vextq_f32(r_.neon_f32, r_.neon_f32, 2);
#elif defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && 0
/* TODO: XLC documentation has it, but it doesn't seem to work.
* More investigation is necessary. */
r_.altivec_f32 = vec_reve(v_.altivec_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, v_.f32, v_.f32, 3, 2, 1, 0);
#else
r_.f32[0] = v_.f32[3];
r_.f32[1] = v_.f32[2];
r_.f32[2] = v_.f32[1];
r_.f32[3] = v_.f32[0];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadr_ps(mem_addr) simde_mm_loadr_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_loadu_ps (simde_float32 const mem_addr[HEDLEY_ARRAY_PARAM(4)]) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_loadu_ps(mem_addr);
#else
simde__m128_private r_;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vld1q_f32(HEDLEY_REINTERPRET_CAST(const float32_t*, mem_addr));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_v128_load(mem_addr);
#else
r_.f32[0] = mem_addr[0];
r_.f32[1] = mem_addr[1];
r_.f32[2] = mem_addr[2];
r_.f32[3] = mem_addr[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_loadu_ps(mem_addr) simde_mm_loadu_ps(mem_addr)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_maskmove_si64 (simde__m64 a, simde__m64 mask, int8_t* mem_addr) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_maskmove_si64(a, mask, HEDLEY_REINTERPRET_CAST(char*, mem_addr));
#else
simde__m64_private
a_ = simde__m64_to_private(a),
mask_ = simde__m64_to_private(mask);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(a_.i8) / sizeof(a_.i8[0])) ; i++) {
if (mask_.i8[i] < 0) {
mem_addr[i] = a_.i8[i];
}
}
#endif
}
#define simde_m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64(a, mask, mem_addr)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_maskmove_si64(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
# define _m_maskmovq(a, mask, mem_addr) simde_mm_maskmove_si64((a), (mask), SIMDE_CHECKED_REINTERPRET_CAST(int8_t*, char*, (mem_addr)))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmax_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] > b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pi16(a, b) simde_mm_max_pi16(a, b)
# define _m_pmaxsw(a, b) simde_mm_max_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmaxq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_max(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_max(a_.altivec_f32, b_.altivec_f32);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] > b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ps(a, b) simde_mm_max_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_max_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_max_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmax_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] > b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_pu8(a, b) simde_mm_max_pu8(a, b)
# define _m_pmaxub(a, b) simde_mm_max_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_max_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_max_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_max_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vmaxq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] > b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_max_ss(a, b) simde_mm_max_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pi16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pi16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_i16 = vmin_s16(a_.neon_i16, b_.neon_i16);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.i16) / sizeof(r_.i16[0])) ; i++) {
r_.i16[i] = (a_.i16[i] < b_.i16[i]) ? a_.i16[i] : b_.i16[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminsw(a, b) simde_mm_min_pi16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pi16(a, b) simde_mm_min_pi16(a, b)
# define _m_pminsw(a, b) simde_mm_min_pi16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ps(a, b);
#elif defined(SIMDE_FAST_NANS) && defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return simde__m128_from_neon_f32(vminq_f32(simde__m128_to_neon_f32(a), simde__m128_to_neon_f32(b)));
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.wasm_v128 = wasm_f32x4_min(a_.wasm_v128, b_.wasm_v128);
#else
r_.wasm_v128 = wasm_v128_bitselect(a_.wasm_v128, b_.wasm_v128, wasm_f32x4_lt(a_.wasm_v128, b_.wasm_v128));
#endif
return simde__m128_from_private(r_);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_FAST_NANS)
r_.altivec_f32 = vec_min(a_.altivec_f32, b_.altivec_f32);
#else
r_.altivec_f32 = vec_sel(b_.altivec_f32, a_.altivec_f32, vec_cmpgt(b_.altivec_f32, a_.altivec_f32));
#endif
return simde__m128_from_private(r_);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
simde__m128 mask = simde_mm_cmplt_ps(a, b);
return simde_mm_or_ps(simde_mm_and_ps(mask, a), simde_mm_andnot_ps(mask, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = (a_.f32[i] < b_.f32[i]) ? a_.f32[i] : b_.f32[i];
}
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ps(a, b) simde_mm_min_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_min_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_min_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_u8 = vmin_u8(a_.neon_u8, b_.neon_u8);
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
r_.u8[i] = (a_.u8[i] < b_.u8[i]) ? a_.u8[i] : b_.u8[i];
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pminub(a, b) simde_mm_min_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_pu8(a, b) simde_mm_min_pu8(a, b)
# define _m_pminub(a, b) simde_mm_min_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_min_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_min_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_min_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value = vgetq_lane_f32(vminq_f32(a_.neon_f32, b_.neon_f32), 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#else
r_.f32[0] = (a_.f32[0] < b_.f32[0]) ? a_.f32[0] : b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_min_ss(a, b) simde_mm_min_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movehl_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movehl_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a32 = vget_high_f32(a_.neon_f32);
float32x2_t b32 = vget_high_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(b32, a32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 6, 7, 2, 3);
#else
r_.f32[0] = b_.f32[2];
r_.f32[1] = b_.f32[3];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movehl_ps(a, b) simde_mm_movehl_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_movelh_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movelh_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a10 = vget_low_f32(a_.neon_f32);
float32x2_t b10 = vget_low_f32(b_.neon_f32);
r_.neon_f32 = vcombine_f32(a10, b10);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 1, 4, 5);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = b_.f32[0];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movelh_ps(a, b) simde_mm_movelh_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_pi8 (simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_movemask_pi8(a);
#else
simde__m64_private a_ = simde__m64_to_private(a);
int r = 0;
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
uint8x8_t input = a_.neon_u8;
const int8_t xr[8] = {-7, -6, -5, -4, -3, -2, -1, 0};
const uint8x8_t mask_and = vdup_n_u8(0x80);
const int8x8_t mask_shift = vld1_s8(xr);
const uint8x8_t mask_result = vshl_u8(vand_u8(input, mask_and), mask_shift);
uint8x8_t lo = mask_result;
r = vaddv_u8(lo);
#else
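/* Portable fallback: OR each byte's sign bit into the matching bit of the result mask. */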
const size_t nmemb = sizeof(a_.i8) / sizeof(a_.i8[0]);
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < nmemb ; i++) {
r |= (a_.u8[nmemb - 1 - i] >> 7) << (nmemb - 1 - i);
}
#endif
return r;
#endif
}
#define simde_m_pmovmskb(a) simde_mm_movemask_pi8(a)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_pi8(a) simde_mm_movemask_pi8(a)
# define _m_pmovmskb(a) simde_mm_movemask_pi8(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_movemask_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_movemask_ps(a);
#else
int r = 0;
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
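/* Isolate each lane's sign bit, shift it to its lane index, then horizontally add the lanes to form the mask. */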
static const int32_t shift_amount[] = { 0, 1, 2, 3 };
const int32x4_t shift = vld1q_s32(shift_amount);
uint32x4_t tmp = vshrq_n_u32(a_.neon_u32, 31);
return HEDLEY_STATIC_CAST(int, vaddvq_u32(vshlq_u32(tmp, shift)));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
// Shift out everything but the sign bits with a 32-bit unsigned shift right.
uint64x2_t high_bits = vreinterpretq_u64_u32(vshrq_n_u32(a_.neon_u32, 31));
// Merge the two pairs together with a 64-bit unsigned shift right + add.
uint8x16_t paired = vreinterpretq_u8_u64(vsraq_n_u64(high_bits, high_bits, 31));
// Extract the result.
return vgetq_lane_u8(paired, 0) | (vgetq_lane_u8(paired, 8) << 2);
#else
SIMDE_VECTORIZE_REDUCTION(|:r)
for (size_t i = 0 ; i < sizeof(a_.u32) / sizeof(a_.u32[0]) ; i++) {
r |= (a_.u32[i] >> ((sizeof(a_.u32[i]) * CHAR_BIT) - 1)) << i;
}
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_movemask_ps(a) simde_mm_movemask_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vmulq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_mul(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 * b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] * b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ps(a, b) simde_mm_mul_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_mul_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_mul_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_mul_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] * b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mul_ss(a, b) simde_mm_mul_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_mulhi_pu16 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_mulhi_pu16(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
const uint32x4_t t1 = vmull_u16(a_.neon_u16, b_.neon_u16);
const uint32x4_t t2 = vshrq_n_u32(t1, 16);
const uint16x4_t t3 = vmovn_u32(t2);
r_.neon_u16 = t3;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.u16) / sizeof(r_.u16[0])) ; i++) {
r_.u16[i] = HEDLEY_STATIC_CAST(uint16_t, ((HEDLEY_STATIC_CAST(uint32_t, a_.u16[i]) * HEDLEY_STATIC_CAST(uint32_t, b_.u16[i])) >> UINT32_C(16)));
}
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_mulhi_pu16(a, b) simde_mm_mulhi_pu16(a, b)
# define _m_pmulhuw(a, b) simde_mm_mulhi_pu16(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_prefetch (char const* p, int i) {
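/* Prefetch is only a hint: use GCC's __builtin_prefetch when available, otherwise do nothing. The locality hint 'i' is ignored in this fallback. */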
#if defined(HEDLEY_GCC_VERSION)
__builtin_prefetch(p);
#else
(void) p;
#endif
(void) i;
}
#if defined(SIMDE_X86_SSE_NATIVE)
# define simde_mm_prefetch(p, i) _mm_prefetch(p, i)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_prefetch(p, i) simde_mm_prefetch(p, i)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_negate_ps(simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return simde_mm_xor_ps(a, _mm_set1_ps(SIMDE_FLOAT32_C(-0.0)));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_POWER_ALTIVEC_P8_NATIVE) && \
(!defined(HEDLEY_GCC_VERSION) || HEDLEY_GCC_VERSION_CHECK(8,1,0))
r_.altivec_f32 = vec_neg(a_.altivec_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vnegq_f32(a_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_neg(a_.wasm_v128);
#elif defined(SIMDE_VECTOR_NEGATE)
r_.f32 = -a_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = -a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t recip = vrecpeq_f32(a_.neon_f32);
#if SIMDE_ACCURACY_PREFERENCE > 0
for (int i = 0; i < SIMDE_ACCURACY_PREFERENCE ; ++i) {
recip = vmulq_f32(recip, vrecpsq_f32(recip, a_.neon_f32));
}
#endif
r_.neon_f32 = recip;
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_div(simde_mm_set1_ps(1.0f), a_.wasm_v128);
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
r_.altivec_f32 = vec_re(a_.altivec_f32);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_SCALAR)
r_.f32 = 1.0f / a_.f32;
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://stackoverflow.com/questions/12227126/division-as-multiply-and-lut-fast-float-division-reciprocal/12228234#12228234 */
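/* Initial estimate via an integer magic-constant subtraction, refined with one Newton-Raphson step: r = r * (2 - r*x). */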
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
int32_t ix;
simde_float32 fx = a_.f32[i];
simde_memcpy(&ix, &fx, sizeof(ix));
int32_t x = INT32_C(0x7EF311C3) - ix;
simde_float32 temp;
simde_memcpy(&temp, &x, sizeof(temp));
r_.f32[i] = temp * (SIMDE_FLOAT32_C(2.0) - temp * fx);
}
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / a_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ps(a) simde_mm_rcp_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rcp_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rcp_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rcp_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
r_.f32[0] = 1.0f / a_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rcp_ss(a) simde_mm_rcp_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vrsqrteq_f32(a_.neon_f32);
#elif defined(SIMDE_IEEE754_STORAGE)
/* https://basesandframes.files.wordpress.com/2020/04/even_faster_math_functions_green_2020.pdf
Pages 100 - 103 */
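/* This is the classic "fast inverse square root": an integer magic-constant
initial guess (chosen per accuracy preference) refined by Newton-Raphson
steps of the form x = x * (1.5... - xhalf*x*x). */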
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[i] = INT32_C(0x5F37624F) - (a_.i32[i] >> 1);
#else
simde_float32 x = a_.f32[i];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[i] = x;
#endif
}
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = 1.0f / simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ps(a) simde_mm_rsqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_rsqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_rsqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_rsqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsetq_lane_f32(vgetq_lane_f32(simde__m128_to_private(simde_mm_rsqrt_ps(a)).neon_f32, 0), a_.neon_f32, 0);
#elif defined(SIMDE_IEEE754_STORAGE)
{
#if SIMDE_ACCURACY_PREFERENCE <= 0
r_.i32[0] = INT32_C(0x5F37624F) - (a_.i32[0] >> 1);
#else
simde_float32 x = a_.f32[0];
simde_float32 xhalf = SIMDE_FLOAT32_C(0.5) * x;
int32_t ix;
simde_memcpy(&ix, &x, sizeof(ix));
#if SIMDE_ACCURACY_PREFERENCE == 1
ix = INT32_C(0x5F375A82) - (ix >> 1);
#else
ix = INT32_C(0x5F37599E) - (ix >> 1);
#endif
simde_memcpy(&x, &ix, sizeof(x));
#if SIMDE_ACCURACY_PREFERENCE >= 2
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
#endif
x = x * (SIMDE_FLOAT32_C(1.5008909) - xhalf * x * x);
r_.f32[0] = x;
#endif
}
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#elif defined(simde_math_sqrtf)
r_.f32[0] = 1.0f / simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_rsqrt_ss(a) simde_mm_rsqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_sad_pu8 (simde__m64 a, simde__m64 b) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
return _mm_sad_pu8(a, b);
#else
simde__m64_private
r_,
a_ = simde__m64_to_private(a),
b_ = simde__m64_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint16x4_t t = vpaddl_u8(vabd_u8(a_.neon_u8, b_.neon_u8));
uint16_t r0 = t[0] + t[1] + t[2] + t[3];
r_.neon_u16 = vset_lane_u16(r0, vdup_n_u16(0), 0);
#else
uint16_t sum = 0;
#if defined(SIMDE_HAVE_STDLIB_H)
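/* abs() lives in <stdlib.h>, hence the guard; without it there is no portable fallback here. */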
SIMDE_VECTORIZE_REDUCTION(+:sum)
for (size_t i = 0 ; i < (sizeof(r_.u8) / sizeof(r_.u8[0])) ; i++) {
sum += HEDLEY_STATIC_CAST(uint8_t, abs(a_.u8[i] - b_.u8[i]));
}
r_.i16[0] = HEDLEY_STATIC_CAST(int16_t, sum);
r_.i16[1] = 0;
r_.i16[2] = 0;
r_.i16[3] = 0;
#else
HEDLEY_UNREACHABLE();
#endif
#endif
return simde__m64_from_private(r_);
#endif
}
#define simde_m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sad_pu8(a, b) simde_mm_sad_pu8(a, b)
# define _m_psadbw(a, b) simde_mm_sad_pu8(a, b)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_set_ss (simde_float32 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_set_ss(a);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vsetq_lane_f32(a, vdupq_n_f32(SIMDE_FLOAT32_C(0.0)), 0);
#else
return simde_mm_set_ps(SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), SIMDE_FLOAT32_C(0.0), a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_set_ss(a) simde_mm_set_ss(a)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setr_ps (simde_float32 e3, simde_float32 e2, simde_float32 e1, simde_float32 e0) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setr_ps(e3, e2, e1, e0);
#else
return simde_mm_set_ps(e0, e1, e2, e3);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setr_ps(e3, e2, e1, e0) simde_mm_setr_ps(e3, e2, e1, e0)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_setzero_ps (void) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_setzero_ps();
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
return vdupq_n_f32(SIMDE_FLOAT32_C(0.0));
#elif defined(SIMDE_POWER_ALTIVEC_P6_NATIVE)
return vec_splats(SIMDE_FLOAT32_C(0.0));
#else
simde__m128 r;
simde_memset(&r, 0, sizeof(r));
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_setzero_ps() simde_mm_setzero_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_undefined_ps (void) {
simde__m128_private r_;
#if defined(SIMDE_HAVE_UNDEFINED128)
r_.n = _mm_undefined_ps();
#elif !defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
r_ = simde__m128_to_private(simde_mm_setzero_ps());
#endif
return simde__m128_from_private(r_);
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_undefined_ps() simde_mm_undefined_ps()
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_POP
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_x_mm_setone_ps (void) {
simde__m128 t = simde_mm_setzero_ps();
return simde_mm_cmpeq_ps(t, t);
}
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_sfence (void) {
/* TODO: Use Hedley. */
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_sfence();
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 7))
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif !defined(__INTEL_COMPILER) && defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_ATOMICS__)
#if defined(__GNUC__) && (__GNUC__ == 4) && (__GNUC_MINOR__ < 9)
__atomic_thread_fence(__ATOMIC_SEQ_CST);
#else
atomic_thread_fence(memory_order_seq_cst);
#endif
#elif defined(_MSC_VER)
MemoryBarrier();
#elif HEDLEY_HAS_EXTENSION(c_atomic)
__c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
#elif defined(__GNUC__) && ((__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 1))
__sync_synchronize();
#elif defined(_OPENMP)
#pragma omp critical(simde_mm_sfence_)
{ }
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sfence() simde_mm_sfence()
#endif
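/* Pack four 2-bit lane selectors into an 8-bit immediate, matching the layout of _MM_SHUFFLE: result lane 0 takes selector w, lane 3 takes selector z. */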
#define SIMDE_MM_SHUFFLE(z, y, x, w) (((z) << 6) | ((y) << 4) | ((x) << 2) | (w))
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_SHUFFLE(z, y, x, w) SIMDE_MM_SHUFFLE(z, y, x, w)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_pi16(a, imm8) _mm_shuffle_pi16(a, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_pi16(a, imm8) (__extension__ ({ \
const simde__m64_private simde__tmp_a_ = simde__m64_to_private(a); \
simde__m64_from_private((simde__m64_private) { .i16 = \
SIMDE_SHUFFLE_VECTOR_(16, 8, \
(simde__tmp_a_).i16, \
(simde__tmp_a_).i16, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3), \
(((imm8) >> 6) & 3)) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m64
simde_mm_shuffle_pi16 (simde__m64 a, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m64_private r_;
simde__m64_private a_ = simde__m64_to_private(a);
for (size_t i = 0 ; i < sizeof(r_.i16) / sizeof(r_.i16[0]) ; i++) {
r_.i16[i] = a_.i16[(imm8 >> (i * 2)) & 3];
}
HEDLEY_DIAGNOSTIC_PUSH
#if HEDLEY_HAS_WARNING("-Wconditional-uninitialized")
# pragma clang diagnostic ignored "-Wconditional-uninitialized"
#endif
return simde__m64_from_private(r_);
HEDLEY_DIAGNOSTIC_POP
}
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE) && !defined(__PGI)
# define simde_m_pshufw(a, imm8) _m_pshufw(a, imm8)
#else
# define simde_m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_pi16(a, imm8) simde_mm_shuffle_pi16(a, imm8)
# define _m_pshufw(a, imm8) simde_mm_shuffle_pi16(a, imm8)
#endif
#if defined(SIMDE_X86_SSE_NATIVE) && !defined(__PGI)
# define simde_mm_shuffle_ps(a, b, imm8) _mm_shuffle_ps(a, b, imm8)
#elif defined(SIMDE_SHUFFLE_VECTOR_)
# define simde_mm_shuffle_ps(a, b, imm8) (__extension__ ({ \
simde__m128_from_private((simde__m128_private) { .f32 = \
SIMDE_SHUFFLE_VECTOR_(32, 16, \
simde__m128_to_private(a).f32, \
simde__m128_to_private(b).f32, \
(((imm8) ) & 3), \
(((imm8) >> 2) & 3), \
(((imm8) >> 4) & 3) + 4, \
(((imm8) >> 6) & 3) + 4) }); }))
#else
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_shuffle_ps (simde__m128 a, simde__m128 b, const int imm8)
SIMDE_REQUIRE_CONSTANT_RANGE(imm8, 0, 255) {
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[(imm8 >> 0) & 3];
r_.f32[1] = a_.f32[(imm8 >> 2) & 3];
r_.f32[2] = b_.f32[(imm8 >> 4) & 3];
r_.f32[3] = b_.f32[(imm8 >> 6) & 3];
return simde__m128_from_private(r_);
}
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_shuffle_ps(a, b, imm8) simde_mm_shuffle_ps((a), (b), imm8)
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ps (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ps(a);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vsqrtq_f32(a_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x4_t est = vrsqrteq_f32(a_.neon_f32);
for (int i = 0 ; i <= SIMDE_ACCURACY_PREFERENCE ; i++) {
est = vmulq_f32(vrsqrtsq_f32(vmulq_f32(a_.neon_f32, est), est), est);
}
r_.neon_f32 = vmulq_f32(a_.neon_f32, est);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sqrt(a_.wasm_v128);
#elif defined(simde_math_sqrtf)
SIMDE_VECTORIZE
for (size_t i = 0 ; i < sizeof(r_.f32) / sizeof(r_.f32[0]) ; i++) {
r_.f32[i] = simde_math_sqrtf(a_.f32[i]);
}
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ps(a) simde_mm_sqrt_ps((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sqrt_ss (simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sqrt_ss(a);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sqrt_ps(a));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32_t value =
vgetq_lane_f32(simde__m128_to_private(simde_mm_sqrt_ps(a)).neon_f32, 0);
r_.neon_f32 = vsetq_lane_f32(value, a_.neon_f32, 0);
#elif defined(simde_math_sqrtf)
r_.f32[0] = simde_math_sqrtf(a_.f32[0]);
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
#else
HEDLEY_UNREACHABLE();
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sqrt_ss(a) simde_mm_sqrt_ss((a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#elif defined(SIMDE_POWER_ALTIVEC_P7_NATIVE)
vec_vsx_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_POWER_ALTIVEC_P5_NATIVE)
vec_st(a_.altivec_f32, 0, mem_addr);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
wasm_v128_store(mem_addr, a_.wasm_v128);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps(mem_addr, a) simde_mm_store_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ps1 (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ps1(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 0);
mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[0];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ps1(mem_addr, a) simde_mm_store_ps1(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store_ss (simde_float32* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store_ss(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_lane_f32(mem_addr, a_.neon_f32, 0);
#else
*mem_addr = a_.f32[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store_ss(mem_addr, a) simde_mm_store_ss(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_store1_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_store1_ps(mem_addr, a);
#else
simde_mm_store_ps1(mem_addr, a);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_store1_ps(mem_addr, a) simde_mm_store1_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeh_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeh_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->f32[0] = vgetq_lane_f32(a_.neon_f32, 2);
dest_->f32[1] = vgetq_lane_f32(a_.neon_f32, 3);
#else
dest_->f32[0] = a_.f32[2];
dest_->f32[1] = a_.f32[3];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeh_pi(mem_addr, a) simde_mm_storeh_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storel_pi (simde__m64* mem_addr, simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storel_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private* dest_ = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr);
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest_->neon_f32 = vget_low_f32(a_.neon_f32);
#else
dest_->f32[0] = a_.f32[0];
dest_->f32[1] = a_.f32[1];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storel_pi(mem_addr, a) simde_mm_storel_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storer_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storer_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_SHUFFLE_VECTOR_)
a_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, a_.f32, 3, 2, 1, 0);
simde_mm_store_ps(mem_addr, simde__m128_from_private(a_));
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
mem_addr[0] = vgetq_lane_f32(a_.neon_f32, 3);
mem_addr[1] = vgetq_lane_f32(a_.neon_f32, 2);
mem_addr[2] = vgetq_lane_f32(a_.neon_f32, 1);
mem_addr[3] = vgetq_lane_f32(a_.neon_f32, 0);
#else
SIMDE_VECTORIZE_ALIGNED(mem_addr:16)
for (size_t i = 0 ; i < sizeof(a_.f32) / sizeof(a_.f32[0]) ; i++) {
mem_addr[i] = a_.f32[((sizeof(a_.f32) / sizeof(a_.f32[0])) - 1) - i];
}
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storer_ps(mem_addr, a) simde_mm_storer_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_storeu_ps (simde_float32 mem_addr[4], simde__m128 a) {
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_storeu_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(mem_addr, a_.neon_f32);
#else
simde_memcpy(mem_addr, &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_storeu_ps(mem_addr, a) simde_mm_storeu_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
r_.neon_f32 = vsubq_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_WASM_SIMD128_NATIVE)
r_.wasm_v128 = wasm_f32x4_sub(a_.wasm_v128, b_.wasm_v128);
#elif defined(SIMDE_VECTOR_SUBSCRIPT_OPS)
r_.f32 = a_.f32 - b_.f32;
#else
SIMDE_VECTORIZE
for (size_t i = 0 ; i < (sizeof(r_.f32) / sizeof(r_.f32[0])) ; i++) {
r_.f32[i] = a_.f32[i] - b_.f32[i];
}
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ps(a, b) simde_mm_sub_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_sub_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_sub_ss(a, b);
#elif (SIMDE_NATURAL_VECTOR_SIZE > 0)
return simde_mm_move_ss(a, simde_mm_sub_ps(a, b));
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
r_.f32[0] = a_.f32[0] - b_.f32[0];
r_.f32[1] = a_.f32[1];
r_.f32[2] = a_.f32[2];
r_.f32[3] = a_.f32[3];
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_sub_ss(a, b) simde_mm_sub_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomieq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomieq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_eq_b = vceqq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_eq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] == b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] == b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomieq_ss(a, b) simde_mm_ucomieq_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomige_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomige_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_ge_b = vcgeq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_ge_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] >= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] >= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomige_ss(a, b) simde_mm_ucomige_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomigt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomigt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_gt_b = vcgtq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_gt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] > b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] > b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomigt_ss(a, b) simde_mm_ucomigt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomile_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomile_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_le_b = vcleq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_le_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] <= b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] <= b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomile_ss(a, b) simde_mm_ucomile_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomilt_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomilt_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_or_b_nan = vmvnq_u32(vandq_u32(a_not_nan, b_not_nan));
uint32x4_t a_lt_b = vcltq_f32(a_.neon_f32, b_.neon_f32);
r = !!(vgetq_lane_u32(vorrq_u32(a_or_b_nan, a_lt_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] < b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] < b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomilt_ss(a, b) simde_mm_ucomilt_ss((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
int
simde_mm_ucomineq_ss (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_ucomineq_ss(a, b);
#else
simde__m128_private
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
int r;
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
uint32x4_t a_not_nan = vceqq_f32(a_.neon_f32, a_.neon_f32);
uint32x4_t b_not_nan = vceqq_f32(b_.neon_f32, b_.neon_f32);
uint32x4_t a_and_b_not_nan = vandq_u32(a_not_nan, b_not_nan);
uint32x4_t a_neq_b = vmvnq_u32(vceqq_f32(a_.neon_f32, b_.neon_f32));
r = !!(vgetq_lane_u32(vandq_u32(a_and_b_not_nan, a_neq_b), 0) != 0);
#elif defined(SIMDE_HAVE_FENV_H)
fenv_t envp;
int x = feholdexcept(&envp);
r = a_.f32[0] != b_.f32[0];
if (HEDLEY_LIKELY(x == 0))
fesetenv(&envp);
#else
r = a_.f32[0] != b_.f32[0];
#endif
return r;
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_ucomineq_ss(a, b) simde_mm_ucomineq_ss((a), (b))
#endif
#if defined(SIMDE_X86_SSE_NATIVE)
# if defined(__has_builtin)
# if __has_builtin(__builtin_ia32_undef128)
# define SIMDE_HAVE_UNDEFINED128
# endif
# elif !defined(__PGI) && !defined(SIMDE_BUG_GCC_REV_208793) && !defined(_MSC_VER)
# define SIMDE_HAVE_UNDEFINED128
# endif
#endif
#if defined(SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_)
HEDLEY_DIAGNOSTIC_PUSH
SIMDE_DIAGNOSTIC_DISABLE_UNINITIALIZED_
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpackhi_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpackhi_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip2q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_high_f32(a_.neon_f32);
float32x2_t b1 = vget_high_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 2, 6, 3, 7);
#else
r_.f32[0] = a_.f32[2];
r_.f32[1] = b_.f32[2];
r_.f32[2] = a_.f32[3];
r_.f32[3] = b_.f32[3];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpackhi_ps(a, b) simde_mm_unpackhi_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
simde__m128
simde_mm_unpacklo_ps (simde__m128 a, simde__m128 b) {
#if defined(SIMDE_X86_SSE_NATIVE)
return _mm_unpacklo_ps(a, b);
#else
simde__m128_private
r_,
a_ = simde__m128_to_private(a),
b_ = simde__m128_to_private(b);
#if defined(SIMDE_ARM_NEON_A64V8_NATIVE)
r_.neon_f32 = vzip1q_f32(a_.neon_f32, b_.neon_f32);
#elif defined(SIMDE_SHUFFLE_VECTOR_)
r_.f32 = SIMDE_SHUFFLE_VECTOR_(32, 16, a_.f32, b_.f32, 0, 4, 1, 5);
#elif defined(SIMDE_ARM_NEON_A32V7_NATIVE)
float32x2_t a1 = vget_low_f32(a_.neon_f32);
float32x2_t b1 = vget_low_f32(b_.neon_f32);
float32x2x2_t result = vzip_f32(a1, b1);
r_.neon_f32 = vcombine_f32(result.val[0], result.val[1]);
#else
r_.f32[0] = a_.f32[0];
r_.f32[1] = b_.f32[0];
r_.f32[2] = a_.f32[1];
r_.f32[3] = b_.f32[1];
#endif
return simde__m128_from_private(r_);
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_unpacklo_ps(a, b) simde_mm_unpacklo_ps((a), (b))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_pi (simde__m64* mem_addr, simde__m64 a) {
#if defined(SIMDE_X86_SSE_NATIVE) && defined(SIMDE_X86_MMX_NATIVE)
_mm_stream_pi(HEDLEY_REINTERPRET_CAST(__m64*, mem_addr), a);
#else
simde__m64_private*
dest = HEDLEY_REINTERPRET_CAST(simde__m64_private*, mem_addr),
a_ = simde__m64_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
dest->i64[0] = vget_lane_s64(a_.neon_i64, 0);
#else
dest->i64[0] = a_.i64[0];
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_pi(mem_addr, a) simde_mm_stream_pi(mem_addr, (a))
#endif
SIMDE_FUNCTION_ATTRIBUTES
void
simde_mm_stream_ps (simde_float32 mem_addr[4], simde__m128 a) {
simde_assert_aligned(16, mem_addr);
#if defined(SIMDE_X86_SSE_NATIVE)
_mm_stream_ps(mem_addr, a);
#else
simde__m128_private a_ = simde__m128_to_private(a);
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
vst1q_f32(SIMDE_ASSUME_ALIGNED(16, mem_addr), a_.neon_f32);
#else
simde_memcpy(SIMDE_ASSUME_ALIGNED(16, mem_addr), &a_, sizeof(a_));
#endif
#endif
}
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _mm_stream_ps(mem_addr, a) simde_mm_stream_ps(SIMDE_CHECKED_REINTERPRET_CAST(float*, simde_float32*, mem_addr), (a))
#endif
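/* 4x4 transpose: the NEON version pair-transposes rows with vtrnq_f32 and recombines the halves; the portable version is the classic unpacklo/unpackhi plus movelh/movehl sequence. */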
#if defined(SIMDE_ARM_NEON_A32V7_NATIVE)
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
float32x4x2_t ROW01 = vtrnq_f32(row0, row1); \
float32x4x2_t ROW23 = vtrnq_f32(row2, row3); \
row0 = vcombine_f32(vget_low_f32(ROW01.val[0]), \
vget_low_f32(ROW23.val[0])); \
row1 = vcombine_f32(vget_low_f32(ROW01.val[1]), \
vget_low_f32(ROW23.val[1])); \
row2 = vcombine_f32(vget_high_f32(ROW01.val[0]), \
vget_high_f32(ROW23.val[0])); \
row3 = vcombine_f32(vget_high_f32(ROW01.val[1]), \
vget_high_f32(ROW23.val[1])); \
} while (0)
#else
#define SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
do { \
simde__m128 tmp3, tmp2, tmp1, tmp0; \
tmp0 = simde_mm_unpacklo_ps((row0), (row1)); \
tmp2 = simde_mm_unpacklo_ps((row2), (row3)); \
tmp1 = simde_mm_unpackhi_ps((row0), (row1)); \
tmp3 = simde_mm_unpackhi_ps((row2), (row3)); \
row0 = simde_mm_movelh_ps(tmp0, tmp2); \
row1 = simde_mm_movehl_ps(tmp2, tmp0); \
row2 = simde_mm_movelh_ps(tmp1, tmp3); \
row3 = simde_mm_movehl_ps(tmp3, tmp1); \
} while (0)
#endif
#if defined(SIMDE_X86_SSE_ENABLE_NATIVE_ALIASES)
# define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) SIMDE_MM_TRANSPOSE4_PS(row0, row1, row2, row3)
#endif
#if defined(_MM_EXCEPT_INVALID)
# define SIMDE_MM_EXCEPT_INVALID _MM_EXCEPT_INVALID
#else
# define SIMDE_MM_EXCEPT_INVALID (0x0001)
#endif
#if defined(_MM_EXCEPT_DENORM)
# define SIMDE_MM_EXCEPT_DENORM _MM_EXCEPT_DENORM
#else
# define SIMDE_MM_EXCEPT_DENORM (0x0002)
#endif
#if defined(_MM_EXCEPT_DIV_ZERO)
# define SIMDE_MM_EXCEPT_DIV_ZERO _MM_EXCEPT_DIV_ZERO
#else
# define SIMDE_MM_EXCEPT_DIV_ZERO (0x0004)
#endif
#if defined(_MM_EXCEPT_OVERFLOW)
# define SIMDE_MM_EXCEPT_OVERFLOW _MM_EXCEPT_OVERFLOW
#else
# define SIMDE_MM_EXCEPT_OVERFLOW (0x0008)
#endif
#if defined(_MM_EXCEPT_UNDERFLOW)
# define SIMDE_MM_EXCEPT_UNDERFLOW _MM_EXCEPT_UNDERFLOW
#else
# define SIMDE_MM_EXCEPT_UNDERFLOW (0x0010)
#endif
#if defined(_MM_EXCEPT_INEXACT)
# define SIMDE_MM_EXCEPT_INEXACT _MM_EXCEPT_INEXACT
#else
# define SIMDE_MM_EXCEPT_INEXACT (0x0020)
#endif
#if defined(_MM_EXCEPT_MASK)
# define SIMDE_MM_EXCEPT_MASK _MM_EXCEPT_MASK
#else
# define SIMDE_MM_EXCEPT_MASK \
(SIMDE_MM_EXCEPT_INVALID | SIMDE_MM_EXCEPT_DENORM | \
SIMDE_MM_EXCEPT_DIV_ZERO | SIMDE_MM_EXCEPT_OVERFLOW | \
SIMDE_MM_EXCEPT_UNDERFLOW | SIMDE_MM_EXCEPT_INEXACT)
#endif
#if defined(_MM_MASK_INVALID)
# define SIMDE_MM_MASK_INVALID _MM_MASK_INVALID
#else
# define SIMDE_MM_MASK_INVALID (0x0080)
#endif
#if defined(_MM_MASK_DENORM)
# define SIMDE_MM_MASK_DENORM _MM_MASK_DENORM
#else
# define SIMDE_MM_MASK_DENORM (0x0100)
#endif
#if defined(_MM_MASK_DIV_ZERO)
# define SIMDE_MM_MASK_DIV_ZERO _MM_MASK_DIV_ZERO
#else
# define SIMDE_MM_MASK_DIV_ZERO (0x0200)
#endif
#if defined(_MM_MASK_OVERFLOW)
# define SIMDE_MM_MASK_OVERFLOW _MM_MASK_OVERFLOW
#else
# define SIMDE_MM_MASK_OVERFLOW (0x0400)
#endif
#if defined(_MM_MASK_UNDERFLOW)
# define SIMDE_MM_MASK_UNDERFLOW _MM_MASK_UNDERFLOW
#else
# define SIMDE_MM_MASK_UNDERFLOW (0x0800)
#endif
#if defined(_MM_MASK_INEXACT)
# define SIMDE_MM_MASK_INEXACT _MM_MASK_INEXACT
#else
# define SIMDE_MM_MASK_INEXACT (0x1000)
#endif
#if defined(_MM_MASK_MASK)
# define SIMDE_MM_MASK_MASK _MM_MASK_MASK
#else
# define SIMDE_MM_MASK_MASK \
(SIMDE_MM_MASK_INVALID | SIMDE_MM_MASK_DENORM | \
SIMDE_MM_MASK_DIV_ZERO | SIMDE_MM_MASK_OVERFLOW | \
SIMDE_MM_MASK_UNDERFLOW | SIMDE_MM_MASK_INEXACT)
#endif
#if defined(_MM_FLUSH_ZERO_MASK)
# define SIMDE_MM_FLUSH_ZERO_MASK _MM_FLUSH_ZERO_MASK
#else
# define SIMDE_MM_FLUSH_ZERO_MASK (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_ON)
# define SIMDE_MM_FLUSH_ZERO_ON _MM_FLUSH_ZERO_ON
#else
# define SIMDE_MM_FLUSH_ZERO_ON (0x8000)
#endif
#if defined(_MM_FLUSH_ZERO_OFF)
# define SIMDE_MM_FLUSH_ZERO_OFF _MM_FLUSH_ZERO_OFF
#else
# define SIMDE_MM_FLUSH_ZERO_OFF (0x0000)
#endif
SIMDE_END_DECLS_
HEDLEY_DIAGNOSTIC_POP
#endif /* !defined(SIMDE_X86_SSE_H) */
|
DeclOpenMP.h | //===- DeclOpenMP.h - Classes for representing OpenMP directives -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines OpenMP nodes for declarative directives.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_DECLOPENMP_H
#define LLVM_CLANG_AST_DECLOPENMP_H
#include "clang/AST/Decl.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/TrailingObjects.h"
namespace clang {
/// This represents '#pragma omp threadprivate ...' directive.
/// For example, in the following, both 'a' and 'A::b' are threadprivate:
///
/// \code
/// int a;
/// #pragma omp threadprivate(a)
/// struct A {
/// static int b;
/// #pragma omp threadprivate(b)
/// };
/// \endcode
///
class OMPThreadPrivateDecl final
: public Decl,
private llvm::TrailingObjects<OMPThreadPrivateDecl, Expr *> {
friend class ASTDeclReader;
friend TrailingObjects;
unsigned NumVars;
virtual void anchor();
OMPThreadPrivateDecl(Kind DK, DeclContext *DC, SourceLocation L) :
Decl(DK, DC, L), NumVars(0) { }
ArrayRef<const Expr *> getVars() const {
return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars);
}
MutableArrayRef<Expr *> getVars() {
return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars);
}
void setVars(ArrayRef<Expr *> VL);
public:
static OMPThreadPrivateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L,
ArrayRef<Expr *> VL);
static OMPThreadPrivateDecl *CreateDeserialized(ASTContext &C,
unsigned ID, unsigned N);
typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
typedef llvm::iterator_range<varlist_iterator> varlist_range;
typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;
unsigned varlist_size() const { return NumVars; }
bool varlist_empty() const { return NumVars == 0; }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVars().begin(); }
varlist_iterator varlist_end() { return getVars().end(); }
varlist_const_iterator varlist_begin() const { return getVars().begin(); }
varlist_const_iterator varlist_end() const { return getVars().end(); }
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPThreadPrivate; }
};
/// This represents '#pragma omp declare reduction ...' directive.
/// For example, in the following, declared reduction 'foo' for types 'int' and
/// 'float':
///
/// \code
/// #pragma omp declare reduction (foo : int,float : omp_out += omp_in) \
/// initializer (omp_priv = 0)
/// \endcode
///
/// Here 'omp_out += omp_in' is a combiner and 'omp_priv = 0' is an initializer.
class OMPDeclareReductionDecl final : public ValueDecl, public DeclContext {
// This class stores some data in DeclContext::OMPDeclareReductionDeclBits
// to save some space. Use the provided accessors to access it.
public:
enum InitKind {
CallInit, // Initialized by function call.
DirectInit, // omp_priv(<expr>)
CopyInit // omp_priv = <expr>
};
private:
friend class ASTDeclReader;
/// Combiner for declare reduction construct.
Expr *Combiner = nullptr;
/// Initializer for declare reduction construct.
Expr *Initializer = nullptr;
/// In parameter of the combiner.
Expr *In = nullptr;
/// Out parameter of the combiner.
Expr *Out = nullptr;
/// Priv parameter of the initializer.
Expr *Priv = nullptr;
/// Orig parameter of the initializer.
Expr *Orig = nullptr;
/// Reference to the previous declare reduction construct in the same
/// scope with the same name. Required for proper templates instantiation if
/// the declare reduction construct is declared inside compound statement.
LazyDeclPtr PrevDeclInScope;
void anchor() override;
OMPDeclareReductionDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, QualType Ty,
OMPDeclareReductionDecl *PrevDeclInScope);
void setPrevDeclInScope(OMPDeclareReductionDecl *Prev) {
PrevDeclInScope = Prev;
}
public:
/// Create declare reduction node.
static OMPDeclareReductionDecl *
Create(ASTContext &C, DeclContext *DC, SourceLocation L, DeclarationName Name,
QualType T, OMPDeclareReductionDecl *PrevDeclInScope);
/// Create deserialized declare reduction node.
static OMPDeclareReductionDecl *CreateDeserialized(ASTContext &C,
unsigned ID);
/// Get combiner expression of the declare reduction construct.
Expr *getCombiner() { return Combiner; }
const Expr *getCombiner() const { return Combiner; }
/// Get In variable of the combiner.
Expr *getCombinerIn() { return In; }
const Expr *getCombinerIn() const { return In; }
/// Get Out variable of the combiner.
Expr *getCombinerOut() { return Out; }
const Expr *getCombinerOut() const { return Out; }
/// Set combiner expression for the declare reduction construct.
void setCombiner(Expr *E) { Combiner = E; }
/// Set combiner In and Out vars.
void setCombinerData(Expr *InE, Expr *OutE) {
In = InE;
Out = OutE;
}
/// Get initializer expression (if specified) of the declare reduction
/// construct.
Expr *getInitializer() { return Initializer; }
const Expr *getInitializer() const { return Initializer; }
/// Get initializer kind.
InitKind getInitializerKind() const {
return static_cast<InitKind>(OMPDeclareReductionDeclBits.InitializerKind);
}
/// Get Orig variable of the initializer.
Expr *getInitOrig() { return Orig; }
const Expr *getInitOrig() const { return Orig; }
/// Get Priv variable of the initializer.
Expr *getInitPriv() { return Priv; }
const Expr *getInitPriv() const { return Priv; }
/// Set initializer expression for the declare reduction construct.
void setInitializer(Expr *E, InitKind IK) {
Initializer = E;
OMPDeclareReductionDeclBits.InitializerKind = IK;
}
/// Set initializer Orig and Priv vars.
void setInitializerData(Expr *OrigE, Expr *PrivE) {
Orig = OrigE;
Priv = PrivE;
}
/// Get reference to previous declare reduction construct in the same
/// scope with the same name.
OMPDeclareReductionDecl *getPrevDeclInScope();
const OMPDeclareReductionDecl *getPrevDeclInScope() const;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPDeclareReduction; }
static DeclContext *castToDeclContext(const OMPDeclareReductionDecl *D) {
return static_cast<DeclContext *>(const_cast<OMPDeclareReductionDecl *>(D));
}
static OMPDeclareReductionDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<OMPDeclareReductionDecl *>(
const_cast<DeclContext *>(DC));
}
};
/// This represents '#pragma omp declare mapper ...' directive. Map clauses are
/// allowed to be used with this directive. The following example declares a
/// user-defined mapper for the type 'struct vec'. This example instructs that
/// the fields 'len' and 'data' should be mapped when mapping instances of
/// 'struct vec'.
///
/// \code
/// #pragma omp declare mapper(mid: struct vec v) map(v.len, v.data[0:N])
/// \endcode
class OMPDeclareMapperDecl final : public ValueDecl, public DeclContext {
friend class ASTDeclReader;
/// Clauses associated with this mapper declaration
MutableArrayRef<OMPClause *> Clauses;
/// Mapper variable, which is 'v' in the example above
Expr *MapperVarRef = nullptr;
/// Name of the mapper variable
DeclarationName VarName;
LazyDeclPtr PrevDeclInScope;
void anchor() override;
OMPDeclareMapperDecl(Kind DK, DeclContext *DC, SourceLocation L,
DeclarationName Name, QualType Ty,
DeclarationName VarName,
OMPDeclareMapperDecl *PrevDeclInScope)
: ValueDecl(DK, DC, L, Name, Ty), DeclContext(DK), VarName(VarName),
PrevDeclInScope(PrevDeclInScope) {}
void setPrevDeclInScope(OMPDeclareMapperDecl *Prev) {
PrevDeclInScope = Prev;
}
/// Sets an array of clauses to this mapper declaration
void setClauses(ArrayRef<OMPClause *> CL);
public:
/// Creates declare mapper node.
static OMPDeclareMapperDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, DeclarationName Name,
QualType T, DeclarationName VarName,
OMPDeclareMapperDecl *PrevDeclInScope);
/// Creates deserialized declare mapper node.
static OMPDeclareMapperDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned N);
/// Creates an array of clauses for this mapper declaration and initializes
/// them.
void CreateClauses(ASTContext &C, ArrayRef<OMPClause *> CL);
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range =
llvm::iterator_range<clauselist_const_iterator>;
unsigned clauselist_size() const { return Clauses.size(); }
bool clauselist_empty() const { return Clauses.empty(); }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return Clauses.begin(); }
clauselist_iterator clauselist_end() { return Clauses.end(); }
clauselist_const_iterator clauselist_begin() const { return Clauses.begin(); }
clauselist_const_iterator clauselist_end() const { return Clauses.end(); }
/// Get the variable declared in the mapper
Expr *getMapperVarRef() { return MapperVarRef; }
const Expr *getMapperVarRef() const { return MapperVarRef; }
/// Set the variable declared in the mapper
void setMapperVarRef(Expr *MapperVarRefE) { MapperVarRef = MapperVarRefE; }
/// Get the name of the variable declared in the mapper
DeclarationName getVarName() { return VarName; }
/// Get reference to previous declare mapper construct in the same
/// scope with the same name.
OMPDeclareMapperDecl *getPrevDeclInScope();
const OMPDeclareMapperDecl *getPrevDeclInScope() const;
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPDeclareMapper; }
static DeclContext *castToDeclContext(const OMPDeclareMapperDecl *D) {
return static_cast<DeclContext *>(const_cast<OMPDeclareMapperDecl *>(D));
}
static OMPDeclareMapperDecl *castFromDeclContext(const DeclContext *DC) {
return static_cast<OMPDeclareMapperDecl *>(const_cast<DeclContext *>(DC));
}
};
/// Pseudo declaration for capturing expressions. It is also used for capturing
/// non-static data members in non-static member functions.
///
/// Clang supports capturing of variables only, but OpenMP 4.5 allows
/// privatizing non-static members of the current class in non-static member
/// functions. This pseudo-declaration makes it possible to handle this kind of
/// capture properly by wrapping the captured expression in a variable-like
/// declaration.
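///
/// For example (a sketch; the exact AST shape may vary, and 'use' is a
/// placeholder call), the reference to 'a' below is wrapped in an
/// OMPCapturedExprDecl so that it can be privatized like an ordinary variable:
///
/// \code
/// struct S {
///   int a;
///   void foo() {
/// #pragma omp parallel firstprivate(a)
///     use(a);
///   }
/// };
/// \endcode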
class OMPCapturedExprDecl final : public VarDecl {
friend class ASTDeclReader;
void anchor() override;
OMPCapturedExprDecl(ASTContext &C, DeclContext *DC, IdentifierInfo *Id,
QualType Type, TypeSourceInfo *TInfo,
SourceLocation StartLoc)
: VarDecl(OMPCapturedExpr, C, DC, StartLoc, StartLoc, Id, Type, TInfo,
SC_None) {
setImplicit();
}
public:
static OMPCapturedExprDecl *Create(ASTContext &C, DeclContext *DC,
IdentifierInfo *Id, QualType T,
SourceLocation StartLoc);
static OMPCapturedExprDecl *CreateDeserialized(ASTContext &C, unsigned ID);
SourceRange getSourceRange() const override LLVM_READONLY;
// Implement isa/cast/dyncast/etc.
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPCapturedExpr; }
};
/// This represents '#pragma omp requires ...' directive.
/// For example
///
/// \code
/// #pragma omp requires unified_address
/// \endcode
///
class OMPRequiresDecl final
: public Decl,
private llvm::TrailingObjects<OMPRequiresDecl, OMPClause *> {
friend class ASTDeclReader;
friend TrailingObjects;
// Number of clauses associated with this requires declaration
unsigned NumClauses = 0;
virtual void anchor();
OMPRequiresDecl(Kind DK, DeclContext *DC, SourceLocation L)
: Decl(DK, DC, L), NumClauses(0) {}
/// Returns an array of immutable clauses associated with this requires
/// declaration
ArrayRef<const OMPClause *> getClauses() const {
return llvm::makeArrayRef(getTrailingObjects<OMPClause *>(), NumClauses);
}
/// Returns an array of clauses associated with this requires declaration
MutableArrayRef<OMPClause *> getClauses() {
return MutableArrayRef<OMPClause *>(getTrailingObjects<OMPClause *>(),
NumClauses);
}
/// Sets an array of clauses to this requires declaration
void setClauses(ArrayRef<OMPClause *> CL);
public:
/// Create requires node.
static OMPRequiresDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, ArrayRef<OMPClause *> CL);
/// Create deserialized requires node.
static OMPRequiresDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned N);
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range = llvm::iterator_range<clauselist_const_iterator>;
unsigned clauselist_size() const { return NumClauses; }
bool clauselist_empty() const { return NumClauses == 0; }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return getClauses().begin(); }
clauselist_iterator clauselist_end() { return getClauses().end(); }
clauselist_const_iterator clauselist_begin() const {
return getClauses().begin();
}
clauselist_const_iterator clauselist_end() const {
return getClauses().end();
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPRequires; }
};
/// This represents '#pragma omp allocate ...' directive.
/// For example, in the following, the default allocator is used for both 'a'
/// and 'A::b':
///
/// \code
/// int a;
/// #pragma omp allocate(a)
/// struct A {
/// static int b;
/// #pragma omp allocate(b)
/// };
/// \endcode
///
class OMPAllocateDecl final
: public Decl,
private llvm::TrailingObjects<OMPAllocateDecl, Expr *, OMPClause *> {
friend class ASTDeclReader;
friend TrailingObjects;
/// Number of variables within the allocate directive.
unsigned NumVars = 0;
/// Number of clauses associated with the allocate directive.
unsigned NumClauses = 0;
size_t numTrailingObjects(OverloadToken<Expr *>) const {
return NumVars;
}
size_t numTrailingObjects(OverloadToken<OMPClause *>) const {
return NumClauses;
}
virtual void anchor();
OMPAllocateDecl(Kind DK, DeclContext *DC, SourceLocation L)
: Decl(DK, DC, L) {}
ArrayRef<const Expr *> getVars() const {
return llvm::makeArrayRef(getTrailingObjects<Expr *>(), NumVars);
}
MutableArrayRef<Expr *> getVars() {
return MutableArrayRef<Expr *>(getTrailingObjects<Expr *>(), NumVars);
}
void setVars(ArrayRef<Expr *> VL);
/// Returns an array of immutable clauses associated with this directive.
ArrayRef<OMPClause *> getClauses() const {
return llvm::makeArrayRef(getTrailingObjects<OMPClause *>(), NumClauses);
}
/// Returns an array of clauses associated with this directive.
MutableArrayRef<OMPClause *> getClauses() {
return MutableArrayRef<OMPClause *>(getTrailingObjects<OMPClause *>(),
NumClauses);
}
/// Sets an array of clauses to this allocate declaration
void setClauses(ArrayRef<OMPClause *> CL);
public:
static OMPAllocateDecl *Create(ASTContext &C, DeclContext *DC,
SourceLocation L, ArrayRef<Expr *> VL,
ArrayRef<OMPClause *> CL);
static OMPAllocateDecl *CreateDeserialized(ASTContext &C, unsigned ID,
unsigned NVars, unsigned NClauses);
typedef MutableArrayRef<Expr *>::iterator varlist_iterator;
typedef ArrayRef<const Expr *>::iterator varlist_const_iterator;
typedef llvm::iterator_range<varlist_iterator> varlist_range;
typedef llvm::iterator_range<varlist_const_iterator> varlist_const_range;
using clauselist_iterator = MutableArrayRef<OMPClause *>::iterator;
using clauselist_const_iterator = ArrayRef<const OMPClause *>::iterator;
using clauselist_range = llvm::iterator_range<clauselist_iterator>;
using clauselist_const_range = llvm::iterator_range<clauselist_const_iterator>;
unsigned varlist_size() const { return NumVars; }
bool varlist_empty() const { return NumVars == 0; }
unsigned clauselist_size() const { return NumClauses; }
bool clauselist_empty() const { return NumClauses == 0; }
varlist_range varlists() {
return varlist_range(varlist_begin(), varlist_end());
}
varlist_const_range varlists() const {
return varlist_const_range(varlist_begin(), varlist_end());
}
varlist_iterator varlist_begin() { return getVars().begin(); }
varlist_iterator varlist_end() { return getVars().end(); }
varlist_const_iterator varlist_begin() const { return getVars().begin(); }
varlist_const_iterator varlist_end() const { return getVars().end(); }
clauselist_range clauselists() {
return clauselist_range(clauselist_begin(), clauselist_end());
}
clauselist_const_range clauselists() const {
return clauselist_const_range(clauselist_begin(), clauselist_end());
}
clauselist_iterator clauselist_begin() { return getClauses().begin(); }
clauselist_iterator clauselist_end() { return getClauses().end(); }
clauselist_const_iterator clauselist_begin() const {
return getClauses().begin();
}
clauselist_const_iterator clauselist_end() const {
return getClauses().end();
}
static bool classof(const Decl *D) { return classofKind(D->getKind()); }
static bool classofKind(Kind K) { return K == OMPAllocate; }
};
} // end namespace clang
#endif
|
kernel_GICOV.h |
#pragma omp target teams distribute parallel for thread_limit(work_group_size)
for (int gid = 0; gid < global_work_size; gid++) {
// Determine this thread's pixel
int i = gid/local_work_size + MAX_RAD + 2;
int j = gid%local_work_size + MAX_RAD + 2;
// Initialize the maximal GICOV score to 0
float max_GICOV = 0.f;
// Iterate across each stencil
for (int k = 0; k < NCIRCLES; k++) {
// Variables used to compute the mean and variance
// of the gradients along the current stencil
float sum = 0.f, M2 = 0.f, mean = 0.f;
// Iterate across each sample point in the current stencil
for (int n = 0; n < NPOINTS; n++) {
// Determine the x- and y-coordinates of the current sample point
int y = j + host_tY[(k * NPOINTS) + n];
int x = i + host_tX[(k * NPOINTS) + n];
// Compute the combined gradient value at the current sample point
int addr = x * grad_m + y;
float p = host_grad_x[addr] * host_cos_angle[n] +
host_grad_y[addr] * host_sin_angle[n];
// Update the running total
sum += p;
// Partially compute the variance
float delta = p - mean;
mean = mean + (delta / (float) (n + 1));
M2 = M2 + (delta * (p - mean));
}
// Finish computing the mean
mean = sum / ((float) NPOINTS);
// Finish computing the variance
float var = M2 / ((float) (NPOINTS - 1));
// Keep track of the maximal GICOV value seen so far
if (((mean * mean) / var) > max_GICOV) max_GICOV = (mean * mean) / var;
}
// Store the maximal GICOV value
host_gicov[(i * grad_m) + j] = max_GICOV;
}
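/* Illustrative note (not part of the original kernel): the inner loop above
   is Welford's streaming update, which after samples p_1..p_n maintains
   mean = (1/n)*sum(p_i) and M2 = sum_i (p_i - mean)^2, so that
   var = M2/(n-1) is the sample variance. E.g. for samples {1, 2, 3}:
   mean = 2, M2 = 2, var = 1. The final mean is then recomputed directly
   from 'sum' before forming the GICOV score mean^2 / var. */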
|
GB_unaryop__identity_uint64_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__identity_uint64_uint32
// op(A') function: GB_tran__identity_uint64_uint32
// C type: uint64_t
// A type: uint32_t
// cast: uint64_t cij = (uint64_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, x) \
uint64_t z = (uint64_t) x ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (x, aij) ; \
GB_OP (GB_CX (pC), x) ; \
}
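// For reference (expansion sketch, not in the original generated file):
// with the definitions above, GB_CAST_OP (p, p) expands to
//   uint32_t aij = Ax [p] ;
//   uint64_t x = (uint64_t) aij ;
//   Cx [p] = x ;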
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT64 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__identity_uint64_uint32
(
uint64_t *restrict Cx,
const uint32_t *restrict Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (int64_t p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__identity_uint64_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t **Rowcounts,
GBI_single_iterator Iter,
const int64_t *restrict A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
quantize.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_UTILS_QUANTIZE_H_
#define MACE_UTILS_QUANTIZE_H_
#include <algorithm>
#include <cmath>  // fabs, roundf, roundl, frexp, ceil
#include <limits>
namespace mace {
template<typename T>
inline void AdjustRange(const float in_min_data,
const float in_max_data,
const bool non_zero,
float *scale,
int32_t *zero_point) {
  // Re-range so that the representable range includes 0.0f and so that
  // 0.0f maps exactly to an integer quantized value (e.g. a u8 level).
const T quantized_min = std::numeric_limits<T>::lowest();
const T quantized_max = std::numeric_limits<T>::max();
if (quantized_min < 0) {
MACE_ASSERT(!non_zero, "Cannot nudge to non_zero quantize value.");
}
float out_max = std::max(0.f, in_max_data);
float out_min = std::min(0.f, in_min_data);
  // nudge out_min so that in_min_data quantizes to a value of at least 1
if (non_zero) {
out_min = std::min(out_min,
in_min_data - (out_max - in_min_data)
/ (quantized_max - quantized_min - 1));
}
*scale = (out_max - out_min) / (quantized_max - quantized_min);
const float kEps = 1e-6;
if (out_min < -kEps && out_max > kEps) {
float quantized_zero = -out_min / *scale;
int32_t
quantized_zero_near_int = static_cast<int32_t>(roundf(quantized_zero));
*zero_point = quantized_zero_near_int;
if (fabs(quantized_zero - quantized_zero_near_int) > kEps) {
if (quantized_zero < quantized_zero_near_int || non_zero) {
// keep out_max fixed, and move out_min
*zero_point = static_cast<int32_t>(std::ceil(quantized_zero));
*scale = out_max / (quantized_max - *zero_point);
} else {
// keep out_min fixed, and move out_max
*scale = out_min / (quantized_min - *zero_point);
}
}
} else if (out_min > -kEps) {
*zero_point = quantized_min;
} else {
*zero_point = quantized_max;
}
}
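// Worked example (illustrative, T = uint8_t): with in_min_data = -1.0f,
// in_max_data = 3.0f, non_zero = false: out_min = -1, out_max = 3,
// scale = 4/255 ~= 0.015686, quantized_zero = 1/scale = 63.75. That is
// not integral and 63.75 < 64, so out_max is kept fixed:
// zero_point = ceil(63.75) = 64 and scale = 3/(255 - 64) ~= 0.015707.
// Now 0.0f maps exactly to 64, the top of the range still maps to 3.0f,
// and the bottom widens slightly to (0 - 64)*scale ~= -1.005.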
template<typename T>
inline T Saturate(float value) {
int rounded_value = static_cast<int>(value);
if (rounded_value <= std::numeric_limits<T>::lowest()) {
return std::numeric_limits<T>::lowest();
} else if (rounded_value >= std::numeric_limits<T>::max()) {
return std::numeric_limits<T>::max();
} else {
return static_cast<T>(rounded_value);
}
}
inline void FindMinMax(const float *input,
const index_t size,
float *min_val, float *max_val) {
float max_v = std::numeric_limits<float>::lowest();
float min_v = std::numeric_limits<float>::max();
for (index_t i = 0; i < size; ++i) {
max_v = std::max(max_v, input[i]);
min_v = std::min(min_v, input[i]);
}
*min_val = min_v;
*max_val = max_v;
}
template<typename T>
inline void QuantizeWithScaleAndZeropoint(const float *input,
const index_t size,
float scale,
int32_t zero_point,
T *output) {
float recip_scale = 1 / scale;
#pragma omp parallel for
for (int i = 0; i < size; ++i) {
output[i] = Saturate<T>(roundf(zero_point + recip_scale * input[i]));
}
}
template<typename T>
inline void Quantize(const float *input,
const index_t size,
bool non_zero,
T *output,
float *scale,
int32_t *zero_point) {
float in_min_data;
float in_max_data;
FindMinMax(input, size, &in_min_data, &in_max_data);
AdjustRange<T>(in_min_data, in_max_data, non_zero,
scale, zero_point);
QuantizeWithScaleAndZeropoint(input, size, *scale, *zero_point, output);
}
template<typename T>
inline void Dequantize(const T *input,
const index_t size,
const float scale,
const int32_t zero_point,
float *output) {
#pragma omp parallel for
for (int i = 0; i < size; ++i) {
output[i] = scale * (input[i] - zero_point);
}
}
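// Illustrative round trip (assumed driver code, not part of the original):
//   float in[4] = {-1.f, 0.f, 1.5f, 3.f};
//   uint8_t q[4]; float scale; int32_t zp; float out[4];
//   Quantize<uint8_t>(in, 4, false, q, &scale, &zp);
//   Dequantize<uint8_t>(q, 4, scale, zp, out);
// Each out[i] then differs from in[i] by at most scale/2, since
// quantization rounds to the nearest representable level.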
inline void QuantizeMultiplier(double multiplier,
int32_t* output_multiplier,
int32_t* shift) {
if (multiplier == 0.f) {
*output_multiplier = 0;
*shift = 0;
return;
}
const double q = std::frexp(multiplier, shift);
auto qint = static_cast<int64_t>(roundl(q * (1ll << 31)));
if (qint == (1ll << 31)) {
qint /= 2;
++*shift;
}
*output_multiplier = static_cast<int32_t>(qint);
MACE_CHECK(*output_multiplier <= std::numeric_limits<int32_t>::max());
}
inline void GetOutputMultiplierAndShift(
const float lhs_scale, const float rhs_scale, const float output_scale,
int32_t *quantized_multiplier, int *right_shift) {
float real_multiplier = lhs_scale * rhs_scale / output_scale;
MACE_CHECK(real_multiplier > 0.f && real_multiplier < 1.f, real_multiplier);
int exponent;
QuantizeMultiplier(real_multiplier, quantized_multiplier, &exponent);
*right_shift = -exponent;
MACE_CHECK(*right_shift >= 0);
}
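// Worked example (illustrative): real_multiplier = 0.375 gives
// std::frexp -> 0.75 * 2^-1, so exponent = -1, right_shift = 1, and
// output_multiplier = round(0.75 * 2^31) = 1610612736. A requantization
// can then compute (x * 1610612736) >> (31 + 1) = 0.375 * x using only
// integer arithmetic.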
} // namespace mace
#endif // MACE_UTILS_QUANTIZE_H_
|
dense_minmax.c | /* Copyright (c) 2016-2017 Drew Schmidt
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <stdlib.h>
#include <string.h>
#include "coop.h"
#include "utils/fill.h"
#include "utils/inverse.h"
#include "utils/safeomp.h"
#include "utils/special_vals.h"
#define TYPE_MIN 1
#define TYPE_MAX 2
#define TYPE_ABSMIN 3
#define TYPE_ABSMAX 4
typedef struct
{
int K; // Number of comparisons to retain
double *restrict co; // co-variance, sine, ...
int *restrict I; // i
int *restrict J; // j
int *restrict L; // length of run (number of comparisons)
} maxco_t;
// almost-sorted insertion
// A[0] holds a new candidate value, and A[1..K-1] is sorted increasing;
// slide the elements smaller than the candidate down one slot, then drop
// the candidate into place, bringing I, J, and L along for the ride
static inline void assort(const register int K, double *restrict A, int *restrict I, int *restrict J, int *restrict L)
{
  int ind;
  const register double atmp = A[0];
  const register int itmp = I[0];
  const register int jtmp = J[0];
  const register int ltmp = L[0];
  for (ind=1; ind<K && A[ind] < atmp; ind++)
  {
    A[ind - 1] = A[ind];
    I[ind - 1] = I[ind];
    J[ind - 1] = J[ind];
    L[ind - 1] = L[ind];
  }
  A[ind - 1] = atmp;
  I[ind - 1] = itmp;
  J[ind - 1] = jtmp;
  L[ind - 1] = ltmp;
}
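// Example (illustrative): K = 4, A = {5, 1, 3, 7} -- the candidate 5 in
// slot 0 slides past 1 and 3, yielding A = {1, 3, 5, 7}, with I, J, and
// L permuted identically.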
// A sorted least to greatest
static inline void rename_me(const int type, const double cmp, const int K, maxco_t *mx)
{
if (type == TYPE_MIN)
{
// TODO
}
else if (type == TYPE_ABSMIN)
{
// TODO
}
  else if (type == TYPE_MAX)
  {
    if (cmp > mx->co[0])
    {
      // candidate beats the smallest retained value; the caller is
      // expected to have staged the candidate's indices in slot 0 of
      // I, J, and L before the insertion sort runs
      mx->co[0] = cmp;
      assort(mx->K, mx->co, mx->I, mx->J, mx->L);
    }
  }
else if (type == TYPE_ABSMAX)
{
// TODO
}
}
static inline void compute_sums(const int m, const int mi, const double * const restrict vec, const double * const restrict x, double *restrict sumx, double *restrict sumy, int *restrict len)
{
int k;
*sumx = 0;
*sumy = 0;
*len = 0;
PLEASE_VECTORIZE
for (k=0; k<m; k++)
{
if (!isnan(vec[k]) && !isnan(x[k + mi]))
{
*sumx += vec[k];
*sumy += x[k + mi];
(*len)++;
}
}
}
// cor - vals, I/J - their indices, L - length of the run
int coop_maxpcor_mat_inplace_pairwise(const bool inv, const int m, const int n, const double * const restrict x, maxco_t *mx)
{
  int ind = 0;
double *vec = malloc(m * sizeof(*vec));
CHECKMALLOC(vec);
for (int j=0; j<n; j++)
{
const int mj = m*j;
memcpy(vec, x+mj, m*sizeof(*vec));
// #pragma omp parallel for default(none) shared(j, vec) if(m*n > OMP_MIN_SIZE)
for (int i=j; i<n; i++)
{
const int mi = m*i;
int len;
double meanx, meany;
compute_sums(m, mi, vec, x, &meanx, &meany, &len);
      if (len == 0 || len == 1)
      {
        // fewer than 2 complete observations: the correlation for this
        // pair is undefined, so record the pair with an NA value
        // (the original referenced a full 'cor' matrix that this top-K
        // variant never allocates)
        set_na_real(mx->co);
        mx->I[ind] = i;
        mx->J[ind] = j;
        mx->L[ind] = len;
        ind++;
        continue;
      }
const double dlen = (double) len;
meanx /= dlen;
meany /= dlen;
double sdx = 0.;
double sdy = 0.;
SAFE_SIMD
for (int k=0; k<m; k++)
{
if (!isnan(vec[k]) && !isnan(x[k + mi]))
{
sdx += (vec[k] - meanx)*(vec[k] - meanx);
sdy += (x[k + mi] - meany)*(x[k + mi] - meany);
}
}
sdx = sqrt(sdx/(dlen-1.));
sdy = sqrt(sdy/(dlen-1.));
double mmcp = 0.0;
SAFE_SIMD
for (int k=0; k<m; k++)
{
if (!isnan(vec[k]) && !isnan(x[k + mi]))
mmcp += (vec[k] - meanx) * (x[k + mi] - meany);
}
      rename_me(TYPE_MAX, mmcp*dlen, mx->K, mx);
// cor[i + n*j] = mmcp / sdx / sdy / (dlen - 1.0);;
}
}
free(vec);
  if (inv)
  {
    // NOTE: the dense variant inverts the full correlation matrix here via
    // inv_sym_chol(); this top-K variant never materializes that matrix,
    // so there is nothing to invert.
  }
return COOP_OK;
}
|
9706.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "atax.h"
/* Array initialization. */
static
void init_array (int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny))
{
int i, j;
for (i = 0; i < ny; i++)
x[i] = i * M_PI;
for (i = 0; i < nx; i++)
for (j = 0; j < ny; j++)
A[i][j] = ((DATA_TYPE) i*(j+1)) / nx;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int nx,
DATA_TYPE POLYBENCH_1D(y,NX,nx))
{
int i;
for (i = 0; i < nx; i++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, y[i]);
if (i % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_atax(int nx, int ny,
DATA_TYPE POLYBENCH_2D(A,NX,NY,nx,ny),
DATA_TYPE POLYBENCH_1D(x,NY,ny),
DATA_TYPE POLYBENCH_1D(y,NY,ny),
DATA_TYPE POLYBENCH_1D(tmp,NX,nx))
{
int i, j;
#pragma scop
/* The enclosing '#pragma omp parallel' region was dropped: it made every
   thread execute both loops redundantly and race on the y[] updates.
   The two worksharing constructs below are self-contained. */
#pragma omp parallel for schedule(static, 1) num_threads(1)
for (i = 0; i < _PB_NY; i++)
y[i] = 0;
#pragma omp parallel for private (j) schedule(static, 1) num_threads(1)
for (i = 0; i < _PB_NX; i++)
{
tmp[i] = 0;
for (j = 0; j < _PB_NY; j++)
tmp[i] = tmp[i] + A[i][j] * x[j];
for (j = 0; j < _PB_NY; j++)
y[j] = y[j] + A[i][j] * tmp[i];
}
#pragma endscop
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int nx = NX;
int ny = NY;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NX, NY, nx, ny);
POLYBENCH_1D_ARRAY_DECL(x, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(y, DATA_TYPE, NY, ny);
POLYBENCH_1D_ARRAY_DECL(tmp, DATA_TYPE, NX, nx);
/* Initialize array(s). */
init_array (nx, ny, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(x));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_atax (nx, ny,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(x),
POLYBENCH_ARRAY(y),
POLYBENCH_ARRAY(tmp));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(nx, POLYBENCH_ARRAY(y)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(x);
POLYBENCH_FREE_ARRAY(y);
POLYBENCH_FREE_ARRAY(tmp);
return 0;
}
|
rwpng.c | /*---------------------------------------------------------------------------
pngquant: RGBA -> RGBA-palette quantization program rwpng.c
---------------------------------------------------------------------------
© 1998-2000 by Greg Roelofs.
© 2009-2014 by Kornel Lesiński.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
---------------------------------------------------------------------------*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "png.h"
#include "rwpng.h"
#if USE_LCMS
#include "lcms2.h"
#endif
#ifndef Z_BEST_COMPRESSION
#define Z_BEST_COMPRESSION 9
#endif
#ifndef Z_BEST_SPEED
#define Z_BEST_SPEED 1
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_max_threads() 1
#endif
#if PNG_LIBPNG_VER < 10500
typedef png_const_charp png_const_bytep;
#endif
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg);
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg);
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg);
int rwpng_read_image24_cocoa(FILE *infile, png24_image *mainprog_ptr);
void rwpng_version_info(FILE *fp)
{
const char *pngver = png_get_header_ver(NULL);
#if USE_COCOA
fprintf(fp, " Using Apple Cocoa image reader and libpng %s.\n", pngver);
#elif USE_LCMS
fprintf(fp, " Using libpng %s with Little CMS color profile support.\n", pngver);
#else
fprintf(fp, " Using libpng %s.\n", pngver);
#endif
#if PNG_LIBPNG_VER < 10600
if (strcmp(pngver, "1.3.") < 0) {
fputs("\nWARNING: Your version of libpng is outdated and may produce corrupted files.\n"
"Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
} else if (strcmp(pngver, "1.6.") < 0) {
#if defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
fputs("\nWARNING: Your version of libpng is old and has buggy support for custom chunks.\n"
"Please recompile pngquant with the current version of libpng (1.6 or later).\n", fp);
#endif
}
#endif
}
struct rwpng_read_data {
FILE *const fp;
png_size_t bytes_read;
};
static void user_read_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
struct rwpng_read_data *read_data = (struct rwpng_read_data *)png_get_io_ptr(png_ptr);
png_size_t read = fread(data, 1, length, read_data->fp);
if (!read) {
png_error(png_ptr, "Read error");
}
read_data->bytes_read += read;
}
struct rwpng_write_state {
FILE *outfile;
png_size_t maximum_file_size;
png_size_t bytes_written;
pngquant_error retval;
};
static void user_write_data(png_structp png_ptr, png_bytep data, png_size_t length)
{
struct rwpng_write_state *write_state = (struct rwpng_write_state *)png_get_io_ptr(png_ptr);
if (SUCCESS != write_state->retval) {
return;
}
if (!fwrite(data, length, 1, write_state->outfile)) {
write_state->retval = CANT_WRITE_ERROR;
}
write_state->bytes_written += length;
}
static void user_flush_data(png_structp png_ptr)
{
// libpng never calls this :(
}
static png_bytepp rwpng_create_row_pointers(png_infop info_ptr, png_structp png_ptr, unsigned char *base, unsigned int height, unsigned int rowbytes)
{
if (!rowbytes) {
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
}
png_bytepp row_pointers = malloc(height * sizeof(row_pointers[0]));
if (!row_pointers) return NULL;
for(unsigned int row = 0; row < height; ++row) {
row_pointers[row] = base + row * rowbytes;
}
return row_pointers;
}
static int read_chunk_callback(png_structp png_ptr, png_unknown_chunkp in_chunk)
{
if (0 == memcmp("iCCP", in_chunk->name, 5) ||
0 == memcmp("cHRM", in_chunk->name, 5) ||
0 == memcmp("gAMA", in_chunk->name, 5)) {
return 0; // not handled
}
struct rwpng_chunk **head = (struct rwpng_chunk **)png_get_user_chunk_ptr(png_ptr);
    struct rwpng_chunk *chunk = malloc(sizeof(struct rwpng_chunk));
    if (!chunk) return 0; // out of memory; let libpng keep the chunk
memcpy(chunk->name, in_chunk->name, 5);
chunk->size = in_chunk->size;
chunk->location = in_chunk->location;
chunk->data = in_chunk->size ? malloc(in_chunk->size) : NULL;
if (in_chunk->size) {
memcpy(chunk->data, in_chunk->data, in_chunk->size);
}
chunk->next = *head;
*head = chunk;
return 1; // marks as "handled", libpng won't store it
}
/*
retval:
0 = success
21 = bad sig
22 = bad IHDR
24 = insufficient memory
25 = libpng error (via longjmp())
26 = wrong PNG color type (no alpha channel)
*/
pngquant_error rwpng_read_image24_libpng(FILE *infile, png24_image *mainprog_ptr, int verbose)
{
png_structp png_ptr = NULL;
png_infop info_ptr = NULL;
png_size_t rowbytes;
int color_type, bit_depth;
png_ptr = png_create_read_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr,
rwpng_error_handler, verbose ? rwpng_warning_stderr_handler : rwpng_warning_silent_handler);
if (!png_ptr) {
return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
}
info_ptr = png_create_info_struct(png_ptr);
if (!info_ptr) {
png_destroy_read_struct(&png_ptr, NULL, NULL);
return PNG_OUT_OF_MEMORY_ERROR; /* out of memory */
}
/* setjmp() must be called in every function that calls a non-trivial
* libpng function */
if (setjmp(mainprog_ptr->jmpbuf)) {
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return LIBPNG_FATAL_ERROR; /* fatal libpng error (via longjmp()) */
}
#if PNG_LIBPNG_VER >= 10500 && defined(PNG_UNKNOWN_CHUNKS_SUPPORTED)
/* copy standard chunks too */
png_set_keep_unknown_chunks(png_ptr, PNG_HANDLE_CHUNK_IF_SAFE, (png_const_bytep)"pHYs\0iTXt\0tEXt\0zTXt", 4);
#endif
png_set_read_user_chunk_fn(png_ptr, &mainprog_ptr->chunks, read_chunk_callback);
struct rwpng_read_data read_data = {infile, 0};
png_set_read_fn(png_ptr, &read_data, user_read_data);
png_read_info(png_ptr, info_ptr); /* read all PNG info up to image data */
/* alternatively, could make separate calls to png_get_image_width(),
* etc., but want bit_depth and color_type for later [don't care about
* compression_type and filter_type => NULLs] */
png_get_IHDR(png_ptr, info_ptr, &mainprog_ptr->width, &mainprog_ptr->height,
&bit_depth, &color_type, NULL, NULL, NULL);
/* expand palette images to RGB, low-bit-depth grayscale images to 8 bits,
* transparency chunks to full alpha channel; strip 16-bit-per-sample
* images to 8 bits per sample; and convert grayscale to RGB[A] */
/* GRR TO DO: preserve all safe-to-copy ancillary PNG chunks */
if (!(color_type & PNG_COLOR_MASK_ALPHA)) {
#ifdef PNG_READ_FILLER_SUPPORTED
png_set_expand(png_ptr);
png_set_filler(png_ptr, 65535L, PNG_FILLER_AFTER);
#else
fprintf(stderr, "pngquant readpng: image is neither RGBA nor GA\n");
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
mainprog_ptr->retval = WRONG_INPUT_COLOR_TYPE;
return mainprog_ptr->retval;
#endif
}
if (bit_depth == 16) {
png_set_strip_16(png_ptr);
}
if (!(color_type & PNG_COLOR_MASK_COLOR)) {
png_set_gray_to_rgb(png_ptr);
}
/* get source gamma for gamma correction, or use sRGB default */
double gamma = 0.45455;
if (!png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB)) {
png_get_gAMA(png_ptr, info_ptr, &gamma);
if (gamma < 0 || gamma > 1.0) {
fprintf(stderr, "pngquant readpng: ignored out-of-range gamma %f\n", gamma);
gamma = 0.45455;
}
}
mainprog_ptr->gamma = gamma;
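    /* Note (illustrative): 0.45455 ~= 1/2.2, i.e. the file-to-linear
     * decoding gamma assumed for sRGB-like images when no gAMA chunk
     * overrides it. */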
png_set_interlace_handling(png_ptr);
/* all transformations have been registered; now update info_ptr data,
* get rowbytes and channels, and allocate image memory */
png_read_update_info(png_ptr, info_ptr);
rowbytes = png_get_rowbytes(png_ptr, info_ptr);
if ((mainprog_ptr->rgba_data = malloc(rowbytes*mainprog_ptr->height)) == NULL) {
fprintf(stderr, "pngquant readpng: unable to allocate image data\n");
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
return PNG_OUT_OF_MEMORY_ERROR;
}
    png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);
    if (!row_pointers) {
        fprintf(stderr, "pngquant readpng: unable to allocate row pointers\n");
        free(mainprog_ptr->rgba_data);
        mainprog_ptr->rgba_data = NULL;
        png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
        return PNG_OUT_OF_MEMORY_ERROR;
    }
/* now we can go ahead and just read the whole image */
png_read_image(png_ptr, row_pointers);
/* and we're done! (png_read_end() can be omitted if no processing of
* post-IDAT text/time/etc. is desired) */
png_read_end(png_ptr, NULL);
#if USE_LCMS
#if PNG_LIBPNG_VER < 10500
png_charp ProfileData;
#else
png_bytep ProfileData;
#endif
png_uint_32 ProfileLen;
cmsHPROFILE hInProfile = NULL;
/* color_type is read from the image before conversion to RGBA */
int COLOR_PNG = color_type & PNG_COLOR_MASK_COLOR;
mainprog_ptr->lcms_status = NONE;
/* embedded ICC profile */
if (png_get_iCCP(png_ptr, info_ptr, &(png_charp){0}, &(int){0}, &ProfileData, &ProfileLen)) {
hInProfile = cmsOpenProfileFromMem(ProfileData, ProfileLen);
cmsColorSpaceSignature colorspace = cmsGetColorSpace(hInProfile);
/* only RGB (and GRAY) valid for PNGs */
if (colorspace == cmsSigRgbData && COLOR_PNG) {
mainprog_ptr->lcms_status = ICCP;
} else {
if (colorspace == cmsSigGrayData && !COLOR_PNG) {
mainprog_ptr->lcms_status = ICCP_WARN_GRAY;
}
cmsCloseProfile(hInProfile);
hInProfile = NULL;
}
}
/* build RGB profile from cHRM and gAMA */
if (hInProfile == NULL && COLOR_PNG &&
!png_get_valid(png_ptr, info_ptr, PNG_INFO_sRGB) &&
png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) &&
png_get_valid(png_ptr, info_ptr, PNG_INFO_cHRM)) {
cmsCIExyY WhitePoint;
cmsCIExyYTRIPLE Primaries;
png_get_cHRM(png_ptr, info_ptr, &WhitePoint.x, &WhitePoint.y,
&Primaries.Red.x, &Primaries.Red.y,
&Primaries.Green.x, &Primaries.Green.y,
&Primaries.Blue.x, &Primaries.Blue.y);
WhitePoint.Y = Primaries.Red.Y = Primaries.Green.Y = Primaries.Blue.Y = 1.0;
cmsToneCurve *GammaTable[3];
GammaTable[0] = GammaTable[1] = GammaTable[2] = cmsBuildGamma(NULL, 1/gamma);
hInProfile = cmsCreateRGBProfile(&WhitePoint, &Primaries, GammaTable);
cmsFreeToneCurve(GammaTable[0]);
mainprog_ptr->lcms_status = GAMA_CHRM;
}
/* transform image to sRGB colorspace */
if (hInProfile != NULL) {
cmsHPROFILE hOutProfile = cmsCreate_sRGBProfile();
cmsHTRANSFORM hTransform = cmsCreateTransform(hInProfile, TYPE_RGBA_8,
hOutProfile, TYPE_RGBA_8,
INTENT_PERCEPTUAL,
omp_get_max_threads() > 1 ? cmsFLAGS_NOCACHE : 0);
#pragma omp parallel for \
if (mainprog_ptr->height*mainprog_ptr->width > 8000) \
schedule(static)
for (unsigned int i = 0; i < mainprog_ptr->height; i++) {
/* It is safe to use the same block for input and output,
when both are of the same TYPE. */
cmsDoTransform(hTransform, row_pointers[i],
row_pointers[i],
mainprog_ptr->width);
}
cmsDeleteTransform(hTransform);
cmsCloseProfile(hOutProfile);
cmsCloseProfile(hInProfile);
mainprog_ptr->gamma = 0.45455;
}
#endif
png_destroy_read_struct(&png_ptr, &info_ptr, NULL);
mainprog_ptr->file_size = read_data.bytes_read;
mainprog_ptr->row_pointers = (unsigned char **)row_pointers;
return SUCCESS;
}
static void rwpng_free_chunks(struct rwpng_chunk *chunk) {
if (!chunk) return;
rwpng_free_chunks(chunk->next);
free(chunk->data);
free(chunk);
}
void rwpng_free_image24(png24_image *image)
{
free(image->row_pointers);
image->row_pointers = NULL;
free(image->rgba_data);
image->rgba_data = NULL;
rwpng_free_chunks(image->chunks);
image->chunks = NULL;
}
void rwpng_free_image8(png8_image *image)
{
free(image->indexed_data);
image->indexed_data = NULL;
free(image->row_pointers);
image->row_pointers = NULL;
rwpng_free_chunks(image->chunks);
image->chunks = NULL;
}
pngquant_error rwpng_read_image24(FILE *infile, png24_image *input_image_p, int verbose)
{
#if USE_COCOA
return rwpng_read_image24_cocoa(infile, input_image_p);
#else
return rwpng_read_image24_libpng(infile, input_image_p, verbose);
#endif
}
static pngquant_error rwpng_write_image_init(rwpng_png_image *mainprog_ptr, png_structpp png_ptr_p, png_infopp info_ptr_p, int fast_compression)
{
/* could also replace libpng warning-handler (final NULL), but no need: */
*png_ptr_p = png_create_write_struct(PNG_LIBPNG_VER_STRING, mainprog_ptr, rwpng_error_handler, NULL);
if (!(*png_ptr_p)) {
return LIBPNG_INIT_ERROR; /* out of memory */
}
*info_ptr_p = png_create_info_struct(*png_ptr_p);
if (!(*info_ptr_p)) {
png_destroy_write_struct(png_ptr_p, NULL);
return LIBPNG_INIT_ERROR; /* out of memory */
}
/* setjmp() must be called in every function that calls a PNG-writing
* libpng function, unless an alternate error handler was installed--
* but compatible error handlers must either use longjmp() themselves
* (as in this program) or exit immediately, so here we go: */
if (setjmp(mainprog_ptr->jmpbuf)) {
png_destroy_write_struct(png_ptr_p, info_ptr_p);
return LIBPNG_INIT_ERROR; /* libpng error (via longjmp()) */
}
png_set_compression_level(*png_ptr_p, fast_compression ? Z_BEST_SPEED : Z_BEST_COMPRESSION);
png_set_compression_mem_level(*png_ptr_p, fast_compression ? 9 : 5); // judging by optipng results, smaller mem makes libpng compress slightly better
return SUCCESS;
}
void rwpng_write_end(png_infopp info_ptr_p, png_structpp png_ptr_p, png_bytepp row_pointers)
{
png_write_info(*png_ptr_p, *info_ptr_p);
png_set_packing(*png_ptr_p);
png_write_image(*png_ptr_p, row_pointers);
png_write_end(*png_ptr_p, NULL);
png_destroy_write_struct(png_ptr_p, info_ptr_p);
}
void rwpng_set_gamma(png_infop info_ptr, png_structp png_ptr, double gamma)
{
/* remap sets gamma to 0.45455 */
png_set_gAMA(png_ptr, info_ptr, gamma);
png_set_sRGB(png_ptr, info_ptr, 0); // 0 = Perceptual
}
pngquant_error rwpng_write_image8(FILE *outfile, const png8_image *mainprog_ptr)
{
png_structp png_ptr;
png_infop info_ptr;
pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, mainprog_ptr->fast_compression);
if (retval) return retval;
struct rwpng_write_state write_state;
write_state = (struct rwpng_write_state){
.outfile = outfile,
.maximum_file_size = mainprog_ptr->maximum_file_size,
.retval = SUCCESS,
};
png_set_write_fn(png_ptr, &write_state, user_write_data, user_flush_data);
// Palette images generally don't gain anything from filtering
png_set_filter(png_ptr, PNG_FILTER_TYPE_BASE, PNG_FILTER_VALUE_NONE);
rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma);
/* set the image parameters appropriately */
int sample_depth;
#if PNG_LIBPNG_VER > 10400 /* old libpng corrupts files with low depth */
if (mainprog_ptr->num_palette <= 2)
sample_depth = 1;
else if (mainprog_ptr->num_palette <= 4)
sample_depth = 2;
else if (mainprog_ptr->num_palette <= 16)
sample_depth = 4;
else
#endif
sample_depth = 8;
struct rwpng_chunk *chunk = mainprog_ptr->chunks;
int chunk_num=0;
while(chunk) {
png_unknown_chunk pngchunk = {
.size = chunk->size,
.data = chunk->data,
.location = chunk->location,
};
memcpy(pngchunk.name, chunk->name, 5);
png_set_unknown_chunks(png_ptr, info_ptr, &pngchunk, 1);
#if defined(PNG_HAVE_IHDR) && PNG_LIBPNG_VER < 10600
png_set_unknown_chunk_location(png_ptr, info_ptr, chunk_num, pngchunk.location ? pngchunk.location : PNG_HAVE_IHDR);
#endif
chunk = chunk->next;
chunk_num++;
}
png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
sample_depth, PNG_COLOR_TYPE_PALETTE,
0, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_BASE);
png_set_PLTE(png_ptr, info_ptr, &mainprog_ptr->palette[0], mainprog_ptr->num_palette);
if (mainprog_ptr->num_trans > 0) {
png_set_tRNS(png_ptr, info_ptr, mainprog_ptr->trans, mainprog_ptr->num_trans, NULL);
}
rwpng_write_end(&info_ptr, &png_ptr, mainprog_ptr->row_pointers);
if (SUCCESS == write_state.retval && write_state.maximum_file_size && write_state.bytes_written > write_state.maximum_file_size) {
return TOO_LARGE_FILE;
}
return write_state.retval;
}
pngquant_error rwpng_write_image24(FILE *outfile, const png24_image *mainprog_ptr)
{
png_structp png_ptr;
png_infop info_ptr;
pngquant_error retval = rwpng_write_image_init((rwpng_png_image*)mainprog_ptr, &png_ptr, &info_ptr, 0);
if (retval) return retval;
png_init_io(png_ptr, outfile);
rwpng_set_gamma(info_ptr, png_ptr, mainprog_ptr->gamma);
png_set_IHDR(png_ptr, info_ptr, mainprog_ptr->width, mainprog_ptr->height,
8, PNG_COLOR_TYPE_RGB_ALPHA,
0, PNG_COMPRESSION_TYPE_DEFAULT,
PNG_FILTER_TYPE_BASE);
png_bytepp row_pointers = rwpng_create_row_pointers(info_ptr, png_ptr, mainprog_ptr->rgba_data, mainprog_ptr->height, 0);
rwpng_write_end(&info_ptr, &png_ptr, row_pointers);
free(row_pointers);
return SUCCESS;
}
static void rwpng_warning_stderr_handler(png_structp png_ptr, png_const_charp msg) {
fprintf(stderr, " %s\n", msg);
}
static void rwpng_warning_silent_handler(png_structp png_ptr, png_const_charp msg) {
}
static void rwpng_error_handler(png_structp png_ptr, png_const_charp msg)
{
rwpng_png_image *mainprog_ptr;
/* This function, aside from the extra step of retrieving the "error
* pointer" (below) and the fact that it exists within the application
* rather than within libpng, is essentially identical to libpng's
* default error handler. The second point is critical: since both
* setjmp() and longjmp() are called from the same code, they are
* guaranteed to have compatible notions of how big a jmp_buf is,
* regardless of whether _BSD_SOURCE or anything else has (or has not)
* been defined. */
fprintf(stderr, " error: %s (libpng failed)\n", msg);
fflush(stderr);
mainprog_ptr = png_get_error_ptr(png_ptr);
if (mainprog_ptr == NULL) abort();
longjmp(mainprog_ptr->jmpbuf, 1);
}
|
django_fmt_plug.c | /* Django 1.4 patch for JtR. Hacked together during May of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com>.
*
* This software is Copyright (c) 2012, Dhiru Kholia <dhiru.kholia at gmail.com>,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without modification,
* are permitted.
*
* Input Format => user:$django$*type*django-hash
*
* Where,
*
 * type => 1, for Django 1.4 pbkdf2_sha256 hashes and
*
* django-hash => Second column of "SELECT username, password FROM auth_user"
*
 * July, 2012: the oSSL PKCS5_PBKDF2_HMAC function was replaced with a much
 * faster pbkdf2() function designed by JimF. That function was originally
 * written for mscash2 (DCC2); the same pbkdf2 code is used here, and only
 * required small changes to use SHA256.
 *
 * This new code is 3x to 4x FASTER than the original oSSL code, even though
 * it is only using oSSL functions. A lot of the high level stuff in oSSL
 * sux for speed.
 */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_django;
#elif FMT_REGISTERS_H
john_register_one(&fmt_django);
#else
// uncomment this header to use the slower PKCS5_PBKDF2_HMAC function.
// Note, PKCS5_PBKDF2_HMAC is ONLY available in oSSL 1.00 + (1.0c I think to be exact)
//#include <openssl/evp.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "arch.h"
#include "sha2.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "johnswap.h"
#include "base64.h"
#include "base64_convert.h"
#include "pbkdf2_hmac_sha256.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4 // tuned on core i7
#endif
static int omp_t = 1;
#endif
#include "memdbg.h"
#define FORMAT_LABEL "Django"
#define FORMAT_NAME ""
#define FORMAT_TAG "$django$*"
#define FORMAT_TAG_LEN (sizeof(FORMAT_TAG)-1)
#ifdef SIMD_COEF_32
#define ALGORITHM_NAME "PBKDF2-SHA256 " SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "PBKDF2-SHA256 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT " (x10000)"
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define HASH_LENGTH 44
#define BINARY_SIZE 32
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#define MAX_KEYS_PER_CRYPT SSE_GROUP_SZ_SHA256
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests django_tests[] = {
{"$django$*1*pbkdf2_sha256$10000$qPmFbibfAY06$x/geVEkdZSlJMqvIYJ7G6i5l/6KJ0UpvLUU6cfj83VM=", "openwall"},
{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd7$2nTDwPhSsDKOwpKiV04teVtf+a14Rs7na/lIB3KnHkM=", "123"},
{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd1$bkdQo9RoatRomupPFP+XEo+Guuirq4mi+R1cFcV0U3M=", "openwall"},
{"$django$*1*pbkdf2_sha256$10000$BVmpZMBhRSd6$Uq33DAHOFHUED+32IIqCqm+ITU1mhsGOJ7YwFf6h+6k=", "password"},
{"$django$*1*pbkdf2_sha256$10000$34L3roCQ6ZfN$R21tJK1sIDfmj9BfBocefFfuGVwE3pXcLEhChNjc+pU=", "0123456789012345678901234567890123456789012345678901234567890123"},
{"$django$*1*pbkdf2_sha256$10000$7qPqyUDw8kZV$pFmVRjlHvayoWEy8ZWXkHgfmgImUKLmkmruclpYVAxM=", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_out)[BINARY_SIZE / sizeof(uint32_t)];
static struct custom_salt {
int type;
int iterations;
unsigned char salt[32];
} *cur_salt;
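/* Illustrative mapping (from the first test vector above):
 * "$django$*1*pbkdf2_sha256$10000$qPmFbibfAY06$..." parses into
 * type = 1, iterations = 10000, salt = "qPmFbibfAY06". */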
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc_align(sizeof(*saved_key),
self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_align(sizeof(*crypt_out), self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *ctcopy, *keeptr, *p;
if (strncmp(ciphertext, FORMAT_TAG, FORMAT_TAG_LEN) != 0)
return 0;
ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
ctcopy += FORMAT_TAG_LEN;
if ((p = strtokm(ctcopy, "*")) == NULL) /* type */
goto err;
/* type must be 1 */
if (!isdec(p))
goto err;
if (atoi(p) != 1)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* algorithm */
goto err;
if (strcmp(p, "pbkdf2_sha256") != 0)
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* iterations */
goto err;
if (!isdec(p)) // FIXME: what about iterations == 0?
goto err;
if ((p = strtokm(NULL, "$")) == NULL) /* salt */
goto err;
if (strlen(p) > sizeof(cur_salt->salt)-1)
goto err;
if ((p = strtokm(NULL, "")) == NULL) /* hash */
goto err;
if (strlen(p)-1 != base64_valid_length(p,e_b64_mime,flg_Base64_MIME_TRAIL_EQ, 0) || strlen(p)-1 > HASH_LENGTH-1) {
goto err;
}
MEM_FREE(keeptr);
return 1;
err:
MEM_FREE(keeptr);
return 0;
}
static void *get_salt(char *ciphertext)
{
char Buf[120], *ctcopy=Buf;
char *p, *t;
static struct custom_salt cs;
memset(&cs, 0, sizeof(cs));
strncpy(Buf, ciphertext, 119);
Buf[119] = 0;
ctcopy += FORMAT_TAG_LEN; /* skip over "$django$*" */
p = strtokm(ctcopy, "*");
cs.type = atoi(p);
strtokm(NULL, "$");
t = strtokm(NULL, "$");
cs.iterations = atoi(t);
t = strtokm(NULL, "$");
strcpy((char*)cs.salt, t);
return (void *)&cs;
}
static void *get_binary(char *ciphertext)
{ static union {
unsigned char c[BINARY_SIZE+1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
p = strrchr(ciphertext, '$') + 1;
base64_decode(p, strlen(p), (char*)out);
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
#ifdef SIMD_COEF_32
int lens[MAX_KEYS_PER_CRYPT], i;
unsigned char *pin[MAX_KEYS_PER_CRYPT];
union {
uint32_t *pout[MAX_KEYS_PER_CRYPT];
unsigned char *poutc;
} x;
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
lens[i] = strlen(saved_key[i+index]);
pin[i] = (unsigned char*)saved_key[i+index];
x.pout[i] = crypt_out[i+index];
}
pbkdf2_sha256_sse((const unsigned char **)pin, lens, cur_salt->salt, strlen((char*)cur_salt->salt), cur_salt->iterations, &(x.poutc), 32, 0);
#else
pbkdf2_sha256((unsigned char *)saved_key[index], strlen(saved_key[index]),
cur_salt->salt, strlen((char*)cur_salt->salt),
cur_salt->iterations, (unsigned char*)crypt_out[index], 32, 0);
#endif
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
for (; index < count; index++)
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void django_set_key(char *key, int index)
{
strcpy(saved_key[index], key);
}
static char *get_key(int index)
{
return saved_key[index];
}
static unsigned int iteration_count(void *salt)
{
struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int)my_salt->iterations;
}
struct fmt_main fmt_django = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"iteration count",
},
{ FORMAT_TAG },
django_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
django_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
vectorOps.c |
/*=======================
C Program Template
Evan William Gretok
Month D, YEAR
=======================*/
// Inclusions
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>  // time(), used to seed rand()
#include <omp.h>
// Definitions
#define DEBUG 0
#define SIZE 100000000
// Main
int main( int argc, char *argv[ ] ) {
// Seed Your Random Number Generators
srand( time( NULL ) );
// Loop Iterator
int i = 0;
  // Initialize Vector Memory Spaces
  double *mainVector = malloc( SIZE * sizeof( double ) );
  double *divVector  = malloc( SIZE * sizeof( double ) );
  double *solVector  = malloc( SIZE * sizeof( double ) );
  if( mainVector == NULL || divVector == NULL || solVector == NULL ) {
    fprintf( stderr, "malloc failed\n" );
    return 1;
  }
// Fill Vectors
double x = rand( );
printf( "%lf\n", x );
for( i = 0; i < SIZE; i++ ) {
mainVector[i] = ( rand( ) % 10000000 ) * 0.01;
    divVector[i]  = ( rand( ) % 1000 + 1 ) * 0.01; // +1 keeps the divisor nonzero
solVector[i] = 0;
}
// DEBUG - Display Vectors
if( DEBUG ) {
for( i = 0; i < SIZE; i++ ) {
printf( " %5.2lf %1.2lf\n", mainVector[i], divVector[i] );
}
}
// Perform Processing
double start = omp_get_wtime( );
#pragma omp parallel for shared( mainVector, divVector, solVector ) \
private( i ) schedule( static, 10 ) num_threads( 4 )
for( i = 0; i < SIZE; i++ ) {
solVector[i] = mainVector[i] / divVector[i];
}
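  // Note (illustrative): schedule( static, 10 ) deals out fixed chunks of
  // 10 iterations round-robin, so thread t of the 4 handles iterations
  // [10t, 10t+10), [10(t+4), 10(t+4)+10), and so on.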
double end = omp_get_wtime( );
// DEBUG - Display Output
if( DEBUG ) {
for( i = 0; i < SIZE; i++ ) {
printf( " %1.2lf\n", solVector[i] );
}
}
// Display Timing
double solTime = end - start;
printf( "Vector Division Complete - %lf Seconds\n", solTime );
return 0;
}
// End .c - EWG SDG
|
3d7pt.lbpar.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Order-1, 3D 7 point stencil
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
  int t, i, j, k, test;
  // Assumed defaults (the original left these uninitialized when fewer
  // command-line arguments were supplied).
  int Nx = 256 + 2, Ny = 256 + 2, Nz = 256 + 2, Nt = 50;
  if (argc > 3) {
    Nx = atoi(argv[1])+2;
    Ny = atoi(argv[2])+2;
    Nz = atoi(argv[3])+2;
  }
  if (argc > 4)
    Nt = atoi(argv[4]);
double ****A = (double ****) malloc(sizeof(double***)*2);
A[0] = (double ***) malloc(sizeof(double**)*Nz);
A[1] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[0][i] = (double**) malloc(sizeof(double*)*Ny);
A[1][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[0][i][j] = (double*) malloc(sizeof(double)*Nx);
A[1][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
// tile size information, including extra element to decide the list length
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 32;
tile_size[1] = 32;
tile_size[2] = 24;
tile_size[3] = 1024;
tile_size[4] = -1;
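  // Note (illustrative): the 32/32/24/1024 entries match the block strides
  // visible in the generated loop nest below (t2 advances in multiples of
  // 32, t3 of 24, t4 of 1024); the trailing -1 terminates the list.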
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
const double alpha = 0.0876;
const double beta = 0.0765;
// initialize variables
//
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Addition: 6 && Multiplication: 2
int t1, t2, t3, t4, t5, t6, t7, t8;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) {
for (t1=-1;t1<=floord(Nt-2,16);t1++) {
lbp=max(ceild(t1,2),ceild(32*t1-Nt+3,32));
ubp=min(floord(Nt+Nz-4,32),floord(16*t1+Nz+13,32));
#pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8)
for (t2=lbp;t2<=ubp;t2++) {
for (t3=max(max(0,ceild(2*t1-2,3)),ceild(32*t2-Nz-20,24));t3<=min(min(min(floord(Nt+Ny-4,24),floord(16*t1+Ny+29,24)),floord(32*t2+Ny+28,24)),floord(32*t1-32*t2+Nz+Ny+27,24));t3++) {
for (t4=max(max(max(0,ceild(t1-63,64)),ceild(32*t2-Nz-1020,1024)),ceild(24*t3-Ny-1020,1024));t4<=min(min(min(min(floord(Nt+Nx-4,1024),floord(16*t1+Nx+29,1024)),floord(32*t2+Nx+28,1024)),floord(24*t3+Nx+20,1024)),floord(32*t1-32*t2+Nz+Nx+27,1024));t4++) {
for (t5=max(max(max(max(max(0,16*t1),32*t1-32*t2+1),32*t2-Nz+2),24*t3-Ny+2),1024*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,16*t1+31),32*t2+30),24*t3+22),1024*t4+1022),32*t1-32*t2+Nz+29);t5++) {
for (t6=max(max(32*t2,t5+1),-32*t1+32*t2+2*t5-31);t6<=min(min(32*t2+31,-32*t1+32*t2+2*t5),t5+Nz-2);t6++) {
for (t7=max(24*t3,t5+1);t7<=min(24*t3+23,t5+Ny-2);t7++) {
lbv=max(1024*t4,t5+1);
ubv=min(1024*t4+1023,t5+Nx-2);
#pragma ivdep
#pragma vector always
for (t8=lbv;t8<=ubv;t8++) {
A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));;
}
}
}
}
}
}
}
}
}
/* End of CLooG code */
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = min(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "constant")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
  // Free allocated arrays (skipped: freeing here causes performance degradation)
/* for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
*/
return 0;
}
|
par_csr_matop.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
#include "_hypre_parcsr_mv.h"
#include "_hypre_lapack.h"
#include "_hypre_blas.h"
/*--------------------------------------------------------------------------
* hypre_ParMatmul_RowSizes:
*
* Computes sizes of C rows. Formerly part of hypre_ParMatmul but removed
* so it can also be used for multiplication of Boolean matrices.
*
* Arrays computed: C_diag_i, C_offd_i.
*
 * Arrays needed: (all HYPRE_Int*)
 *        rownnz_A,
 *        A_diag_i, A_diag_j,
 *        A_offd_i, A_offd_j,
 *        B_diag_i, B_diag_j,
 *        B_offd_i, B_offd_j,
 *        B_ext_diag_i, B_ext_diag_j,
 *        B_ext_offd_i, B_ext_offd_j,
 *        map_B_to_C.
*
* Scalars computed: C_diag_size, C_offd_size.
*
* Scalars needed:
* num_rownnz_A, num_rows_diag_A, num_cols_offd_A, allsquare,
* first_col_diag_B, num_cols_diag_B, num_cols_offd_B, num_cols_offd_C
*--------------------------------------------------------------------------*/
void
hypre_ParMatmul_RowSizes( HYPRE_MemoryLocation memory_location,
HYPRE_Int **C_diag_i,
HYPRE_Int **C_offd_i,
HYPRE_Int *rownnz_A,
HYPRE_Int *A_diag_i,
HYPRE_Int *A_diag_j,
HYPRE_Int *A_offd_i,
HYPRE_Int *A_offd_j,
HYPRE_Int *B_diag_i,
HYPRE_Int *B_diag_j,
HYPRE_Int *B_offd_i,
HYPRE_Int *B_offd_j,
HYPRE_Int *B_ext_diag_i,
HYPRE_Int *B_ext_diag_j,
HYPRE_Int *B_ext_offd_i,
HYPRE_Int *B_ext_offd_j,
HYPRE_Int *map_B_to_C,
HYPRE_Int *C_diag_size,
HYPRE_Int *C_offd_size,
HYPRE_Int num_rownnz_A,
HYPRE_Int num_rows_diag_A,
HYPRE_Int num_cols_offd_A,
HYPRE_Int allsquare,
HYPRE_Int num_cols_diag_B,
HYPRE_Int num_cols_offd_B,
HYPRE_Int num_cols_offd_C )
{
HYPRE_Int *jj_count_diag_array;
HYPRE_Int *jj_count_offd_array;
HYPRE_Int start_indexing = 0; /* start indexing for C_data at 0 */
HYPRE_Int num_threads = hypre_NumThreads();
*C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location);
*C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location);
jj_count_diag_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
jj_count_offd_array = hypre_CTAlloc(HYPRE_Int, num_threads, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Loop over rows of A
*-----------------------------------------------------------------------*/
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int *B_marker = NULL;
HYPRE_Int jj_row_begin_diag, jj_count_diag;
HYPRE_Int jj_row_begin_offd, jj_count_offd;
HYPRE_Int i1, ii1, i2, i3, jj2, jj3;
HYPRE_Int size, rest, num_threads;
HYPRE_Int ii, ns, ne;
num_threads = hypre_NumActiveThreads();
size = num_rownnz_A / num_threads;
rest = num_rownnz_A - size * num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
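      /* Worked example (illustrative): num_rownnz_A = 10, num_threads = 4
         gives size = 2, rest = 2, so the per-thread row ranges [ns, ne)
         are [0,3), [3,6), [6,8), [8,10) -- the first 'rest' threads each
         take one extra row. */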
jj_count_diag = start_indexing;
jj_count_offd = start_indexing;
if (num_cols_diag_B || num_cols_offd_C)
{
B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C, HYPRE_MEMORY_HOST);
}
for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++)
{
B_marker[i1] = -1;
}
for (i1 = ns; i1 < ne; i1++)
{
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
if (rownnz_A)
{
ii1 = rownnz_A[i1];
}
else
{
ii1 = i1;
/*--------------------------------------------------------------------
* Set marker for diagonal entry, C_{i1,i1} (for square matrices).
*--------------------------------------------------------------------*/
if (allsquare)
{
B_marker[i1] = jj_count_diag;
jj_count_diag++;
}
}
/*-----------------------------------------------------------------
* Loop over entries in row ii1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1 + 1]; jj2++)
{
i2 = A_offd_j[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_ext.
*-----------------------------------------------------------*/
for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2 + 1]; jj3++)
{
i3 = num_cols_diag_B + B_ext_offd_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2 + 1]; jj3++)
{
i3 = B_ext_diag_j[jj3];
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row ii1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1 + 1]; jj2++)
{
i2 = A_diag_j[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_diag.
*-----------------------------------------------------------*/
for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2 + 1]; jj3++)
{
i3 = B_diag_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
jj_count_diag++;
}
}
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_offd.
*-----------------------------------------------------------*/
if (num_cols_offd_B)
{
for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2 + 1]; jj3++)
{
i3 = num_cols_diag_B + map_B_to_C[B_offd_j[jj3]];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, mark it and increment
* counter.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
jj_count_offd++;
}
}
}
}
/*--------------------------------------------------------------------
* Set C_diag_i and C_offd_i for this row.
*--------------------------------------------------------------------*/
(*C_diag_i)[ii1] = jj_row_begin_diag;
(*C_offd_i)[ii1] = jj_row_begin_offd;
}
jj_count_diag_array[ii] = jj_count_diag;
jj_count_offd_array[ii] = jj_count_offd;
hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/* Correct diag_i and offd_i - phase 1 */
if (ii)
{
jj_count_diag = jj_count_diag_array[0];
jj_count_offd = jj_count_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
jj_count_diag += jj_count_diag_array[i1];
jj_count_offd += jj_count_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
ii1 = rownnz_A ? rownnz_A[i1] : i1;
(*C_diag_i)[ii1] += jj_count_diag;
(*C_offd_i)[ii1] += jj_count_offd;
}
}
else
{
(*C_diag_i)[num_rows_diag_A] = 0;
(*C_offd_i)[num_rows_diag_A] = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
(*C_diag_i)[num_rows_diag_A] += jj_count_diag_array[i1];
(*C_offd_i)[num_rows_diag_A] += jj_count_offd_array[i1];
}
}
/* Correct diag_i and offd_i - phase 2 */
if (rownnz_A != NULL)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i1 = ns; i1 < (ne - 1); i1++)
{
for (ii1 = rownnz_A[i1] + 1; ii1 < rownnz_A[i1 + 1]; ii1++)
{
(*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[i1 + 1]];
(*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[i1 + 1]];
}
}
if (ii < (num_threads - 1))
{
for (ii1 = rownnz_A[ne - 1] + 1; ii1 < rownnz_A[ne]; ii1++)
{
(*C_diag_i)[ii1] = (*C_diag_i)[rownnz_A[ne]];
(*C_offd_i)[ii1] = (*C_offd_i)[rownnz_A[ne]];
}
}
else
{
for (ii1 = rownnz_A[ne - 1] + 1; ii1 < num_rows_diag_A; ii1++)
{
(*C_diag_i)[ii1] = (*C_diag_i)[num_rows_diag_A];
(*C_offd_i)[ii1] = (*C_offd_i)[num_rows_diag_A];
}
}
}
} /* end parallel loop */
*C_diag_size = (*C_diag_i)[num_rows_diag_A];
*C_offd_size = (*C_offd_i)[num_rows_diag_A];
#ifdef HYPRE_DEBUG
HYPRE_Int i;
for (i = 0; i < num_rows_diag_A; i++)
{
hypre_assert((*C_diag_i)[i] <= (*C_diag_i)[i + 1]);
hypre_assert((*C_offd_i)[i] <= (*C_offd_i)[i + 1]);
}
#endif
hypre_TFree(jj_count_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(jj_count_offd_array, HYPRE_MEMORY_HOST);
/* End of First Pass */
}
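/*--------------------------------------------------------------------------
 * Editor's sketch (not hypre code): the pass above is a threaded variant of
 * the classic marker-array symbolic product. For reference, a minimal
 * serial version for plain CSR index arrays could look as follows; all
 * names are hypothetical and <stdlib.h> is assumed.
 *--------------------------------------------------------------------------*/
#if 0
static int csr_matmat_row_sizes( int n_rows_A, int n_cols_B,
                                 const int *A_i, const int *A_j,
                                 const int *B_i, const int *B_j,
                                 int *C_i /* size n_rows_A + 1 */ )
{
   int *marker = (int *) malloc(n_cols_B * sizeof(int));
   int  i, jj, kk, nnz = 0;
   for (i = 0; i < n_cols_B; i++) { marker[i] = -1; }
   C_i[0] = 0;
   for (i = 0; i < n_rows_A; i++)
   {
      for (jj = A_i[i]; jj < A_i[i + 1]; jj++)
      {
         int k = A_j[jj];
         for (kk = B_i[k]; kk < B_i[k + 1]; kk++)
         {
            /* count each column of row i of C at most once */
            if (marker[B_j[kk]] != i) { marker[B_j[kk]] = i; nnz++; }
         }
      }
      C_i[i + 1] = nnz;
   }
   free(marker);
   return nnz; /* total number of nonzeros of C */
}
#endif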
/*--------------------------------------------------------------------------
* hypre_ParMatmul:
*
* Multiplies two ParCSRMatrices A and B and returns the product in
* ParCSRMatrix C.
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix*
hypre_ParMatmul( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B )
{
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATMUL] -= hypre_MPI_Wtime();
#endif
/* ParCSRMatrix A */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt nrows_A = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt ncols_A = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts_A = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int num_rownnz_A;
HYPRE_Int *rownnz_A = NULL;
/* ParCSRMatrix B */
HYPRE_BigInt nrows_B = hypre_ParCSRMatrixGlobalNumRows(B);
HYPRE_BigInt ncols_B = hypre_ParCSRMatrixGlobalNumCols(B);
HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B);
HYPRE_BigInt last_col_diag_B;
/* A_diag */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_diag_ir = hypre_CSRMatrixRownnz(A_diag);
HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag);
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
/* A_offd */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int *A_offd_ir = hypre_CSRMatrixRownnz(A_offd);
HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd);
HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
/* B_diag */
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
/* B_offd */
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
/* ParCSRMatrix C */
hypre_ParCSRMatrix *C;
HYPRE_BigInt *col_map_offd_C;
HYPRE_Int *map_B_to_C = NULL;
/* C_diag */
hypre_CSRMatrix *C_diag;
HYPRE_Complex *C_diag_data;
HYPRE_Int *C_diag_i;
HYPRE_Int *C_diag_j;
HYPRE_Int C_offd_size;
HYPRE_Int num_cols_offd_C = 0;
/* C_offd */
hypre_CSRMatrix *C_offd;
HYPRE_Complex *C_offd_data = NULL;
HYPRE_Int *C_offd_i = NULL;
HYPRE_Int *C_offd_j = NULL;
HYPRE_Int C_diag_size;
/* Bs_ext */
hypre_CSRMatrix *Bs_ext;
HYPRE_Complex *Bs_ext_data;
HYPRE_Int *Bs_ext_i;
HYPRE_BigInt *Bs_ext_j;
HYPRE_Complex *B_ext_diag_data;
HYPRE_Int *B_ext_diag_i;
HYPRE_Int *B_ext_diag_j;
HYPRE_Int B_ext_diag_size;
HYPRE_Complex *B_ext_offd_data;
HYPRE_Int *B_ext_offd_i;
HYPRE_Int *B_ext_offd_j;
HYPRE_BigInt *B_big_offd_j = NULL;
HYPRE_Int B_ext_offd_size;
HYPRE_Int allsquare = 0;
HYPRE_Int num_procs;
HYPRE_Int *my_diag_array;
HYPRE_Int *my_offd_array;
HYPRE_Int max_num_threads;
HYPRE_Complex zero = 0.0;
HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
HYPRE_ANNOTATE_FUNC_BEGIN;
max_num_threads = hypre_NumThreads();
my_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
my_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
if (ncols_A != nrows_B || num_cols_diag_A != num_rows_diag_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, " Error! Incompatible matrix dimensions!\n");
HYPRE_ANNOTATE_FUNC_END;
return NULL;
}
/* if C=A*B is square globally and locally, then C_diag should be square also */
if ( num_rows_diag_A == num_cols_diag_B && nrows_A == ncols_B )
{
allsquare = 1;
}
/* Set rownnz of A */
if (num_rownnz_diag_A != num_rows_diag_A &&
num_rownnz_offd_A != num_rows_offd_A )
{
hypre_MergeOrderedArrays(num_rownnz_diag_A, A_diag_ir,
num_rownnz_offd_A, A_offd_ir,
&num_rownnz_A, &rownnz_A);
}
else
{
num_rownnz_A = hypre_max(num_rows_diag_A, num_rows_offd_A);
}
/*-----------------------------------------------------------------------
* Extract B_ext, i.e. portion of B that is stored on neighbor procs
* and needed locally for matrix matrix product
*-----------------------------------------------------------------------*/
hypre_MPI_Comm_size(comm, &num_procs);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] -= hypre_MPI_Wtime();
#endif
if (num_procs > 1)
{
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings within
* hypre_ParCSRMatrixExtractBExt
*--------------------------------------------------------------------*/
Bs_ext = hypre_ParCSRMatrixExtractBExt(B, A, 1);
Bs_ext_data = hypre_CSRMatrixData(Bs_ext);
Bs_ext_i = hypre_CSRMatrixI(Bs_ext);
Bs_ext_j = hypre_CSRMatrixBigJ(Bs_ext);
}
B_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A + 1, HYPRE_MEMORY_HOST);
B_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A + 1, HYPRE_MEMORY_HOST);
B_ext_diag_size = 0;
B_ext_offd_size = 0;
last_col_diag_B = first_col_diag_B + (HYPRE_BigInt) num_cols_diag_B - 1;
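/* entries of Bs_ext whose global column lies in [first_col_diag_B,
last_col_diag_B] belong to this rank's diagonal block of B; all other
entries go to B_ext_offd */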
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
hypre_UnorderedBigIntSet set;
#pragma omp parallel
{
HYPRE_Int size, rest, ii;
HYPRE_Int ns, ne;
HYPRE_Int i1, i, j;
HYPRE_Int my_offd_size, my_diag_size;
HYPRE_Int cnt_offd, cnt_diag;
HYPRE_Int num_threads = hypre_NumActiveThreads();
size = num_cols_offd_A / num_threads;
rest = num_cols_offd_A - size * num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
my_diag_size = 0;
my_offd_size = 0;
for (i = ns; i < ne; i++)
{
B_ext_diag_i[i] = my_diag_size;
B_ext_offd_i[i] = my_offd_size;
for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B ||
Bs_ext_j[j] > last_col_diag_B)
{
my_offd_size++;
}
else
{
my_diag_size++;
}
}
}
my_diag_array[ii] = my_diag_size;
my_offd_array[ii] = my_offd_size;
#pragma omp barrier
if (ii)
{
my_diag_size = my_diag_array[0];
my_offd_size = my_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
my_diag_size += my_diag_array[i1];
my_offd_size += my_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
B_ext_diag_i[i1] += my_diag_size;
B_ext_offd_i[i1] += my_offd_size;
}
}
else
{
B_ext_diag_size = 0;
B_ext_offd_size = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
B_ext_diag_size += my_diag_array[i1];
B_ext_offd_size += my_offd_array[i1];
}
B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;
if (B_ext_diag_size)
{
B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (B_ext_offd_size)
{
B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
}
hypre_UnorderedBigIntSetCreate(&set, B_ext_offd_size + num_cols_offd_B, 16 * hypre_NumThreads());
}
#pragma omp barrier
cnt_offd = B_ext_offd_i[ns];
cnt_diag = B_ext_diag_i[ns];
for (i = ns; i < ne; i++)
{
for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B ||
Bs_ext_j[j] > last_col_diag_B)
{
hypre_UnorderedBigIntSetPut(&set, Bs_ext_j[j]);
B_big_offd_j[cnt_offd] = Bs_ext_j[j];
//Bs_ext_j[cnt_offd] = Bs_ext_j[j];
B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
}
else
{
B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
}
}
}
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_B);
for (i = i_begin; i < i_end; i++)
{
hypre_UnorderedBigIntSetPut(&set, col_map_offd_B[i]);
}
} /* omp parallel */
col_map_offd_C = hypre_UnorderedBigIntSetCopyToArray(&set, &num_cols_offd_C);
hypre_UnorderedBigIntSetDestroy(&set);
hypre_UnorderedBigIntMap col_map_offd_C_inverse;
hypre_big_sort_and_create_inverse_map(col_map_offd_C,
num_cols_offd_C,
&col_map_offd_C,
&col_map_offd_C_inverse);
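/* col_map_offd_C is now globally sorted; the inverse map sends a global
column id to its local offd column index in C */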
HYPRE_Int i, j;
#pragma omp parallel for private(j) HYPRE_SMP_SCHEDULE
for (i = 0; i < num_cols_offd_A; i++)
{
for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i + 1]; j++)
{
//B_ext_offd_j[j] = hypre_UnorderedIntMapGet(&col_map_offd_C_inverse, B_ext_offd_j[j]);
B_ext_offd_j[j] = hypre_UnorderedBigIntMapGet(&col_map_offd_C_inverse, B_big_offd_j[j]);
}
}
if (num_cols_offd_C)
{
hypre_UnorderedBigIntMapDestroy(&col_map_offd_C_inverse);
}
hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);
if (num_cols_offd_B)
{
HYPRE_Int i;
map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);
#pragma omp parallel private(i)
{
HYPRE_Int i_begin, i_end;
hypre_GetSimpleThreadPartition(&i_begin, &i_end, num_cols_offd_C);
HYPRE_Int cnt;
if (i_end > i_begin)
{
cnt = hypre_BigLowerBound(col_map_offd_B,
col_map_offd_B + (HYPRE_BigInt)num_cols_offd_B,
col_map_offd_C[i_begin]) - col_map_offd_B;
}
for (i = i_begin; i < i_end && cnt < num_cols_offd_B; i++)
{
if (col_map_offd_C[i] == col_map_offd_B[cnt])
{
map_B_to_C[cnt++] = i;
}
}
}
}
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Bs_ext);
Bs_ext = NULL;
}
#else /* !HYPRE_CONCURRENT_HOPSCOTCH */
HYPRE_BigInt *temp;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int size, rest, ii;
HYPRE_Int ns, ne;
HYPRE_Int i1, i, j;
HYPRE_Int my_offd_size, my_diag_size;
HYPRE_Int cnt_offd, cnt_diag;
HYPRE_Int num_threads = hypre_NumActiveThreads();
size = num_cols_offd_A / num_threads;
rest = num_cols_offd_A - size * num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
my_diag_size = 0;
my_offd_size = 0;
for (i = ns; i < ne; i++)
{
B_ext_diag_i[i] = my_diag_size;
B_ext_offd_i[i] = my_offd_size;
for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B ||
Bs_ext_j[j] > last_col_diag_B)
{
my_offd_size++;
}
else
{
my_diag_size++;
}
}
}
my_diag_array[ii] = my_diag_size;
my_offd_array[ii] = my_offd_size;
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii)
{
my_diag_size = my_diag_array[0];
my_offd_size = my_offd_array[0];
for (i1 = 1; i1 < ii; i1++)
{
my_diag_size += my_diag_array[i1];
my_offd_size += my_offd_array[i1];
}
for (i1 = ns; i1 < ne; i1++)
{
B_ext_diag_i[i1] += my_diag_size;
B_ext_offd_i[i1] += my_offd_size;
}
}
else
{
B_ext_diag_size = 0;
B_ext_offd_size = 0;
for (i1 = 0; i1 < num_threads; i1++)
{
B_ext_diag_size += my_diag_array[i1];
B_ext_offd_size += my_offd_array[i1];
}
B_ext_diag_i[num_cols_offd_A] = B_ext_diag_size;
B_ext_offd_i[num_cols_offd_A] = B_ext_offd_size;
if (B_ext_diag_size)
{
B_ext_diag_j = hypre_CTAlloc(HYPRE_Int, B_ext_diag_size, HYPRE_MEMORY_HOST);
B_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, B_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (B_ext_offd_size)
{
B_ext_offd_j = hypre_CTAlloc(HYPRE_Int, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_big_offd_j = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size, HYPRE_MEMORY_HOST);
B_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, B_ext_offd_size, HYPRE_MEMORY_HOST);
}
if (B_ext_offd_size || num_cols_offd_B)
{
temp = hypre_CTAlloc(HYPRE_BigInt, B_ext_offd_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
cnt_offd = B_ext_offd_i[ns];
cnt_diag = B_ext_diag_i[ns];
for (i = ns; i < ne; i++)
{
for (j = Bs_ext_i[i]; j < Bs_ext_i[i + 1]; j++)
{
if (Bs_ext_j[j] < first_col_diag_B ||
Bs_ext_j[j] > last_col_diag_B)
{
temp[cnt_offd] = Bs_ext_j[j];
B_big_offd_j[cnt_offd] = Bs_ext_j[j];
//Bs_ext_j[cnt_offd] = Bs_ext_j[j];
B_ext_offd_data[cnt_offd++] = Bs_ext_data[j];
}
else
{
B_ext_diag_j[cnt_diag] = (HYPRE_Int)(Bs_ext_j[j] - first_col_diag_B);
B_ext_diag_data[cnt_diag++] = Bs_ext_data[j];
}
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii == 0)
{
HYPRE_Int cnt;
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(Bs_ext);
Bs_ext = NULL;
}
cnt = 0;
if (B_ext_offd_size || num_cols_offd_B)
{
cnt = B_ext_offd_size;
for (i = 0; i < num_cols_offd_B; i++)
{
temp[cnt++] = col_map_offd_B[i];
}
if (cnt)
{
HYPRE_BigInt value;
hypre_BigQsort0(temp, 0, cnt - 1);
num_cols_offd_C = 1;
value = temp[0];
for (i = 1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_C++] = value;
}
}
}
if (num_cols_offd_C)
{
col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
}
for (i = 0; i < num_cols_offd_C; i++)
{
col_map_offd_C[i] = temp[i];
}
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = ns; i < ne; i++)
{
for (j = B_ext_offd_i[i]; j < B_ext_offd_i[i + 1]; j++)
{
//B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, Bs_ext_j[j], num_cols_offd_C);
B_ext_offd_j[j] = hypre_BigBinarySearch(col_map_offd_C, B_big_offd_j[j],
num_cols_offd_C);
}
}
} /* end parallel region */
hypre_TFree(B_big_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(my_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(my_offd_array, HYPRE_MEMORY_HOST);
if (num_cols_offd_B)
{
HYPRE_Int i, cnt;
map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);
cnt = 0;
for (i = 0; i < num_cols_offd_C; i++)
{
if (col_map_offd_C[i] == col_map_offd_B[cnt])
{
map_B_to_C[cnt++] = i;
if (cnt == num_cols_offd_B) { break; }
}
}
}
#endif /* !HYPRE_CONCURRENT_HOPSCOTCH */
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_RENUMBER_COLIDX] += hypre_MPI_Wtime();
#endif
HYPRE_ANNOTATE_REGION_BEGIN("%s", "First pass");
hypre_ParMatmul_RowSizes(memory_location_C, &C_diag_i, &C_offd_i,
rownnz_A, A_diag_i, A_diag_j,
A_offd_i, A_offd_j,
B_diag_i, B_diag_j,
B_offd_i, B_offd_j,
B_ext_diag_i, B_ext_diag_j,
B_ext_offd_i, B_ext_offd_j, map_B_to_C,
&C_diag_size, &C_offd_size,
num_rownnz_A, num_rows_diag_A, num_cols_offd_A,
allsquare, num_cols_diag_B, num_cols_offd_B,
num_cols_offd_C);
HYPRE_ANNOTATE_REGION_END("%s", "First pass");
/*-----------------------------------------------------------------------
* Allocate C_diag_data and C_diag_j arrays.
* Allocate C_offd_data and C_offd_j arrays.
*-----------------------------------------------------------------------*/
last_col_diag_B = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1;
C_diag_data = hypre_CTAlloc(HYPRE_Complex, C_diag_size, memory_location_C);
C_diag_j = hypre_CTAlloc(HYPRE_Int, C_diag_size, memory_location_C);
if (C_offd_size)
{
C_offd_data = hypre_CTAlloc(HYPRE_Complex, C_offd_size, memory_location_C);
C_offd_j = hypre_CTAlloc(HYPRE_Int, C_offd_size, memory_location_C);
}
/*-----------------------------------------------------------------------
* Second Pass: Fill in C_diag_data and C_diag_j.
* Second Pass: Fill in C_offd_data and C_offd_j.
*-----------------------------------------------------------------------*/
HYPRE_ANNOTATE_REGION_BEGIN("%s", "Second pass");
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int *B_marker = NULL;
HYPRE_Int ns, ne, size, rest, ii;
HYPRE_Int i1, ii1, i2, i3, jj2, jj3;
HYPRE_Int jj_row_begin_diag, jj_count_diag;
HYPRE_Int jj_row_begin_offd, jj_count_offd;
HYPRE_Int num_threads;
HYPRE_Complex a_entry; /*, a_b_product;*/
num_threads = hypre_NumActiveThreads();
size = num_rownnz_A / num_threads;
rest = num_rownnz_A - size * num_threads;
ii = hypre_GetThreadNum();
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
jj_count_diag = C_diag_i[rownnz_A ? rownnz_A[ns] : ns];
jj_count_offd = C_offd_i[rownnz_A ? rownnz_A[ns] : ns];
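/* the first pass stored each row's starting offsets in C_diag_i/C_offd_i,
so each thread resumes its counters at its first assigned row */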
if (num_cols_diag_B || num_cols_offd_C)
{
B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B + num_cols_offd_C,
HYPRE_MEMORY_HOST);
for (i1 = 0; i1 < num_cols_diag_B + num_cols_offd_C; i1++)
{
B_marker[i1] = -1;
}
}
/*-----------------------------------------------------------------------
* Loop over interior c-points.
*-----------------------------------------------------------------------*/
for (i1 = ns; i1 < ne; i1++)
{
jj_row_begin_diag = jj_count_diag;
jj_row_begin_offd = jj_count_offd;
if (rownnz_A)
{
ii1 = rownnz_A[i1];
}
else
{
ii1 = i1;
/*--------------------------------------------------------------------
* Create diagonal entry, C_{i1,i1}
*--------------------------------------------------------------------*/
if (allsquare)
{
B_marker[i1] = jj_count_diag;
C_diag_data[jj_count_diag] = zero;
C_diag_j[jj_count_diag] = i1;
jj_count_diag++;
}
}
/*-----------------------------------------------------------------
* Loop over entries in row i1 of A_offd.
*-----------------------------------------------------------------*/
if (num_cols_offd_A)
{
for (jj2 = A_offd_i[ii1]; jj2 < A_offd_i[ii1 + 1]; jj2++)
{
i2 = A_offd_j[jj2];
a_entry = A_offd_data[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_ext.
*-----------------------------------------------------------*/
for (jj3 = B_ext_offd_i[i2]; jj3 < B_ext_offd_i[i2 + 1]; jj3++)
{
i3 = num_cols_diag_B + B_ext_offd_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
C_offd_data[jj_count_offd] = a_entry * B_ext_offd_data[jj3];
C_offd_j[jj_count_offd] = i3 - num_cols_diag_B;
jj_count_offd++;
}
else
{
C_offd_data[B_marker[i3]] += a_entry * B_ext_offd_data[jj3];
}
}
for (jj3 = B_ext_diag_i[i2]; jj3 < B_ext_diag_i[i2 + 1]; jj3++)
{
i3 = B_ext_diag_j[jj3];
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
C_diag_data[jj_count_diag] = a_entry * B_ext_diag_data[jj3];
C_diag_j[jj_count_diag] = i3;
jj_count_diag++;
}
else
{
C_diag_data[B_marker[i3]] += a_entry * B_ext_diag_data[jj3];
}
}
}
}
/*-----------------------------------------------------------------
* Loop over entries in row ii1 of A_diag.
*-----------------------------------------------------------------*/
for (jj2 = A_diag_i[ii1]; jj2 < A_diag_i[ii1 + 1]; jj2++)
{
i2 = A_diag_j[jj2];
a_entry = A_diag_data[jj2];
/*-----------------------------------------------------------
* Loop over entries in row i2 of B_diag.
*-----------------------------------------------------------*/
for (jj3 = B_diag_i[i2]; jj3 < B_diag_i[i2 + 1]; jj3++)
{
i3 = B_diag_j[jj3];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_diag)
{
B_marker[i3] = jj_count_diag;
C_diag_data[jj_count_diag] = a_entry * B_diag_data[jj3];
C_diag_j[jj_count_diag] = i3;
jj_count_diag++;
}
else
{
C_diag_data[B_marker[i3]] += a_entry * B_diag_data[jj3];
}
}
if (num_cols_offd_B)
{
for (jj3 = B_offd_i[i2]; jj3 < B_offd_i[i2 + 1]; jj3++)
{
i3 = num_cols_diag_B + map_B_to_C[B_offd_j[jj3]];
/*--------------------------------------------------------
* Check B_marker to see that C_{ii1,i3} has not already
* been accounted for. If it has not, create a new entry.
* If it has, add new contribution.
*--------------------------------------------------------*/
if (B_marker[i3] < jj_row_begin_offd)
{
B_marker[i3] = jj_count_offd;
C_offd_data[jj_count_offd] = a_entry * B_offd_data[jj3];
C_offd_j[jj_count_offd] = i3 - num_cols_diag_B;
jj_count_offd++;
}
else
{
C_offd_data[B_marker[i3]] += a_entry * B_offd_data[jj3];
}
}
}
}
}
hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
} /*end parallel region */
HYPRE_ANNOTATE_REGION_END("%s", "Second pass");
C = hypre_ParCSRMatrixCreate(comm, nrows_A, ncols_B, row_starts_A,
col_starts_B, num_cols_offd_C,
C_diag_size, C_offd_size);
C_diag = hypre_ParCSRMatrixDiag(C);
hypre_CSRMatrixData(C_diag) = C_diag_data;
hypre_CSRMatrixI(C_diag) = C_diag_i;
hypre_CSRMatrixJ(C_diag) = C_diag_j;
hypre_CSRMatrixSetRownnz(C_diag);
C_offd = hypre_ParCSRMatrixOffd(C);
hypre_CSRMatrixI(C_offd) = C_offd_i;
hypre_ParCSRMatrixOffd(C) = C_offd;
if (num_cols_offd_C)
{
hypre_CSRMatrixData(C_offd) = C_offd_data;
hypre_CSRMatrixJ(C_offd) = C_offd_j;
hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
}
hypre_CSRMatrixSetRownnz(C_offd);
hypre_CSRMatrixMemoryLocation(C_diag) = memory_location_C;
hypre_CSRMatrixMemoryLocation(C_offd) = memory_location_C;
/*-----------------------------------------------------------------------
* Free various arrays
*-----------------------------------------------------------------------*/
hypre_TFree(B_ext_diag_i, HYPRE_MEMORY_HOST);
if (B_ext_diag_size)
{
hypre_TFree(B_ext_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(B_ext_diag_data, HYPRE_MEMORY_HOST);
}
hypre_TFree(B_ext_offd_i, HYPRE_MEMORY_HOST);
if (B_ext_offd_size)
{
hypre_TFree(B_ext_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(B_ext_offd_data, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_B)
{
hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
}
hypre_TFree(rownnz_A, HYPRE_MEMORY_HOST);
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MATMUL] += hypre_MPI_Wtime();
#endif
HYPRE_ANNOTATE_FUNC_END;
return C;
}
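/*--------------------------------------------------------------------------
 * Editor's sketch (not hypre code): typical use of hypre_ParMatmul, assuming
 * A and B are fully assembled ParCSR matrices with compatible distributions.
 * The routine returns NULL on a dimension mismatch, so check the result.
 *--------------------------------------------------------------------------*/
#if 0
hypre_ParCSRMatrix *C = hypre_ParMatmul(A, B);
if (C)
{
   /* ... use C ... */
   hypre_ParCSRMatrixDestroy(C);
}
#endif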
/* The following function was factored out of hypre_ParCSRMatrixExtractBExt
so that it can also be used by the corresponding routine for Boolean
matrices.
JSP: to allow overlapping of communication and computation, it returns
comm_handle_idx and comm_handle_data. Both handles must be destroyed
(which also frees the send_data each holds) before the extracted data
of B are accessed.
*/
void hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
HYPRE_Int ** pB_ext_i,
HYPRE_BigInt ** pB_ext_j,
HYPRE_Complex ** pB_ext_data,
HYPRE_BigInt ** pB_ext_row_map,
HYPRE_Int * num_nonzeros,
HYPRE_Int data,
HYPRE_Int find_row_map,
MPI_Comm comm,
hypre_ParCSRCommPkg * comm_pkg,
HYPRE_Int num_cols_B,
HYPRE_Int num_recvs,
HYPRE_Int num_sends,
HYPRE_BigInt first_col_diag,
HYPRE_BigInt * row_starts,
HYPRE_Int * recv_vec_starts,
HYPRE_Int * send_map_starts,
HYPRE_Int * send_map_elmts,
HYPRE_Int * diag_i,
HYPRE_Int * diag_j,
HYPRE_Int * offd_i,
HYPRE_Int * offd_j,
HYPRE_BigInt * col_map_offd,
HYPRE_Real * diag_data,
HYPRE_Real * offd_data,
hypre_ParCSRCommHandle **comm_handle_idx,
hypre_ParCSRCommHandle **comm_handle_data,
HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
HYPRE_Int skip_fine, /* 1 if only coarse points are needed */
HYPRE_Int skip_same_sign /* 1 if only points that have the same sign are needed */
// extended-based long-range interpolation: skip_fine = 1, skip_same_sign = 0 for the S matrix; skip_fine = 1, skip_same_sign = 1 for the A matrix
// all other interpolation: skip_fine = 0, skip_same_sign = 0
)
{
hypre_ParCSRCommHandle *comm_handle, *row_map_comm_handle = NULL;
hypre_ParCSRCommPkg *tmp_comm_pkg;
HYPRE_Int *B_int_i;
HYPRE_BigInt *B_int_j;
HYPRE_Int *B_ext_i;
HYPRE_BigInt * B_ext_j;
HYPRE_Complex * B_ext_data;
HYPRE_Complex * B_int_data;
HYPRE_BigInt * B_int_row_map;
HYPRE_BigInt * B_ext_row_map;
HYPRE_Int num_procs, my_id;
HYPRE_Int *jdata_recv_vec_starts;
HYPRE_Int *jdata_send_map_starts;
HYPRE_Int i, j, k;
HYPRE_Int start_index;
/*HYPRE_Int jrow;*/
HYPRE_Int num_rows_B_ext;
HYPRE_Int *prefix_sum_workspace;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
HYPRE_BigInt first_row_index = row_starts[0];
num_rows_B_ext = recv_vec_starts[num_recvs];
if ( num_rows_B_ext < 0 ) /* no B_ext, no communication */
{
*pB_ext_i = NULL;
*pB_ext_j = NULL;
if ( data ) { *pB_ext_data = NULL; }
if ( find_row_map ) { *pB_ext_row_map = NULL; }
*num_nonzeros = 0;
return;
}
B_int_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends] + 1, HYPRE_MEMORY_HOST);
B_ext_i = hypre_CTAlloc(HYPRE_Int, num_rows_B_ext + 1, HYPRE_MEMORY_HOST);
*pB_ext_i = B_ext_i;
if ( find_row_map )
{
B_int_row_map = hypre_CTAlloc( HYPRE_BigInt, send_map_starts[num_sends] + 1, HYPRE_MEMORY_HOST);
B_ext_row_map = hypre_CTAlloc( HYPRE_BigInt, num_rows_B_ext + 1, HYPRE_MEMORY_HOST);
*pB_ext_row_map = B_ext_row_map;
}
/*--------------------------------------------------------------------------
* generate B_int_i by adding the numbers of diag and offd elements of each
* row to be sent; B_int_i[j+1] contains the number of elements of row j,
* where the rows are selected through send_map_elmts
*--------------------------------------------------------------------------*/
jdata_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
jdata_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
jdata_send_map_starts[0] = B_int_i[0] = 0;
/*HYPRE_Int prefix_sum_workspace[(hypre_NumThreads() + 1)*num_sends];*/
prefix_sum_workspace = hypre_TAlloc(HYPRE_Int, (hypre_NumThreads() + 1) * num_sends,
HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,k)
#endif
{
/*HYPRE_Int counts[num_sends];*/
HYPRE_Int *counts;
counts = hypre_TAlloc(HYPRE_Int, num_sends, HYPRE_MEMORY_HOST);
for (i = 0; i < num_sends; i++)
{
HYPRE_Int j_begin, j_end;
hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
j_begin += send_map_starts[i];
j_end += send_map_starts[i];
HYPRE_Int count = 0;
if (skip_fine && skip_same_sign)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
HYPRE_Int len = 0;
if (diag_data[diag_i[jrow]] >= 0)
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0) { len++; }
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (offd_data[k] < 0) { len++; }
}
}
else
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0) { len++; }
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (offd_data[k] > 0) { len++; }
}
}
B_int_i[j + 1] = len;
count += len;
}
}
else if (skip_fine)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
HYPRE_Int len = 0;
for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
{
if (CF_marker[diag_j[k]] >= 0) { len++; }
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (CF_marker_offd[offd_j[k]] >= 0) { len++; }
}
B_int_i[j + 1] = len;
count += len;
}
}
else
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
HYPRE_Int len = diag_i[jrow + 1] - diag_i[jrow];
len += offd_i[jrow + 1] - offd_i[jrow];
B_int_i[j + 1] = len;
count += len;
}
}
if (find_row_map)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
B_int_row_map[j] = (HYPRE_BigInt)jrow + first_row_index;
}
}
counts[i] = count;
}
hypre_prefix_sum_multiple(counts, jdata_send_map_starts + 1, num_sends, prefix_sum_workspace);
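/* editor's reading of the call above: per-send prefix sums over threads, so
that counts[i] becomes this thread's start offset within send block i and
jdata_send_map_starts[i+1] receives the total for send i (made cumulative
by the master section below) */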
#ifdef HYPRE_USING_OPENMP
#pragma omp master
#endif
{
for (i = 1; i < num_sends; i++)
{
jdata_send_map_starts[i + 1] += jdata_send_map_starts[i];
}
/*--------------------------------------------------------------------------
* initialize communication
*--------------------------------------------------------------------------*/
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg,
&B_int_i[1], &(B_ext_i[1]) );
if ( find_row_map )
{
/* scatter/gather B_int row numbers to form array of B_ext row numbers */
row_map_comm_handle = hypre_ParCSRCommHandleCreate
(21, comm_pkg, B_int_row_map, B_ext_row_map );
}
B_int_j = hypre_TAlloc(HYPRE_BigInt, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
if (data) { B_int_data = hypre_TAlloc(HYPRE_Complex, jdata_send_map_starts[num_sends], HYPRE_MEMORY_HOST); }
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
for (i = 0; i < num_sends; i++)
{
HYPRE_Int j_begin, j_end;
hypre_GetSimpleThreadPartition(&j_begin, &j_end, send_map_starts[i + 1] - send_map_starts[i]);
j_begin += send_map_starts[i];
j_end += send_map_starts[i];
HYPRE_Int count = counts[i] + jdata_send_map_starts[i];
if (data)
{
if (skip_same_sign && skip_fine)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
/*HYPRE_Int count_begin = count;*/
if (diag_data[diag_i[jrow]] >= 0)
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] < 0 && CF_marker[diag_j[k]] >= 0)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
B_int_data[count] = diag_data[k];
count++;
}
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
HYPRE_Int c = offd_j[k];
HYPRE_BigInt c_global = col_map_offd[c];
if (offd_data[k] < 0)
{
B_int_j[count] = c_global;
B_int_data[count] = offd_data[k];
count++;
}
}
}
else
{
for (k = diag_i[jrow] + 1; k < diag_i[jrow + 1]; k++)
{
if (diag_data[k] > 0 && CF_marker[diag_j[k]] >= 0)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
B_int_data[count] = diag_data[k];
count++;
}
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
HYPRE_Int c = offd_j[k];
HYPRE_BigInt c_global = col_map_offd[c];
if (offd_data[k] > 0)
{
B_int_j[count] = c_global;
B_int_data[count] = offd_data[k];
count++;
}
}
}
}
}
else
{
for (j = j_begin; j < j_end; ++j)
{
HYPRE_Int jrow = send_map_elmts[j];
for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
B_int_data[count] = diag_data[k];
count++;
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
B_int_j[count] = col_map_offd[offd_j[k]];
B_int_data[count] = offd_data[k];
count++;
}
}
}
} // data
else
{
if (skip_fine)
{
for (j = j_begin; j < j_end; j++)
{
HYPRE_Int jrow = send_map_elmts[j];
for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
{
if (CF_marker[diag_j[k]] >= 0)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
count++;
}
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
if (CF_marker_offd[offd_j[k]] >= 0)
{
B_int_j[count] = col_map_offd[offd_j[k]];
count++;
}
}
}
}
else
{
for (j = j_begin; j < j_end; ++j)
{
HYPRE_Int jrow = send_map_elmts[j];
for (k = diag_i[jrow]; k < diag_i[jrow + 1]; k++)
{
B_int_j[count] = (HYPRE_BigInt)diag_j[k] + first_col_diag;
count++;
}
for (k = offd_i[jrow]; k < offd_i[jrow + 1]; k++)
{
B_int_j[count] = col_map_offd[offd_j[k]];
count++;
}
}
}
} // !data
} /* for each send target */
hypre_TFree(counts, HYPRE_MEMORY_HOST);
} /* omp parallel. JSP: this takes most of time in this function */
hypre_TFree(prefix_sum_workspace, HYPRE_MEMORY_HOST);
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) =
hypre_ParCSRCommPkgSendProcs(comm_pkg);
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) =
hypre_ParCSRCommPkgRecvProcs(comm_pkg);
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = jdata_send_map_starts;
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
/*--------------------------------------------------------------------------
* after the communication exchange, B_ext_i[j+1] contains the number of
* elements of row j; turn B_ext_i into a prefix sum and compute
* *num_nonzeros for B_ext
*--------------------------------------------------------------------------*/
for (i = 0; i < num_recvs; i++)
{
for (j = recv_vec_starts[i]; j < recv_vec_starts[i + 1]; j++)
{
B_ext_i[j + 1] += B_ext_i[j];
}
}
*num_nonzeros = B_ext_i[num_rows_B_ext];
*pB_ext_j = hypre_TAlloc(HYPRE_BigInt, *num_nonzeros, HYPRE_MEMORY_HOST);
B_ext_j = *pB_ext_j;
if (data)
{
*pB_ext_data = hypre_TAlloc(HYPRE_Complex, *num_nonzeros, HYPRE_MEMORY_HOST);
B_ext_data = *pB_ext_data;
}
for (i = 0; i < num_recvs; i++)
{
start_index = B_ext_i[recv_vec_starts[i]];
*num_nonzeros = B_ext_i[recv_vec_starts[i + 1]] - start_index;
jdata_recv_vec_starts[i + 1] = B_ext_i[recv_vec_starts[i + 1]];
}
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = jdata_recv_vec_starts;
*comm_handle_idx = hypre_ParCSRCommHandleCreate(21, tmp_comm_pkg, B_int_j, B_ext_j);
if (data)
{
*comm_handle_data = hypre_ParCSRCommHandleCreate(1, tmp_comm_pkg, B_int_data,
B_ext_data);
}
if (row_map_comm_handle)
{
hypre_ParCSRCommHandleDestroy(row_map_comm_handle);
row_map_comm_handle = NULL;
}
hypre_TFree(jdata_send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(jdata_recv_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
hypre_TFree(B_int_i, HYPRE_MEMORY_HOST);
if ( find_row_map ) { hypre_TFree(B_int_row_map, HYPRE_MEMORY_HOST); }
/* end generic part */
}
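/*--------------------------------------------------------------------------
 * Editor's sketch (not hypre code): the overlap pattern intended for the
 * routine above, mirroring hypre_ParCSRMatrixExtractBExt_Arrays below.
 * The index/data exchanges are started, independent local work proceeds,
 * and both handles are destroyed (their send buffers freed) before
 * B_ext_j / B_ext_data are read. All variables are assumed to be set up
 * as in the callers in this file.
 *--------------------------------------------------------------------------*/
#if 0
hypre_ParCSRCommHandle *h_idx, *h_data;
hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
   &B_ext_i, &B_ext_j, &B_ext_data, &B_ext_row_map, &num_nonzeros,
   1 /* data */, 0 /* find_row_map */, comm, comm_pkg, num_cols_B,
   num_recvs, num_sends, first_col_diag, row_starts, recv_vec_starts,
   send_map_starts, send_map_elmts, diag_i, diag_j, offd_i, offd_j,
   col_map_offd, diag_data, offd_data, &h_idx, &h_data, NULL, NULL, 0, 0);
/* ... local work that does not touch B_ext_j or B_ext_data ... */
HYPRE_Int  *send_idx  = (HYPRE_Int *)  h_idx->send_data;
HYPRE_Real *send_data = (HYPRE_Real *) h_data->send_data;
hypre_ParCSRCommHandleDestroy(h_idx);
hypre_TFree(send_idx, HYPRE_MEMORY_HOST);
hypre_ParCSRCommHandleDestroy(h_data);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
/* B_ext_i, B_ext_j, and B_ext_data are now valid */
#endif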
void hypre_ParCSRMatrixExtractBExt_Arrays(
HYPRE_Int ** pB_ext_i,
HYPRE_BigInt ** pB_ext_j,
HYPRE_Complex ** pB_ext_data,
HYPRE_BigInt ** pB_ext_row_map,
HYPRE_Int * num_nonzeros,
HYPRE_Int data,
HYPRE_Int find_row_map,
MPI_Comm comm,
hypre_ParCSRCommPkg * comm_pkg,
HYPRE_Int num_cols_B,
HYPRE_Int num_recvs,
HYPRE_Int num_sends,
HYPRE_BigInt first_col_diag,
HYPRE_BigInt * row_starts,
HYPRE_Int * recv_vec_starts,
HYPRE_Int * send_map_starts,
HYPRE_Int * send_map_elmts,
HYPRE_Int * diag_i,
HYPRE_Int * diag_j,
HYPRE_Int * offd_i,
HYPRE_Int * offd_j,
HYPRE_BigInt * col_map_offd,
HYPRE_Real * diag_data,
HYPRE_Real * offd_data
)
{
hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;
hypre_ParCSRMatrixExtractBExt_Arrays_Overlap(
pB_ext_i, pB_ext_j, pB_ext_data, pB_ext_row_map, num_nonzeros,
data, find_row_map, comm, comm_pkg, num_cols_B, num_recvs, num_sends,
first_col_diag, row_starts, recv_vec_starts, send_map_starts, send_map_elmts,
diag_i, diag_j, offd_i, offd_j, col_map_offd, diag_data, offd_data,
&comm_handle_idx, &comm_handle_data,
NULL, NULL,
0, 0);
HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_idx);
hypre_TFree(send_idx, HYPRE_MEMORY_HOST);
if (data)
{
HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_data);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
}
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixExtractBExt_Overlap : extracts the rows of B that are
* stored on other processors but are needed locally for multiplication
* with A. The rows are returned as a CSRMatrix, together with the
* communication handles so that the exchange can overlap local work.
*--------------------------------------------------------------------------*/
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt_Overlap( hypre_ParCSRMatrix *B,
hypre_ParCSRMatrix *A,
HYPRE_Int data,
hypre_ParCSRCommHandle **comm_handle_idx,
hypre_ParCSRCommHandle **comm_handle_data,
HYPRE_Int *CF_marker, HYPRE_Int *CF_marker_offd,
HYPRE_Int skip_fine, HYPRE_Int skip_same_sign )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(B);
HYPRE_BigInt first_col_diag = hypre_ParCSRMatrixFirstColDiag(B);
/*HYPRE_Int first_row_index = hypre_ParCSRMatrixFirstRowIndex(B);*/
HYPRE_BigInt *col_map_offd = hypre_ParCSRMatrixColMapOffd(B);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
HYPRE_Int num_recvs;
HYPRE_Int *recv_vec_starts;
HYPRE_Int num_sends;
HYPRE_Int *send_map_starts;
HYPRE_Int *send_map_elmts;
hypre_CSRMatrix *diag = hypre_ParCSRMatrixDiag(B);
HYPRE_Int *diag_i = hypre_CSRMatrixI(diag);
HYPRE_Int *diag_j = hypre_CSRMatrixJ(diag);
HYPRE_Real *diag_data = hypre_CSRMatrixData(diag);
hypre_CSRMatrix *offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int *offd_i = hypre_CSRMatrixI(offd);
HYPRE_Int *offd_j = hypre_CSRMatrixJ(offd);
HYPRE_Real *offd_data = hypre_CSRMatrixData(offd);
HYPRE_Int num_cols_B, num_nonzeros;
HYPRE_Int num_rows_B_ext;
hypre_CSRMatrix *B_ext;
HYPRE_Int *B_ext_i;
HYPRE_BigInt *B_ext_j;
HYPRE_Complex *B_ext_data;
HYPRE_BigInt *idummy;
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings
*--------------------------------------------------------------------*/
if (!hypre_ParCSRMatrixCommPkg(A))
{
hypre_MatvecCommPkgCreate(A);
}
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B);
num_rows_B_ext = recv_vec_starts[num_recvs];
hypre_ParCSRMatrixExtractBExt_Arrays_Overlap
( &B_ext_i, &B_ext_j, &B_ext_data, &idummy,
&num_nonzeros,
data, 0, comm, comm_pkg,
num_cols_B, num_recvs, num_sends,
first_col_diag, B->row_starts,
recv_vec_starts, send_map_starts, send_map_elmts,
diag_i, diag_j, offd_i, offd_j, col_map_offd,
diag_data, offd_data,
comm_handle_idx, comm_handle_data,
CF_marker, CF_marker_offd,
skip_fine, skip_same_sign
);
B_ext = hypre_CSRMatrixCreate(num_rows_B_ext, num_cols_B, num_nonzeros);
hypre_CSRMatrixMemoryLocation(B_ext) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(B_ext) = B_ext_i;
hypre_CSRMatrixBigJ(B_ext) = B_ext_j;
if (data) { hypre_CSRMatrixData(B_ext) = B_ext_data; }
return B_ext;
}
hypre_CSRMatrix *
hypre_ParCSRMatrixExtractBExt( hypre_ParCSRMatrix *B,
hypre_ParCSRMatrix *A,
HYPRE_Int want_data )
{
#if 0
hypre_ParCSRCommHandle *comm_handle_idx, *comm_handle_data;
hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt_Overlap(B, A, want_data, &comm_handle_idx,
&comm_handle_data, NULL, NULL, 0, 0);
HYPRE_Int *send_idx = (HYPRE_Int *)comm_handle_idx->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_idx);
hypre_TFree(send_idx, HYPRE_MEMORY_HOST);
if (want_data)
{
HYPRE_Real *send_data = (HYPRE_Real *)comm_handle_data->send_data;
hypre_ParCSRCommHandleDestroy(comm_handle_data);
hypre_TFree(send_data, HYPRE_MEMORY_HOST);
}
#else
hypre_assert( hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(B)) ==
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(B)) );
hypre_CSRMatrix *B_ext;
void *request;
if (!hypre_ParCSRMatrixCommPkg(A))
{
hypre_MatvecCommPkgCreate(A);
}
hypre_ParcsrGetExternalRowsInit(B,
hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(A)),
hypre_ParCSRMatrixColMapOffd(A),
hypre_ParCSRMatrixCommPkg(A),
want_data,
&request);
B_ext = hypre_ParcsrGetExternalRowsWait(request);
#endif
return B_ext;
}
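/*--------------------------------------------------------------------------
 * Editor's sketch (not hypre code): this is how hypre_ParMatmul above uses
 * the routine -- fetch the off-processor rows of B matched against A's
 * off-diagonal columns, read them, then free them.
 *--------------------------------------------------------------------------*/
#if 0
hypre_CSRMatrix *B_ext = hypre_ParCSRMatrixExtractBExt(B, A, 1 /* want_data */);
/* row pointers:    hypre_CSRMatrixI(B_ext)
   global columns:  hypre_CSRMatrixBigJ(B_ext)
   values:          hypre_CSRMatrixData(B_ext) */
hypre_CSRMatrixDestroy(B_ext);
#endif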
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixTransposeHost
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixTransposeHost( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix **AT_ptr,
HYPRE_Int data )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int num_cols = hypre_ParCSRMatrixNumCols(A);
HYPRE_BigInt first_row_index = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_BigInt *col_starts = hypre_ParCSRMatrixColStarts(A);
HYPRE_Int num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_Int num_sends, num_recvs, num_cols_offd_AT;
HYPRE_Int i, j, k, index, counter, j_row;
HYPRE_BigInt value;
hypre_ParCSRMatrix *AT;
hypre_CSRMatrix *AT_diag;
hypre_CSRMatrix *AT_offd;
hypre_CSRMatrix *AT_tmp;
HYPRE_BigInt first_row_index_AT, first_col_diag_AT;
HYPRE_Int local_num_rows_AT, local_num_cols_AT;
HYPRE_Int *AT_tmp_i;
HYPRE_Int *AT_tmp_j;
HYPRE_BigInt *AT_big_j = NULL;
HYPRE_Complex *AT_tmp_data;
HYPRE_Int *AT_buf_i;
HYPRE_BigInt *AT_buf_j;
HYPRE_Complex *AT_buf_data;
HYPRE_Int *AT_offd_i;
HYPRE_Int *AT_offd_j;
HYPRE_Complex *AT_offd_data;
HYPRE_BigInt *col_map_offd_AT;
HYPRE_BigInt row_starts_AT[2];
HYPRE_BigInt col_starts_AT[2];
HYPRE_Int num_procs, my_id;
HYPRE_Int *recv_procs, *send_procs;
HYPRE_Int *recv_vec_starts;
HYPRE_Int *send_map_starts;
HYPRE_Int *send_map_elmts;
HYPRE_Int *tmp_recv_vec_starts;
HYPRE_Int *tmp_send_map_starts;
hypre_ParCSRCommPkg *tmp_comm_pkg;
hypre_ParCSRCommHandle *comm_handle;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
num_cols_offd_AT = 0;
counter = 0;
AT_offd_j = NULL;
AT_offd_data = NULL;
col_map_offd_AT = NULL;
HYPRE_MemoryLocation memory_location = hypre_ParCSRMatrixMemoryLocation(A);
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings
*--------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
if (num_procs > 1)
{
hypre_CSRMatrixTranspose (A_offd, &AT_tmp, data);
AT_tmp_i = hypre_CSRMatrixI(AT_tmp);
AT_tmp_j = hypre_CSRMatrixJ(AT_tmp);
if (data)
{
AT_tmp_data = hypre_CSRMatrixData(AT_tmp);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg);
send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg);
send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg);
AT_buf_i = hypre_CTAlloc(HYPRE_Int, send_map_starts[num_sends], HYPRE_MEMORY_HOST);
if (AT_tmp_i[num_cols_offd])
{
AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_tmp_i[num_cols_offd], HYPRE_MEMORY_HOST);
}
for (i = 0; i < AT_tmp_i[num_cols_offd]; i++)
{
//AT_tmp_j[i] += first_row_index;
AT_big_j[i] = (HYPRE_BigInt)AT_tmp_j[i] + first_row_index;
}
for (i = 0; i < num_cols_offd; i++)
{
AT_tmp_i[i] = AT_tmp_i[i + 1] - AT_tmp_i[i];
}
comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg, AT_tmp_i, AT_buf_i);
}
hypre_CSRMatrixTranspose(A_diag, &AT_diag, data);
AT_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols + 1, memory_location);
if (num_procs > 1)
{
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
tmp_send_map_starts = hypre_CTAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
tmp_recv_vec_starts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
tmp_send_map_starts[0] = send_map_starts[0];
for (i = 0; i < num_sends; i++)
{
tmp_send_map_starts[i + 1] = tmp_send_map_starts[i];
for (j = send_map_starts[i]; j < send_map_starts[i + 1]; j++)
{
tmp_send_map_starts[i + 1] += AT_buf_i[j];
AT_offd_i[send_map_elmts[j] + 1] += AT_buf_i[j];
}
}
for (i = 0; i < num_cols; i++)
{
AT_offd_i[i + 1] += AT_offd_i[i];
}
tmp_recv_vec_starts[0] = recv_vec_starts[0];
for (i = 0; i < num_recvs; i++)
{
tmp_recv_vec_starts[i + 1] = tmp_recv_vec_starts[i];
for (j = recv_vec_starts[i]; j < recv_vec_starts[i + 1]; j++)
{
tmp_recv_vec_starts[i + 1] += AT_tmp_i[j];
}
}
tmp_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(tmp_comm_pkg) = comm;
hypre_ParCSRCommPkgNumSends(tmp_comm_pkg) = num_sends;
hypre_ParCSRCommPkgNumRecvs(tmp_comm_pkg) = num_recvs;
hypre_ParCSRCommPkgRecvProcs(tmp_comm_pkg) = recv_procs;
hypre_ParCSRCommPkgSendProcs(tmp_comm_pkg) = send_procs;
hypre_ParCSRCommPkgRecvVecStarts(tmp_comm_pkg) = tmp_recv_vec_starts;
hypre_ParCSRCommPkgSendMapStarts(tmp_comm_pkg) = tmp_send_map_starts;
AT_buf_j = hypre_CTAlloc(HYPRE_BigInt, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
comm_handle = hypre_ParCSRCommHandleCreate(22, tmp_comm_pkg, AT_big_j,
AT_buf_j);
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);
if (data)
{
AT_buf_data = hypre_CTAlloc(HYPRE_Complex, tmp_send_map_starts[num_sends], HYPRE_MEMORY_HOST);
comm_handle = hypre_ParCSRCommHandleCreate(2, tmp_comm_pkg, AT_tmp_data,
AT_buf_data);
hypre_ParCSRCommHandleDestroy(comm_handle);
comm_handle = NULL;
}
hypre_TFree(tmp_recv_vec_starts, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_send_map_starts, HYPRE_MEMORY_HOST);
hypre_TFree(tmp_comm_pkg, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(AT_tmp);
if (AT_offd_i[num_cols])
{
AT_offd_j = hypre_CTAlloc(HYPRE_Int, AT_offd_i[num_cols], memory_location);
AT_big_j = hypre_CTAlloc(HYPRE_BigInt, AT_offd_i[num_cols], HYPRE_MEMORY_HOST);
if (data)
{
AT_offd_data = hypre_CTAlloc(HYPRE_Complex, AT_offd_i[num_cols], memory_location);
}
}
else
{
AT_offd_j = NULL;
AT_offd_data = NULL;
}
counter = 0;
for (i = 0; i < num_sends; i++)
{
for (j = send_map_starts[i]; j < send_map_starts[i + 1]; j++)
{
j_row = send_map_elmts[j];
index = AT_offd_i[j_row];
for (k = 0; k < AT_buf_i[j]; k++)
{
if (data)
{
AT_offd_data[index] = AT_buf_data[counter];
}
AT_big_j[index++] = AT_buf_j[counter++];
}
AT_offd_i[j_row] = index;
}
}
for (i = num_cols; i > 0; i--)
{
AT_offd_i[i] = AT_offd_i[i - 1];
}
AT_offd_i[0] = 0;
if (counter)
{
hypre_BigQsort0(AT_buf_j, 0, counter - 1);
num_cols_offd_AT = 1;
value = AT_buf_j[0];
for (i = 1; i < counter; i++)
{
if (value < AT_buf_j[i])
{
AT_buf_j[num_cols_offd_AT++] = AT_buf_j[i];
value = AT_buf_j[i];
}
}
}
if (num_cols_offd_AT)
{
col_map_offd_AT = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_AT, HYPRE_MEMORY_HOST);
}
else
{
col_map_offd_AT = NULL;
}
for (i = 0; i < num_cols_offd_AT; i++)
{
col_map_offd_AT[i] = AT_buf_j[i];
}
hypre_TFree(AT_buf_i, HYPRE_MEMORY_HOST);
hypre_TFree(AT_buf_j, HYPRE_MEMORY_HOST);
if (data)
{
hypre_TFree(AT_buf_data, HYPRE_MEMORY_HOST);
}
for (i = 0; i < counter; i++)
{
AT_offd_j[i] = hypre_BigBinarySearch(col_map_offd_AT, AT_big_j[i],
num_cols_offd_AT);
}
hypre_TFree(AT_big_j, HYPRE_MEMORY_HOST);
}
AT_offd = hypre_CSRMatrixCreate(num_cols, num_cols_offd_AT, counter);
hypre_CSRMatrixMemoryLocation(AT_offd) = memory_location;
hypre_CSRMatrixI(AT_offd) = AT_offd_i;
hypre_CSRMatrixJ(AT_offd) = AT_offd_j;
hypre_CSRMatrixData(AT_offd) = AT_offd_data;
for (i = 0; i < 2; i++)
{
row_starts_AT[i] = col_starts[i];
col_starts_AT[i] = row_starts[i];
}
first_row_index_AT = row_starts_AT[0];
first_col_diag_AT = col_starts_AT[0];
local_num_rows_AT = (HYPRE_Int)(row_starts_AT[1] - first_row_index_AT );
local_num_cols_AT = (HYPRE_Int)(col_starts_AT[1] - first_col_diag_AT);
AT = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm(AT) = comm;
hypre_ParCSRMatrixDiag(AT) = AT_diag;
hypre_ParCSRMatrixOffd(AT) = AT_offd;
hypre_ParCSRMatrixGlobalNumRows(AT) = hypre_ParCSRMatrixGlobalNumCols(A);
hypre_ParCSRMatrixGlobalNumCols(AT) = hypre_ParCSRMatrixGlobalNumRows(A);
hypre_ParCSRMatrixRowStarts(AT)[0] = row_starts_AT[0];
hypre_ParCSRMatrixRowStarts(AT)[1] = row_starts_AT[1];
hypre_ParCSRMatrixColStarts(AT)[0] = col_starts_AT[0];
hypre_ParCSRMatrixColStarts(AT)[1] = col_starts_AT[1];
hypre_ParCSRMatrixColMapOffd(AT) = col_map_offd_AT;
hypre_ParCSRMatrixFirstRowIndex(AT) = first_row_index_AT;
hypre_ParCSRMatrixFirstColDiag(AT) = first_col_diag_AT;
hypre_ParCSRMatrixLastRowIndex(AT) = first_row_index_AT + local_num_rows_AT - 1;
hypre_ParCSRMatrixLastColDiag(AT) = first_col_diag_AT + local_num_cols_AT - 1;
hypre_ParCSRMatrixOwnsData(AT) = 1;
hypre_ParCSRMatrixCommPkg(AT) = NULL;
hypre_ParCSRMatrixCommPkgT(AT) = NULL;
hypre_ParCSRMatrixRowindices(AT) = NULL;
hypre_ParCSRMatrixRowvalues(AT) = NULL;
hypre_ParCSRMatrixGetrowactive(AT) = 0;
hypre_ParCSRMatrixOwnsAssumedPartition(AT) = 1;
*AT_ptr = AT;
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixTranspose
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixTranspose( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix **AT_ptr,
HYPRE_Int data )
{
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_GpuProfilingPushRange("ParCSRMatrixTranspose");
#endif
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
hypre_ParCSRMatrixTransposeDevice(A, AT_ptr, data);
}
else
#endif
{
hypre_ParCSRMatrixTransposeHost(A, AT_ptr, data);
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_GpuProfilingPopRange();
#endif
return hypre_error_flag;
}
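/*--------------------------------------------------------------------------
 * Editor's sketch (not hypre code): transposing a ParCSR matrix. Passing
 * data = 0 builds only the sparsity pattern of A^T.
 *--------------------------------------------------------------------------*/
#if 0
hypre_ParCSRMatrix *AT = NULL;
hypre_ParCSRMatrixTranspose(A, &AT, 1 /* transpose the values too */);
/* ... use AT ... */
hypre_ParCSRMatrixDestroy(AT);
#endif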
/* -----------------------------------------------------------------------------
* generate a parallel spanning tree (for the Maxwell equations)
* G_csr is the node-to-edge connectivity matrix
* ----------------------------------------------------------------------------- */
void
hypre_ParCSRMatrixGenSpanningTree( hypre_ParCSRMatrix *G_csr,
HYPRE_Int **indices,
HYPRE_Int G_type )
{
HYPRE_BigInt nrows_G, ncols_G;
HYPRE_Int *G_diag_i, *G_diag_j, *GT_diag_mat, i, j, k, edge;
HYPRE_Int *nodes_marked, *edges_marked, *queue, queue_tail, queue_head, node;
HYPRE_Int mypid, nprocs, n_children, *children, nsends, *send_procs, *recv_cnts;
HYPRE_Int nrecvs, *recv_procs, n_proc_array, *proc_array, *pgraph_i, *pgraph_j;
HYPRE_Int parent, proc, proc2, node2, found, *t_indices, tree_size, *T_diag_i;
HYPRE_Int *T_diag_j, *counts, offset;
MPI_Comm comm;
hypre_ParCSRCommPkg *comm_pkg;
hypre_CSRMatrix *G_diag;
/* fetch G matrix (G_type = 0 ==> node to edge) */
if (G_type == 0)
{
nrows_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
ncols_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
G_diag = hypre_ParCSRMatrixDiag(G_csr);
G_diag_i = hypre_CSRMatrixI(G_diag);
G_diag_j = hypre_CSRMatrixJ(G_diag);
}
else
{
nrows_G = hypre_ParCSRMatrixGlobalNumCols(G_csr);
ncols_G = hypre_ParCSRMatrixGlobalNumRows(G_csr);
G_diag = hypre_ParCSRMatrixDiag(G_csr);
T_diag_i = hypre_CSRMatrixI(G_diag);
T_diag_j = hypre_CSRMatrixJ(G_diag);
counts = hypre_TAlloc(HYPRE_Int, nrows_G, HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_G; i++) { counts[i] = 0; }
for (i = 0; i < T_diag_i[ncols_G]; i++) { counts[T_diag_j[i]]++; }
G_diag_i = hypre_TAlloc(HYPRE_Int, (nrows_G + 1), HYPRE_MEMORY_HOST);
G_diag_j = hypre_TAlloc(HYPRE_Int, T_diag_i[ncols_G], HYPRE_MEMORY_HOST);
G_diag_i[0] = 0;
for (i = 1; i <= nrows_G; i++) { G_diag_i[i] = G_diag_i[i - 1] + counts[i - 1]; }
for (i = 0; i < ncols_G; i++)
{
for (j = T_diag_i[i]; j < T_diag_i[i + 1]; j++)
{
k = T_diag_j[j];
offset = G_diag_i[k]++;
G_diag_j[offset] = i;
}
}
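/* the scatter above used G_diag_i[k]++ as an insertion cursor; rebuild the
row pointers */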
G_diag_i[0] = 0;
for (i = 1; i <= nrows_G; i++)
{
G_diag_i[i] = G_diag_i[i - 1] + counts[i - 1];
}
hypre_TFree(counts, HYPRE_MEMORY_HOST);
}
/* form G transpose in special form (2 nodes per edge max) */
GT_diag_mat = hypre_TAlloc(HYPRE_Int, 2 * ncols_G, HYPRE_MEMORY_HOST);
for (i = 0; i < 2 * ncols_G; i++) { GT_diag_mat[i] = -1; }
for (i = 0; i < nrows_G; i++)
{
for (j = G_diag_i[i]; j < G_diag_i[i + 1]; j++)
{
edge = G_diag_j[j];
if (GT_diag_mat[edge * 2] == -1) { GT_diag_mat[edge * 2] = i; }
else { GT_diag_mat[edge * 2 + 1] = i; }
}
}
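/* GT_diag_mat[2*e] and GT_diag_mat[2*e+1] now hold the (at most two)
endpoint nodes of edge e, with -1 marking a missing endpoint */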
/* BFS on the local matrix graph to find tree */
nodes_marked = hypre_TAlloc(HYPRE_Int, nrows_G, HYPRE_MEMORY_HOST);
edges_marked = hypre_TAlloc(HYPRE_Int, ncols_G, HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_G; i++) { nodes_marked[i] = 0; }
for (i = 0; i < ncols_G; i++) { edges_marked[i] = 0; }
queue = hypre_TAlloc(HYPRE_Int, nrows_G, HYPRE_MEMORY_HOST);
queue_head = 0;
queue_tail = 1;
queue[0] = 0;
nodes_marked[0] = 1;
while ((queue_tail - queue_head) > 0)
{
node = queue[queue_tail - 1];
queue_tail--;
for (i = G_diag_i[node]; i < G_diag_i[node + 1]; i++)
{
edge = G_diag_j[i];
if (edges_marked[edge] == 0)
{
if (GT_diag_mat[2 * edge + 1] != -1)
{
node2 = GT_diag_mat[2 * edge];
if (node2 == node) { node2 = GT_diag_mat[2 * edge + 1]; }
if (nodes_marked[node2] == 0)
{
nodes_marked[node2] = 1;
edges_marked[edge] = 1;
queue[queue_tail] = node2;
queue_tail++;
}
}
}
}
}
hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST);
hypre_TFree(queue, HYPRE_MEMORY_HOST);
hypre_TFree(GT_diag_mat, HYPRE_MEMORY_HOST);
/* fetch the communication information from the matrix */
comm = hypre_ParCSRMatrixComm(G_csr);
hypre_MPI_Comm_rank(comm, &mypid);
hypre_MPI_Comm_size(comm, &nprocs);
comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
if (comm_pkg == NULL)
{
hypre_MatvecCommPkgCreate((hypre_ParCSRMatrix *) G_csr);
comm_pkg = hypre_ParCSRMatrixCommPkg(G_csr);
}
/* construct processor graph based on node-edge connection */
/* (local edges connected to neighbor processor nodes) */
n_children = 0;
nrecvs = nsends = 0;
if (nprocs > 1)
{
nsends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg);
nrecvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
proc_array = NULL;
if ((nsends + nrecvs) > 0)
{
n_proc_array = 0;
proc_array = hypre_TAlloc(HYPRE_Int, (nsends + nrecvs), HYPRE_MEMORY_HOST);
for (i = 0; i < nsends; i++) { proc_array[i] = send_procs[i]; }
for (i = 0; i < nrecvs; i++) { proc_array[nsends + i] = recv_procs[i]; }
hypre_qsort0(proc_array, 0, nsends + nrecvs - 1);
n_proc_array = 1;
for (i = 1; i < nrecvs + nsends; i++)
if (proc_array[i] != proc_array[n_proc_array])
{
proc_array[n_proc_array++] = proc_array[i];
}
}
pgraph_i = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST);
recv_cnts = hypre_TAlloc(HYPRE_Int, nprocs, HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&n_proc_array, 1, HYPRE_MPI_INT, recv_cnts, 1,
HYPRE_MPI_INT, comm);
pgraph_i[0] = 0;
for (i = 1; i <= nprocs; i++)
{
pgraph_i[i] = pgraph_i[i - 1] + recv_cnts[i - 1];
}
pgraph_j = hypre_TAlloc(HYPRE_Int, pgraph_i[nprocs], HYPRE_MEMORY_HOST);
hypre_MPI_Allgatherv(proc_array, n_proc_array, HYPRE_MPI_INT, pgraph_j,
recv_cnts, pgraph_i, HYPRE_MPI_INT, comm);
hypre_TFree(recv_cnts, HYPRE_MEMORY_HOST);
/* traverse the processor graph (again stack-based, depth-first)
   to determine parent and children */
nodes_marked = hypre_TAlloc(HYPRE_Int, nprocs, HYPRE_MEMORY_HOST);
for (i = 0; i < nprocs; i++) { nodes_marked[i] = -1; }
queue = hypre_TAlloc(HYPRE_Int, nprocs, HYPRE_MEMORY_HOST);
queue_head = 0;
queue_tail = 1;
node = 0;
queue[0] = node;
while ((queue_tail - queue_head) > 0)
{
proc = queue[queue_tail - 1];
queue_tail--;
for (i = pgraph_i[proc]; i < pgraph_i[proc + 1]; i++)
{
proc2 = pgraph_j[i];
if (nodes_marked[proc2] < 0)
{
nodes_marked[proc2] = proc;
queue[queue_tail] = proc2;
queue_tail++;
}
}
}
parent = nodes_marked[mypid];
n_children = 0;
for (i = 0; i < nprocs; i++) if (nodes_marked[i] == mypid) { n_children++; }
if (n_children == 0) { children = NULL; }
else
{
children = hypre_TAlloc(HYPRE_Int, n_children, HYPRE_MEMORY_HOST);
n_children = 0;
for (i = 0; i < nprocs; i++)
if (nodes_marked[i] == mypid) { children[n_children++] = i; }
}
hypre_TFree(nodes_marked, HYPRE_MEMORY_HOST);
hypre_TFree(queue, HYPRE_MEMORY_HOST);
hypre_TFree(pgraph_i, HYPRE_MEMORY_HOST);
hypre_TFree(pgraph_j, HYPRE_MEMORY_HOST);
}
/* first, the connection with my parent: if an edge on my parent's side *
 * is incident to one of my nodes, then my parent will mark it          */
found = 0;
for (i = 0; i < nrecvs; i++)
{
proc = hypre_ParCSRCommPkgRecvProc(comm_pkg, i);
if (proc == parent)
{
found = 1;
break;
}
}
/* but if all the edges connected to my parent are on my side, *
 * then I just pick one of them as a tree edge                 */
if (found == 0)
{
for (i = 0; i < nsends; i++)
{
proc = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (proc == parent)
{
k = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, k);
edges_marked[edge] = 1;
break;
}
}
}
/* next, if my processor has an edge incident on a node in one of my *
 * children, put this edge in the tree; if there is no such edge,    *
 * assume the child will pick one up                                 */
for (j = 0; j < n_children; j++)
{
proc = children[j];
for (i = 0; i < nsends; i++)
{
proc2 = hypre_ParCSRCommPkgSendProc(comm_pkg, i);
if (proc == proc2)
{
k = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
edge = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, k);
edges_marked[edge] = 1;
break;
}
}
}
if (n_children > 0)
{
hypre_TFree(children, HYPRE_MEMORY_HOST);
}
/* count the size of the tree */
tree_size = 0;
for (i = 0; i < ncols_G; i++)
if (edges_marked[i] == 1) { tree_size++; }
t_indices = hypre_TAlloc(HYPRE_Int, (tree_size + 1), HYPRE_MEMORY_HOST);
t_indices[0] = tree_size;
tree_size = 1;
for (i = 0; i < ncols_G; i++)
if (edges_marked[i] == 1) { t_indices[tree_size++] = i; }
(*indices) = t_indices;
hypre_TFree(edges_marked, HYPRE_MEMORY_HOST);
if (G_type != 0)
{
hypre_TFree(G_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(G_diag_j, HYPRE_MEMORY_HOST);
}
}
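/* A minimal usage sketch (illustrative only; `G` is a hypothetical,
 * already-assembled node-to-edge connectivity matrix):
 *
 *    HYPRE_Int *tree_edges = NULL;
 *    hypre_ParCSRMatrixGenSpanningTree(G, &tree_edges, 0);
 *    // tree_edges[0] holds the tree size; tree_edges[1..] the edge indices
 *    hypre_TFree(tree_edges, HYPRE_MEMORY_HOST);
 */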
/* -----------------------------------------------------------------------------
* extract submatrices based on given indices
* ----------------------------------------------------------------------------- */
void hypre_ParCSRMatrixExtractSubmatrices( hypre_ParCSRMatrix *A_csr,
HYPRE_Int *indices2,
hypre_ParCSRMatrix ***submatrices )
{
HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs;
HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices;
HYPRE_BigInt *itmp_array;
HYPRE_Int nnz11, nnz12, nnz21, nnz22, col, ncols_offd, nnz_offd, nnz_diag;
HYPRE_Int nrows, nnz;
HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts;
HYPRE_Int *diag_i, *diag_j, row, *offd_i;
HYPRE_Complex *A_diag_a, *diag_a;
hypre_ParCSRMatrix *A11_csr, *A12_csr, *A21_csr, *A22_csr;
hypre_CSRMatrix *A_diag, *diag, *offd;
MPI_Comm comm;
/* -----------------------------------------------------
* first make sure the incoming indices are in order
* ----------------------------------------------------- */
nindices = indices2[0];
indices = &(indices2[1]);
hypre_qsort0(indices, 0, nindices - 1);
/* -----------------------------------------------------
* fetch matrix information
* ----------------------------------------------------- */
nrows_A = (HYPRE_Int) hypre_ParCSRMatrixGlobalNumRows(A_csr);
A_diag = hypre_ParCSRMatrixDiag(A_csr);
A_diag_i = hypre_CSRMatrixI(A_diag);
A_diag_j = hypre_CSRMatrixJ(A_diag);
A_diag_a = hypre_CSRMatrixData(A_diag);
comm = hypre_ParCSRMatrixComm(A_csr);
hypre_MPI_Comm_rank(comm, &mypid);
hypre_MPI_Comm_size(comm, &nprocs);
if (nprocs > 1)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ExtractSubmatrices: cannot handle nprocs > 1 yet.\n");
exit(1);
}
/* -----------------------------------------------------
* compute new matrix dimensions
* ----------------------------------------------------- */
proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST);
proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1,
HYPRE_MPI_INT, comm);
k = 0;
for (i = 0; i < nprocs; i++)
{
j = proc_offsets1[i];
proc_offsets1[i] = k;
k += j;
}
proc_offsets1[nprocs] = k;
itmp_array = hypre_ParCSRMatrixRowStarts(A_csr);
for (i = 0; i <= nprocs; i++)
{
proc_offsets2[i] = itmp_array[i] - proc_offsets1[i];
}
/* -----------------------------------------------------
* assign id's to row and col for later processing
* ----------------------------------------------------- */
exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A, HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_A; i++) { exp_indices[i] = -1; }
for (i = 0; i < nindices; i++)
{
if (exp_indices[indices[i]] == -1) { exp_indices[indices[i]] = i; }
else
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ExtractSubmatrices: wrong index %d %d\n");
exit(1);
}
}
k = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
exp_indices[i] = - k - 1;
k++;
}
}
/* -----------------------------------------------------
* compute number of nonzeros for each block
* ----------------------------------------------------- */
nnz11 = nnz12 = nnz21 = nnz22 = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0) { nnz11++; }
else { nnz12++; }
}
}
else
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0) { nnz21++; }
else { nnz22++; }
}
}
}
/* -----------------------------------------------------
* create A11 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = 0;
nnz_offd = 0;
nnz_diag = nnz11;
/* This case is not yet implemented! */
global_nrows = 0;
global_ncols = 0;
row_starts = NULL;
col_starts = NULL;
A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0)
{
diag_j[nnz] = exp_indices[col];
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A11_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nrows; i++) { offd_i[i] = 0; }
offd = hypre_ParCSRMatrixOffd(A11_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = NULL;
hypre_CSRMatrixData(offd) = NULL;
/* -----------------------------------------------------
* create A12 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = 0;
nnz_offd = 0;
nnz_diag = nnz12;
global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs];
global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
col_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
}
A12_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] < 0)
{
diag_j[nnz] = - exp_indices[col] - 1;
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
if (nnz > nnz_diag)
{
hypre_assert(0);
hypre_error(HYPRE_ERROR_GENERIC);
}
diag = hypre_ParCSRMatrixDiag(A12_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nrows; i++) { offd_i[i] = 0; }
offd = hypre_ParCSRMatrixOffd(A12_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = NULL;
hypre_CSRMatrixData(offd) = NULL;
hypre_TFree(row_starts, HYPRE_MEMORY_HOST);
hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
/* -----------------------------------------------------
* create A21 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = 0;
nnz_offd = 0;
nnz_diag = nnz21;
global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
global_ncols = (HYPRE_BigInt)proc_offsets1[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
col_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
}
A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nrows_A - nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0)
{
diag_j[nnz] = exp_indices[col];
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A21_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nrows; i++) { offd_i[i] = 0; }
offd = hypre_ParCSRMatrixOffd(A21_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = NULL;
hypre_CSRMatrixData(offd) = NULL;
hypre_TFree(row_starts, HYPRE_MEMORY_HOST);
hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
/* -----------------------------------------------------
* create A22 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = 0;
nnz_offd = 0;
nnz_diag = nnz22;
global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
global_ncols = (HYPRE_BigInt)proc_offsets2[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
col_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
}
A22_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nrows_A - nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] < 0)
{
diag_j[nnz] = - exp_indices[col] - 1;
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A22_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nrows; i++) { offd_i[i] = 0; }
offd = hypre_ParCSRMatrixOffd(A22_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = NULL;
hypre_CSRMatrixData(offd) = NULL;
hypre_TFree(row_starts, HYPRE_MEMORY_HOST);
hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
/* -----------------------------------------------------
* hand the matrices back to the caller and clean up
* ----------------------------------------------------- */
(*submatrices)[0] = A11_csr;
(*submatrices)[1] = A12_csr;
(*submatrices)[2] = A21_csr;
(*submatrices)[3] = A22_csr;
hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST);
hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST);
hypre_TFree(exp_indices, HYPRE_MEMORY_HOST);
}
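/* A minimal usage sketch (illustrative only; `A` and the index set are
 * hypothetical). The leading entry of the index array is its length,
 * matching the indices2[0]/indices2[1..] convention used above:
 *
 *    HYPRE_Int idx[4] = { 3, 0, 2, 5 };   // keep rows/columns 0, 2, 5
 *    hypre_ParCSRMatrix **subs = hypre_TAlloc(hypre_ParCSRMatrix *, 4,
 *                                             HYPRE_MEMORY_HOST);
 *    hypre_ParCSRMatrixExtractSubmatrices(A, idx, &subs);
 *    // subs[0..3] now hold A11, A12, A21, A22 (single process only)
 */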
/* -----------------------------------------------------------------------------
* extract submatrices of a rectangular matrix
* ----------------------------------------------------------------------------- */
void hypre_ParCSRMatrixExtractRowSubmatrices( hypre_ParCSRMatrix *A_csr,
HYPRE_Int *indices2,
hypre_ParCSRMatrix ***submatrices )
{
HYPRE_Int nrows_A, nindices, *indices, *A_diag_i, *A_diag_j, mypid, nprocs;
HYPRE_Int i, j, k, *proc_offsets1, *proc_offsets2, *exp_indices;
HYPRE_Int nnz11, nnz21, col, ncols_offd, nnz_offd, nnz_diag;
HYPRE_Int *A_offd_i, *A_offd_j;
HYPRE_Int nrows, nnz;
HYPRE_BigInt global_nrows, global_ncols, *row_starts, *col_starts, *itmp_array;
HYPRE_Int *diag_i, *diag_j, row, *offd_i, *offd_j, nnz11_offd, nnz21_offd;
HYPRE_Complex *A_diag_a, *A_offd_a, *diag_a, *offd_a;
hypre_ParCSRMatrix *A11_csr, *A21_csr;
hypre_CSRMatrix *A_diag, *diag, *A_offd, *offd;
MPI_Comm comm;
/* -----------------------------------------------------
* first make sure the incoming indices are in order
* ----------------------------------------------------- */
nindices = indices2[0];
indices = &(indices2[1]);
hypre_qsort0(indices, 0, nindices - 1);
/* -----------------------------------------------------
* fetch matrix information
* ----------------------------------------------------- */
nrows_A = (HYPRE_Int)hypre_ParCSRMatrixGlobalNumRows(A_csr);
A_diag = hypre_ParCSRMatrixDiag(A_csr);
A_diag_i = hypre_CSRMatrixI(A_diag);
A_diag_j = hypre_CSRMatrixJ(A_diag);
A_diag_a = hypre_CSRMatrixData(A_diag);
A_offd = hypre_ParCSRMatrixOffd(A_csr);
A_offd_i = hypre_CSRMatrixI(A_offd);
A_offd_j = hypre_CSRMatrixJ(A_offd);
A_offd_a = hypre_CSRMatrixData(A_offd);
comm = hypre_ParCSRMatrixComm(A_csr);
hypre_MPI_Comm_rank(comm, &mypid);
hypre_MPI_Comm_size(comm, &nprocs);
/* -----------------------------------------------------
* compute new matrix dimensions
* ----------------------------------------------------- */
proc_offsets1 = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST);
proc_offsets2 = hypre_TAlloc(HYPRE_Int, (nprocs + 1), HYPRE_MEMORY_HOST);
hypre_MPI_Allgather(&nindices, 1, HYPRE_MPI_INT, proc_offsets1, 1,
HYPRE_MPI_INT, comm);
k = 0;
for (i = 0; i < nprocs; i++)
{
j = proc_offsets1[i];
proc_offsets1[i] = k;
k += j;
}
proc_offsets1[nprocs] = k;
itmp_array = hypre_ParCSRMatrixRowStarts(A_csr);
for (i = 0; i <= nprocs; i++)
{
proc_offsets2[i] = (HYPRE_Int)(itmp_array[i] - proc_offsets1[i]);
}
/* -----------------------------------------------------
* assign id's to row and col for later processing
* ----------------------------------------------------- */
exp_indices = hypre_TAlloc(HYPRE_Int, nrows_A, HYPRE_MEMORY_HOST);
for (i = 0; i < nrows_A; i++) { exp_indices[i] = -1; }
for (i = 0; i < nindices; i++)
{
if (exp_indices[indices[i]] == -1) { exp_indices[indices[i]] = i; }
else
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, "ExtractRowSubmatrices: wrong index %d %d\n");
exit(1);
}
}
k = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
exp_indices[i] = - k - 1;
k++;
}
}
/* -----------------------------------------------------
* compute number of nonzeros for each block
* ----------------------------------------------------- */
nnz11 = nnz21 = nnz11_offd = nnz21_offd = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0) { nnz11++; }
}
nnz11_offd += A_offd_i[i + 1] - A_offd_i[i];
}
else
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] < 0) { nnz21++; }
}
nnz21_offd += A_offd_i[i + 1] - A_offd_i[i];
}
}
/* -----------------------------------------------------
* create A11 matrix (assume sequential for the moment)
* ----------------------------------------------------- */
ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr));
nnz_diag = nnz11;
nnz_offd = nnz11_offd;
global_nrows = (HYPRE_BigInt)proc_offsets1[nprocs];
itmp_array = hypre_ParCSRMatrixColStarts(A_csr);
global_ncols = itmp_array[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets1[i];
col_starts[i] = itmp_array[i];
}
A11_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
col = A_diag_j[j];
if (exp_indices[col] >= 0)
{
diag_j[nnz] = exp_indices[col];
diag_a[nnz++] = A_diag_a[j];
}
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A11_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
offd_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] >= 0)
{
for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
{
offd_j[nnz] = A_offd_j[j];
offd_a[nnz++] = A_offd_a[j];
}
row++;
offd_i[row] = nnz;
}
}
offd = hypre_ParCSRMatrixOffd(A11_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixData(offd) = offd_a;
hypre_TFree(row_starts, HYPRE_MEMORY_HOST);
hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
/* -----------------------------------------------------
* create A21 matrix
* ----------------------------------------------------- */
ncols_offd = hypre_CSRMatrixNumCols(hypre_ParCSRMatrixDiag(A_csr));
nnz_offd = nnz21_offd;
nnz_diag = nnz21;
global_nrows = (HYPRE_BigInt)proc_offsets2[nprocs];
itmp_array = hypre_ParCSRMatrixColStarts(A_csr);
global_ncols = itmp_array[nprocs];
row_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
col_starts = hypre_CTAlloc(HYPRE_BigInt, nprocs + 1, HYPRE_MEMORY_HOST);
for (i = 0; i <= nprocs; i++)
{
row_starts[i] = (HYPRE_BigInt)proc_offsets2[i];
col_starts[i] = itmp_array[i];
}
A21_csr = hypre_ParCSRMatrixCreate(comm, global_nrows, global_ncols,
row_starts, col_starts, ncols_offd,
nnz_diag, nnz_offd);
nrows = nrows_A - nindices;
diag_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
diag_j = hypre_CTAlloc(HYPRE_Int, nnz_diag, HYPRE_MEMORY_HOST);
diag_a = hypre_CTAlloc(HYPRE_Complex, nnz_diag, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
diag_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
diag_j[nnz] = A_diag_j[j];
diag_a[nnz++] = A_diag_a[j];
}
row++;
diag_i[row] = nnz;
}
}
diag = hypre_ParCSRMatrixDiag(A21_csr);
hypre_CSRMatrixI(diag) = diag_i;
hypre_CSRMatrixJ(diag) = diag_j;
hypre_CSRMatrixData(diag) = diag_a;
offd_i = hypre_CTAlloc(HYPRE_Int, nrows + 1, HYPRE_MEMORY_HOST);
offd_j = hypre_CTAlloc(HYPRE_Int, nnz_offd, HYPRE_MEMORY_HOST);
offd_a = hypre_CTAlloc(HYPRE_Complex, nnz_offd, HYPRE_MEMORY_HOST);
nnz = 0;
row = 0;
offd_i[0] = 0;
for (i = 0; i < nrows_A; i++)
{
if (exp_indices[i] < 0)
{
for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
{
offd_j[nnz] = A_offd_j[j];
offd_a[nnz++] = A_offd_a[j];
}
row++;
offd_i[row] = nnz;
}
}
offd = hypre_ParCSRMatrixOffd(A21_csr);
hypre_CSRMatrixI(offd) = offd_i;
hypre_CSRMatrixJ(offd) = offd_j;
hypre_CSRMatrixData(offd) = offd_a;
hypre_TFree(row_starts, HYPRE_MEMORY_HOST);
hypre_TFree(col_starts, HYPRE_MEMORY_HOST);
/* -----------------------------------------------------
* hand the matrices back to the caller and clean up
* ----------------------------------------------------- */
(*submatrices)[0] = A11_csr;
(*submatrices)[1] = A21_csr;
hypre_TFree(proc_offsets1, HYPRE_MEMORY_HOST);
hypre_TFree(proc_offsets2, HYPRE_MEMORY_HOST);
hypre_TFree(exp_indices, HYPRE_MEMORY_HOST);
}
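/* A minimal usage sketch (illustrative only; `A` and the indices are
 * hypothetical). Only two blocks are produced here, split by rows:
 *
 *    HYPRE_Int idx[3] = { 2, 1, 4 };      // keep rows 1 and 4
 *    hypre_ParCSRMatrix **subs = hypre_TAlloc(hypre_ParCSRMatrix *, 2,
 *                                             HYPRE_MEMORY_HOST);
 *    hypre_ParCSRMatrixExtractRowSubmatrices(A, idx, &subs);
 *    // subs[0] holds the selected rows (A11), subs[1] the rest (A21)
 */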
/* -----------------------------------------------------------------------------
* return the sum of all local elements of the matrix
* ----------------------------------------------------------------------------- */
HYPRE_Complex hypre_ParCSRMatrixLocalSumElts( hypre_ParCSRMatrix * A )
{
hypre_CSRMatrix * A_diag = hypre_ParCSRMatrixDiag( A );
hypre_CSRMatrix * A_offd = hypre_ParCSRMatrixOffd( A );
return hypre_CSRMatrixSumElts(A_diag) + hypre_CSRMatrixSumElts(A_offd);
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixMatAminvDB
* computes C = (A - inv(D)B) where D is a diagonal matrix
* Note: Data structure of A is expected to be a subset of data structure of B!
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixAminvDB( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B,
HYPRE_Complex *d,
hypre_ParCSRMatrix **C_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(B);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_ParCSRMatrix *C = NULL;
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
hypre_ParCSRCommPkg *comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
HYPRE_Int num_sends_B, num_recvs_B;
HYPRE_Int i, j, cnt;
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Complex *A_diag_data = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Complex *A_offd_data = hypre_CSRMatrixData(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int num_rows = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
HYPRE_Complex *B_diag_data = hypre_CSRMatrixData(B_diag);
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
HYPRE_Complex *B_offd_data = hypre_CSRMatrixData(B_offd);
HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
hypre_CSRMatrix *C_diag = NULL;
hypre_CSRMatrix *C_offd = NULL;
HYPRE_Int *C_diag_i = NULL;
HYPRE_Int *C_diag_j = NULL;
HYPRE_Complex *C_diag_data = NULL;
HYPRE_Int *C_offd_i = NULL;
HYPRE_Int *C_offd_j = NULL;
HYPRE_Complex *C_offd_data = NULL;
HYPRE_Int num_procs, my_id;
HYPRE_Int *recv_procs_B;
HYPRE_Int *send_procs_B;
HYPRE_Int *recv_vec_starts_B;
HYPRE_Int *send_map_starts_B;
HYPRE_Int *send_map_elmts_B;
hypre_ParCSRCommPkg *comm_pkg_C;
HYPRE_Int *recv_procs_C;
HYPRE_Int *send_procs_C;
HYPRE_Int *recv_vec_starts_C;
HYPRE_Int *send_map_starts_C;
HYPRE_Int *send_map_elmts_C;
HYPRE_Int *map_to_B;
HYPRE_Complex *D_tmp;
HYPRE_Int size, rest, num_threads, ii;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
num_threads = hypre_NumThreads();
/*---------------------------------------------------------------------
* If there exists no CommPkg for B, a CommPkg is generated
*--------------------------------------------------------------------*/
if (!comm_pkg_B)
{
hypre_MatvecCommPkgCreate(B);
comm_pkg_B = hypre_ParCSRMatrixCommPkg(B);
}
C = hypre_ParCSRMatrixClone(B, 0);
C_diag = hypre_ParCSRMatrixDiag(C);
C_diag_i = hypre_CSRMatrixI(C_diag);
C_diag_j = hypre_CSRMatrixJ(C_diag);
C_diag_data = hypre_CSRMatrixData(C_diag);
C_offd = hypre_ParCSRMatrixOffd(C);
C_offd_i = hypre_CSRMatrixI(C_offd);
C_offd_j = hypre_CSRMatrixJ(C_offd);
C_offd_data = hypre_CSRMatrixData(C_offd);
size = num_rows / num_threads;
rest = num_rows - size * num_threads;
D_tmp = hypre_CTAlloc(HYPRE_Complex, num_rows, HYPRE_MEMORY_HOST);
if (num_cols_offd_A)
{
map_to_B = hypre_CTAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
cnt = 0;
for (i = 0; i < num_cols_offd_A; i++)
{
while (col_map_offd_B[cnt] < col_map_offd_A[i])
{
cnt++;
}
map_to_B[i] = cnt;
cnt++;
}
}
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(ii, i, j)
#endif
for (ii = 0; ii < num_threads; ii++)
{
HYPRE_Int *A_marker = NULL;
HYPRE_Int ns, ne, A_col, num_cols, nmax;
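/* split the num_rows rows into num_threads contiguous chunks [ns, ne);
 * the first `rest` threads take one extra row each */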
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
nmax = hypre_max(num_rows, num_cols_offd_B);
A_marker = hypre_CTAlloc(HYPRE_Int, nmax, HYPRE_MEMORY_HOST);
for (i = 0; i < num_rows; i++)
{
A_marker[i] = -1;
}
for (i = ns; i < ne; i++)
{
D_tmp[i] = 1.0 / d[i];
}
num_cols = C_diag_i[ns];
for (i = ns; i < ne; i++)
{
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
A_col = A_diag_j[j];
if (A_marker[A_col] < C_diag_i[i])
{
A_marker[A_col] = num_cols;
C_diag_j[num_cols] = A_col;
C_diag_data[num_cols] = A_diag_data[j];
num_cols++;
}
else
{
C_diag_data[A_marker[A_col]] += A_diag_data[j];
}
}
for (j = B_diag_i[i]; j < B_diag_i[i + 1]; j++)
{
A_col = B_diag_j[j];
if (A_marker[A_col] < C_diag_i[i])
{
A_marker[A_col] = num_cols;
C_diag_j[num_cols] = A_col;
C_diag_data[num_cols] = -D_tmp[i] * B_diag_data[j];
num_cols++;
}
else
{
C_diag_data[A_marker[A_col]] -= D_tmp[i] * B_diag_data[j];
}
}
}
for (i = 0; i < num_cols_offd_B; i++)
{
A_marker[i] = -1;
}
num_cols = C_offd_i[ns];
for (i = ns; i < ne; i++)
{
for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
{
A_col = map_to_B[A_offd_j[j]];
if (A_marker[A_col] < B_offd_i[i])
{
A_marker[A_col] = num_cols;
C_offd_j[num_cols] = A_col;
C_offd_data[num_cols] = A_offd_data[j];
num_cols++;
}
else
{
C_offd_data[A_marker[A_col]] += A_offd_data[j];
}
}
for (j = B_offd_i[i]; j < B_offd_i[i + 1]; j++)
{
A_col = B_offd_j[j];
if (A_marker[A_col] < B_offd_i[i])
{
A_marker[A_col] = num_cols;
C_offd_j[num_cols] = A_col;
C_offd_data[num_cols] = -D_tmp[i] * B_offd_data[j];
num_cols++;
}
else
{
C_offd_data[A_marker[A_col]] -= D_tmp[i] * B_offd_data[j];
}
}
}
hypre_TFree(A_marker, HYPRE_MEMORY_HOST);
} /* end parallel region */
/*for (i=0; i < num_cols_offd_B; i++)
col_map_offd_C[i] = col_map_offd_B[i]; */
num_sends_B = hypre_ParCSRCommPkgNumSends(comm_pkg_B);
num_recvs_B = hypre_ParCSRCommPkgNumRecvs(comm_pkg_B);
recv_procs_B = hypre_ParCSRCommPkgRecvProcs(comm_pkg_B);
recv_vec_starts_B = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_B);
send_procs_B = hypre_ParCSRCommPkgSendProcs(comm_pkg_B);
send_map_starts_B = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_B);
send_map_elmts_B = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_B);
recv_procs_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B, HYPRE_MEMORY_HOST);
recv_vec_starts_C = hypre_CTAlloc(HYPRE_Int, num_recvs_B + 1, HYPRE_MEMORY_HOST);
send_procs_C = hypre_CTAlloc(HYPRE_Int, num_sends_B, HYPRE_MEMORY_HOST);
send_map_starts_C = hypre_CTAlloc(HYPRE_Int, num_sends_B + 1, HYPRE_MEMORY_HOST);
send_map_elmts_C = hypre_CTAlloc(HYPRE_Int, send_map_starts_B[num_sends_B], HYPRE_MEMORY_HOST);
for (i = 0; i < num_recvs_B; i++)
{
recv_procs_C[i] = recv_procs_B[i];
}
for (i = 0; i < num_recvs_B + 1; i++)
{
recv_vec_starts_C[i] = recv_vec_starts_B[i];
}
for (i = 0; i < num_sends_B; i++)
{
send_procs_C[i] = send_procs_B[i];
}
for (i = 0; i < num_sends_B + 1; i++)
{
send_map_starts_C[i] = send_map_starts_B[i];
}
for (i = 0; i < send_map_starts_B[num_sends_B]; i++)
{
send_map_elmts_C[i] = send_map_elmts_B[i];
}
comm_pkg_C = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(comm_pkg_C) = comm;
hypre_ParCSRCommPkgNumRecvs(comm_pkg_C) = num_recvs_B;
hypre_ParCSRCommPkgRecvProcs(comm_pkg_C) = recv_procs_C;
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_C) = recv_vec_starts_C;
hypre_ParCSRCommPkgNumSends(comm_pkg_C) = num_sends_B;
hypre_ParCSRCommPkgSendProcs(comm_pkg_C) = send_procs_C;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_C) = send_map_starts_C;
hypre_ParCSRCommPkgSendMapElmts(comm_pkg_C) = send_map_elmts_C;
hypre_ParCSRMatrixCommPkg(C) = comm_pkg_C;
hypre_TFree(D_tmp, HYPRE_MEMORY_HOST);
if (num_cols_offd_A) { hypre_TFree(map_to_B, HYPRE_MEMORY_HOST); }
*C_ptr = C;
return (hypre_error_flag);
}
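/* A minimal usage sketch (illustrative only; `A`, `B`, and `d` are
 * hypothetical, with `d` holding the diagonal of D, one entry per local
 * row, and A's sparsity pattern a subset of B's as noted above):
 *
 *    hypre_ParCSRMatrix *C = NULL;
 *    hypre_ParCSRMatrixAminvDB(A, B, d, &C);   // C = A - inv(D)*B
 *    ...
 *    hypre_ParCSRMatrixDestroy(C);
 */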
/*--------------------------------------------------------------------------
* hypre_ParTMatmul:
*
 * Multiplies transpose(A) and B for two ParCSRMatrices A and B, and
 * returns the product in ParCSRMatrix C
 *
 * Note that C does not own the partitionings, since its row_starts
 * is owned by A and its col_starts by B.
*--------------------------------------------------------------------------*/
hypre_ParCSRMatrix*
hypre_ParTMatmul( hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *AT_diag = NULL;
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrix *AT_offd = NULL;
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
HYPRE_BigInt first_col_diag_B = hypre_ParCSRMatrixFirstColDiag(B);
HYPRE_BigInt *col_starts_A = hypre_ParCSRMatrixColStarts(A);
HYPRE_BigInt *col_starts_B = hypre_ParCSRMatrixColStarts(B);
HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
hypre_ParCSRMatrix *C;
HYPRE_BigInt *col_map_offd_C = NULL;
HYPRE_Int *map_B_to_C;
hypre_CSRMatrix *C_diag = NULL;
hypre_CSRMatrix *C_tmp_diag = NULL;
HYPRE_Complex *C_diag_data = NULL;
HYPRE_Int *C_diag_i = NULL;
HYPRE_Int *C_diag_j = NULL;
HYPRE_BigInt first_col_diag_C;
HYPRE_BigInt last_col_diag_C;
hypre_CSRMatrix *C_offd = NULL;
hypre_CSRMatrix *C_tmp_offd = NULL;
hypre_CSRMatrix *C_int = NULL;
hypre_CSRMatrix *C_ext = NULL;
HYPRE_Int *C_ext_i;
HYPRE_BigInt *C_ext_j;
HYPRE_Complex *C_ext_data;
HYPRE_Int *C_ext_diag_i;
HYPRE_Int *C_ext_diag_j;
HYPRE_Complex *C_ext_diag_data;
HYPRE_Int *C_ext_offd_i;
HYPRE_Int *C_ext_offd_j;
HYPRE_Complex *C_ext_offd_data;
HYPRE_Int C_ext_size = 0;
HYPRE_Int C_ext_diag_size = 0;
HYPRE_Int C_ext_offd_size = 0;
HYPRE_Int *C_tmp_diag_i;
HYPRE_Int *C_tmp_diag_j;
HYPRE_Complex *C_tmp_diag_data;
HYPRE_Int *C_tmp_offd_i;
HYPRE_Int *C_tmp_offd_j;
HYPRE_Complex *C_tmp_offd_data;
HYPRE_Complex *C_offd_data = NULL;
HYPRE_Int *C_offd_i = NULL;
HYPRE_Int *C_offd_j = NULL;
HYPRE_BigInt *temp;
HYPRE_Int *send_map_starts_A;
HYPRE_Int *send_map_elmts_A;
HYPRE_Int num_sends_A;
HYPRE_Int num_cols_offd_C = 0;
HYPRE_Int *P_marker;
HYPRE_Int i, j;
HYPRE_Int i1, j_indx;
HYPRE_BigInt nrows_A, ncols_A;
HYPRE_BigInt nrows_B, ncols_B;
HYPRE_Int cnt, cnt_offd, cnt_diag;
HYPRE_BigInt value;
HYPRE_Int num_procs, my_id;
HYPRE_Int max_num_threads;
HYPRE_Int *C_diag_array = NULL;
HYPRE_Int *C_offd_array = NULL;
HYPRE_BigInt first_row_index, first_col_diag;
HYPRE_Int local_num_rows, local_num_cols;
nrows_A = hypre_ParCSRMatrixGlobalNumRows(A);
ncols_A = hypre_ParCSRMatrixGlobalNumCols(A);
nrows_B = hypre_ParCSRMatrixGlobalNumRows(B);
ncols_B = hypre_ParCSRMatrixGlobalNumCols(B);
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
max_num_threads = hypre_NumThreads();
if (nrows_A != nrows_B || num_rows_diag_A != num_rows_diag_B)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, " Error! Incompatible matrix dimensions!\n");
return NULL;
}
HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
/*---------------------------------------------------------------------
* If there exists no CommPkg for A, a CommPkg is generated using
* equally load balanced partitionings
*--------------------------------------------------------------------*/
HYPRE_ANNOTATE_FUNC_BEGIN;
if (!comm_pkg_A)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg_A = hypre_ParCSRMatrixCommPkg(A);
}
hypre_CSRMatrixTranspose(A_diag, &AT_diag, 1);
hypre_CSRMatrixTranspose(A_offd, &AT_offd, 1);
C_tmp_diag = hypre_CSRMatrixMultiply(AT_diag, B_diag);
C_ext_size = 0;
if (num_procs > 1)
{
hypre_CSRMatrix *C_int_diag;
hypre_CSRMatrix *C_int_offd;
void *request;
C_tmp_offd = hypre_CSRMatrixMultiply(AT_diag, B_offd);
C_int_diag = hypre_CSRMatrixMultiply(AT_offd, B_diag);
C_int_offd = hypre_CSRMatrixMultiply(AT_offd, B_offd);
hypre_ParCSRMatrixDiag(B) = C_int_diag;
hypre_ParCSRMatrixOffd(B) = C_int_offd;
C_int = hypre_MergeDiagAndOffd(B);
hypre_ParCSRMatrixDiag(B) = B_diag;
hypre_ParCSRMatrixOffd(B) = B_offd;
hypre_ExchangeExternalRowsInit(C_int, comm_pkg_A, &request);
C_ext = hypre_ExchangeExternalRowsWait(request);
C_ext_i = hypre_CSRMatrixI(C_ext);
C_ext_j = hypre_CSRMatrixBigJ(C_ext);
C_ext_data = hypre_CSRMatrixData(C_ext);
C_ext_size = C_ext_i[hypre_CSRMatrixNumRows(C_ext)];
hypre_CSRMatrixDestroy(C_int);
hypre_CSRMatrixDestroy(C_int_diag);
hypre_CSRMatrixDestroy(C_int_offd);
}
else
{
C_tmp_offd = hypre_CSRMatrixCreate(num_cols_diag_A, 0, 0);
hypre_CSRMatrixInitialize(C_tmp_offd);
hypre_CSRMatrixNumRownnz(C_tmp_offd) = 0;
}
hypre_CSRMatrixDestroy(AT_diag);
hypre_CSRMatrixDestroy(AT_offd);
/*-----------------------------------------------------------------------
* Add contents of C_ext to C_tmp_diag and C_tmp_offd
* to obtain C_diag and C_offd
*-----------------------------------------------------------------------*/
/* check for new nonzero columns in C_offd generated through C_ext */
first_col_diag_C = first_col_diag_B;
last_col_diag_C = first_col_diag_B + (HYPRE_BigInt)num_cols_diag_B - 1;
C_tmp_diag_i = hypre_CSRMatrixI(C_tmp_diag);
if (C_ext_size || num_cols_offd_B)
{
HYPRE_Int C_ext_num_rows;
num_sends_A = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
send_map_starts_A = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
send_map_elmts_A = hypre_ParCSRCommPkgSendMapElmts(comm_pkg_A);
C_ext_num_rows = send_map_starts_A[num_sends_A];
C_ext_diag_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows + 1, HYPRE_MEMORY_HOST);
C_ext_offd_i = hypre_CTAlloc(HYPRE_Int, C_ext_num_rows + 1, HYPRE_MEMORY_HOST);
temp = hypre_CTAlloc(HYPRE_BigInt, C_ext_size + num_cols_offd_B, HYPRE_MEMORY_HOST);
C_ext_diag_size = 0;
C_ext_offd_size = 0;
for (i = 0; i < C_ext_num_rows; i++)
{
for (j = C_ext_i[i]; j < C_ext_i[i + 1]; j++)
{
if (C_ext_j[j] < first_col_diag_C ||
C_ext_j[j] > last_col_diag_C)
{
temp[C_ext_offd_size++] = C_ext_j[j];
}
else
{
C_ext_diag_size++;
}
}
C_ext_diag_i[i + 1] = C_ext_diag_size;
C_ext_offd_i[i + 1] = C_ext_offd_size;
}
cnt = C_ext_offd_size;
for (i = 0; i < num_cols_offd_B; i++)
{
temp[cnt++] = col_map_offd_B[i];
}
if (cnt)
{
hypre_BigQsort0(temp, 0, cnt - 1);
value = temp[0];
num_cols_offd_C = 1;
for (i = 1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_C++] = value;
}
}
}
if (num_cols_offd_C)
{
col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
}
for (i = 0; i < num_cols_offd_C; i++)
{
col_map_offd_C[i] = temp[i];
}
hypre_TFree(temp, HYPRE_MEMORY_HOST);
if (C_ext_diag_size)
{
C_ext_diag_j = hypre_CTAlloc(HYPRE_Int, C_ext_diag_size, HYPRE_MEMORY_HOST);
C_ext_diag_data = hypre_CTAlloc(HYPRE_Complex, C_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (C_ext_offd_size)
{
C_ext_offd_j = hypre_CTAlloc(HYPRE_Int, C_ext_offd_size, HYPRE_MEMORY_HOST);
C_ext_offd_data = hypre_CTAlloc(HYPRE_Complex, C_ext_offd_size, HYPRE_MEMORY_HOST);
}
C_tmp_diag_j = hypre_CSRMatrixJ(C_tmp_diag);
C_tmp_diag_data = hypre_CSRMatrixData(C_tmp_diag);
C_tmp_offd_i = hypre_CSRMatrixI(C_tmp_offd);
C_tmp_offd_j = hypre_CSRMatrixJ(C_tmp_offd);
C_tmp_offd_data = hypre_CSRMatrixData(C_tmp_offd);
cnt_offd = 0;
cnt_diag = 0;
for (i = 0; i < C_ext_num_rows; i++)
{
for (j = C_ext_i[i]; j < C_ext_i[i + 1]; j++)
{
if (C_ext_j[j] < first_col_diag_C ||
C_ext_j[j] > last_col_diag_C)
{
C_ext_offd_j[cnt_offd] = hypre_BigBinarySearch(col_map_offd_C,
C_ext_j[j],
num_cols_offd_C);
C_ext_offd_data[cnt_offd++] = C_ext_data[j];
}
else
{
C_ext_diag_j[cnt_diag] = (HYPRE_Int)(C_ext_j[j] - first_col_diag_C);
C_ext_diag_data[cnt_diag++] = C_ext_data[j];
}
}
}
}
if (C_ext)
{
hypre_CSRMatrixDestroy(C_ext);
C_ext = NULL;
}
if (num_cols_offd_B)
{
map_B_to_C = hypre_CTAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);
cnt = 0;
for (i = 0; i < num_cols_offd_C; i++)
{
if (col_map_offd_C[i] == col_map_offd_B[cnt])
{
map_B_to_C[cnt++] = i;
if (cnt == num_cols_offd_B) { break; }
}
}
for (i = 0; i < hypre_CSRMatrixI(C_tmp_offd)[hypre_CSRMatrixNumRows(C_tmp_offd)]; i++)
{
j_indx = C_tmp_offd_j[i];
C_tmp_offd_j[i] = map_B_to_C[j_indx];
}
}
/*-----------------------------------------------------------------------
* Need to compute:
* C_diag = C_tmp_diag + C_ext_diag
* C_offd = C_tmp_offd + C_ext_offd
*
* First generate structure
*-----------------------------------------------------------------------*/
if (C_ext_size || num_cols_offd_B)
{
C_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A + 1, memory_location_C);
C_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_diag_A + 1, memory_location_C);
C_diag_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
C_offd_array = hypre_CTAlloc(HYPRE_Int, max_num_threads, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int *B_marker = NULL;
HYPRE_Int *B_marker_offd = NULL;
HYPRE_Int ik, jk, j1, j2, jcol;
HYPRE_Int ns, ne, ii, nnz_d, nnz_o;
HYPRE_Int rest, size;
HYPRE_Int num_threads = hypre_NumActiveThreads();
size = num_cols_diag_A / num_threads;
rest = num_cols_diag_A - size * num_threads;
ii = hypre_GetThreadNum();
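/* same chunking as elsewhere: num_cols_diag_A rows split into
 * num_threads contiguous pieces [ns, ne), the first `rest` threads
 * taking one extra row each */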
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
B_marker = hypre_CTAlloc(HYPRE_Int, num_cols_diag_B, HYPRE_MEMORY_HOST);
B_marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);
for (ik = 0; ik < num_cols_diag_B; ik++)
{
B_marker[ik] = -1;
}
for (ik = 0; ik < num_cols_offd_C; ik++)
{
B_marker_offd[ik] = -1;
}
nnz_d = 0;
nnz_o = 0;
for (ik = ns; ik < ne; ik++)
{
for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik + 1]; jk++)
{
jcol = C_tmp_diag_j[jk];
B_marker[jcol] = ik;
nnz_d++;
}
for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik + 1]; jk++)
{
jcol = C_tmp_offd_j[jk];
B_marker_offd[jcol] = ik;
nnz_o++;
}
for (jk = 0; jk < num_sends_A; jk++)
{
for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk + 1]; j1++)
{
if (send_map_elmts_A[j1] == ik)
{
for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1 + 1]; j2++)
{
jcol = C_ext_diag_j[j2];
if (B_marker[jcol] < ik)
{
B_marker[jcol] = ik;
nnz_d++;
}
}
for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1 + 1]; j2++)
{
jcol = C_ext_offd_j[j2];
if (B_marker_offd[jcol] < ik)
{
B_marker_offd[jcol] = ik;
nnz_o++;
}
}
break;
}
}
}
C_diag_array[ii] = nnz_d;
C_offd_array[ii] = nnz_o;
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
if (ii == 0)
{
nnz_d = 0;
nnz_o = 0;
for (ik = 0; ik < num_threads - 1; ik++)
{
C_diag_array[ik + 1] += C_diag_array[ik];
C_offd_array[ik + 1] += C_offd_array[ik];
}
nnz_d = C_diag_array[num_threads - 1];
nnz_o = C_offd_array[num_threads - 1];
C_diag_i[num_cols_diag_A] = nnz_d;
C_offd_i[num_cols_diag_A] = nnz_o;
C_diag = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_diag_A, nnz_d);
C_offd = hypre_CSRMatrixCreate(num_cols_diag_A, num_cols_offd_C, nnz_o);
hypre_CSRMatrixI(C_diag) = C_diag_i;
hypre_CSRMatrixInitialize_v2(C_diag, 0, memory_location_C);
C_diag_j = hypre_CSRMatrixJ(C_diag);
C_diag_data = hypre_CSRMatrixData(C_diag);
hypre_CSRMatrixI(C_offd) = C_offd_i;
hypre_CSRMatrixInitialize_v2(C_offd, 0, memory_location_C);
C_offd_j = hypre_CSRMatrixJ(C_offd);
C_offd_data = hypre_CSRMatrixData(C_offd);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
/*-----------------------------------------------------------------------
* Need to compute C_diag = C_tmp_diag + C_ext_diag
* and C_offd = C_tmp_offd + C_ext_offd !!!!
* Now fill in values
*-----------------------------------------------------------------------*/
for (ik = 0; ik < num_cols_diag_B; ik++)
{
B_marker[ik] = -1;
}
for (ik = 0; ik < num_cols_offd_C; ik++)
{
B_marker_offd[ik] = -1;
}
/*-----------------------------------------------------------------------
* Populate matrices
*-----------------------------------------------------------------------*/
nnz_d = 0;
nnz_o = 0;
if (ii)
{
nnz_d = C_diag_array[ii - 1];
nnz_o = C_offd_array[ii - 1];
}
for (ik = ns; ik < ne; ik++)
{
C_diag_i[ik] = nnz_d;
C_offd_i[ik] = nnz_o;
for (jk = C_tmp_diag_i[ik]; jk < C_tmp_diag_i[ik + 1]; jk++)
{
jcol = C_tmp_diag_j[jk];
C_diag_j[nnz_d] = jcol;
C_diag_data[nnz_d] = C_tmp_diag_data[jk];
B_marker[jcol] = nnz_d;
nnz_d++;
}
for (jk = C_tmp_offd_i[ik]; jk < C_tmp_offd_i[ik + 1]; jk++)
{
jcol = C_tmp_offd_j[jk];
C_offd_j[nnz_o] = jcol;
C_offd_data[nnz_o] = C_tmp_offd_data[jk];
B_marker_offd[jcol] = nnz_o;
nnz_o++;
}
for (jk = 0; jk < num_sends_A; jk++)
{
for (j1 = send_map_starts_A[jk]; j1 < send_map_starts_A[jk + 1]; j1++)
{
if (send_map_elmts_A[j1] == ik)
{
for (j2 = C_ext_diag_i[j1]; j2 < C_ext_diag_i[j1 + 1]; j2++)
{
jcol = C_ext_diag_j[j2];
if (B_marker[jcol] < C_diag_i[ik])
{
C_diag_j[nnz_d] = jcol;
C_diag_data[nnz_d] = C_ext_diag_data[j2];
B_marker[jcol] = nnz_d;
nnz_d++;
}
else
{
C_diag_data[B_marker[jcol]] += C_ext_diag_data[j2];
}
}
for (j2 = C_ext_offd_i[j1]; j2 < C_ext_offd_i[j1 + 1]; j2++)
{
jcol = C_ext_offd_j[j2];
if (B_marker_offd[jcol] < C_offd_i[ik])
{
C_offd_j[nnz_o] = jcol;
C_offd_data[nnz_o] = C_ext_offd_data[j2];
B_marker_offd[jcol] = nnz_o;
nnz_o++;
}
else
{
C_offd_data[B_marker_offd[jcol]] += C_ext_offd_data[j2];
}
}
break;
}
}
}
}
hypre_TFree(B_marker, HYPRE_MEMORY_HOST);
hypre_TFree(B_marker_offd, HYPRE_MEMORY_HOST);
} /*end parallel region */
hypre_TFree(C_diag_array, HYPRE_MEMORY_HOST);
hypre_TFree(C_offd_array, HYPRE_MEMORY_HOST);
}
/* row_starts[0] is start of local rows. row_starts[1] is start of next
processor's rows */
first_row_index = col_starts_A[0];
local_num_rows = (HYPRE_Int)(col_starts_A[1] - first_row_index );
first_col_diag = col_starts_B[0];
local_num_cols = (HYPRE_Int)(col_starts_B[1] - first_col_diag);
C = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrixComm(C) = comm;
hypre_ParCSRMatrixGlobalNumRows(C) = ncols_A;
hypre_ParCSRMatrixGlobalNumCols(C) = ncols_B;
hypre_ParCSRMatrixFirstRowIndex(C) = first_row_index;
hypre_ParCSRMatrixFirstColDiag(C) = first_col_diag;
hypre_ParCSRMatrixLastRowIndex(C) = first_row_index + (HYPRE_BigInt)local_num_rows - 1;
hypre_ParCSRMatrixLastColDiag(C) = first_col_diag + (HYPRE_BigInt)local_num_cols - 1;
hypre_ParCSRMatrixColMapOffd(C) = NULL;
hypre_ParCSRMatrixAssumedPartition(C) = NULL;
hypre_ParCSRMatrixCommPkg(C) = NULL;
hypre_ParCSRMatrixCommPkgT(C) = NULL;
/* C row/col starts*/
hypre_ParCSRMatrixRowStarts(C)[0] = col_starts_A[0];
hypre_ParCSRMatrixRowStarts(C)[1] = col_starts_A[1];
hypre_ParCSRMatrixColStarts(C)[0] = col_starts_B[0];
hypre_ParCSRMatrixColStarts(C)[1] = col_starts_B[1];
/* set defaults */
hypre_ParCSRMatrixOwnsData(C) = 1;
hypre_ParCSRMatrixRowindices(C) = NULL;
hypre_ParCSRMatrixRowvalues(C) = NULL;
hypre_ParCSRMatrixGetrowactive(C) = 0;
if (C_diag)
{
hypre_CSRMatrixSetRownnz(C_diag);
hypre_ParCSRMatrixDiag(C) = C_diag;
}
else
{
hypre_ParCSRMatrixDiag(C) = C_tmp_diag;
}
if (C_offd)
{
hypre_CSRMatrixSetRownnz(C_offd);
hypre_ParCSRMatrixOffd(C) = C_offd;
}
else
{
hypre_ParCSRMatrixOffd(C) = C_tmp_offd;
}
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixDiag(C)) = memory_location_C;
hypre_CSRMatrixMemoryLocation(hypre_ParCSRMatrixOffd(C)) = memory_location_C;
if (num_cols_offd_C)
{
HYPRE_Int jj_count_offd, nnz_offd;
HYPRE_BigInt *new_col_map_offd_C = NULL;
P_marker = hypre_CTAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);
for (i = 0; i < num_cols_offd_C; i++)
{
P_marker[i] = -1;
}
jj_count_offd = 0;
nnz_offd = C_offd_i[num_cols_diag_A];
for (i = 0; i < nnz_offd; i++)
{
i1 = C_offd_j[i];
if (P_marker[i1])
{
P_marker[i1] = 0;
jj_count_offd++;
}
}
if (jj_count_offd < num_cols_offd_C)
{
new_col_map_offd_C = hypre_CTAlloc(HYPRE_BigInt, jj_count_offd, HYPRE_MEMORY_HOST);
jj_count_offd = 0;
for (i = 0; i < num_cols_offd_C; i++)
{
if (!P_marker[i])
{
P_marker[i] = jj_count_offd;
new_col_map_offd_C[jj_count_offd++] = col_map_offd_C[i];
}
}
for (i = 0; i < nnz_offd; i++)
{
i1 = C_offd_j[i];
C_offd_j[i] = P_marker[i1];
}
num_cols_offd_C = jj_count_offd;
hypre_TFree(col_map_offd_C, HYPRE_MEMORY_HOST);
col_map_offd_C = new_col_map_offd_C;
hypre_CSRMatrixNumCols(hypre_ParCSRMatrixOffd(C)) = num_cols_offd_C;
}
hypre_TFree(P_marker, HYPRE_MEMORY_HOST);
}
hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
/*-----------------------------------------------------------------------
* Free various arrays
*-----------------------------------------------------------------------*/
if (C_ext_size || num_cols_offd_B)
{
hypre_TFree(C_ext_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(C_ext_offd_i, HYPRE_MEMORY_HOST);
}
if (C_ext_diag_size)
{
hypre_TFree(C_ext_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(C_ext_diag_data, HYPRE_MEMORY_HOST);
}
if (C_ext_offd_size)
{
hypre_TFree(C_ext_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(C_ext_offd_data, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_B)
{
hypre_TFree(map_B_to_C, HYPRE_MEMORY_HOST);
}
if (C_diag)
{
hypre_CSRMatrixDestroy(C_tmp_diag);
}
if (C_offd)
{
hypre_CSRMatrixDestroy(C_tmp_offd);
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if ( hypre_GetExecPolicy2(memory_location_A, memory_location_B) == HYPRE_EXEC_DEVICE )
{
hypre_CSRMatrixMoveDiagFirstDevice(hypre_ParCSRMatrixDiag(C));
hypre_SyncComputeStream(hypre_handle());
}
#endif
HYPRE_ANNOTATE_FUNC_END;
return C;
}
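/* A minimal usage sketch (illustrative only; `A` and `B` are hypothetical
 * and must have matching global and local row counts):
 *
 *    hypre_ParCSRMatrix *C = hypre_ParTMatmul(A, B);   // C = A^T * B
 *    ...
 *    hypre_ParCSRMatrixDestroy(C);
 */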
/*--------------------------------------------------------------------------
* hypre_ParvecBdiagInvScal
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParvecBdiagInvScal( hypre_ParVector *b,
HYPRE_Int blockSize,
hypre_ParVector **bs,
hypre_ParCSRMatrix *A)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(b);
HYPRE_Int num_procs, my_id;
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
HYPRE_Int i, j, s, block_start, block_end;
HYPRE_BigInt nrow_global = hypre_ParVectorGlobalSize(b);
HYPRE_BigInt first_row = hypre_ParVectorFirstIndex(b);
HYPRE_BigInt last_row = hypre_ParVectorLastIndex(b);
HYPRE_BigInt end_row = last_row + 1; /* one past-the-last */
HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)(blockSize) * (HYPRE_BigInt)blockSize;
HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) *
(HYPRE_BigInt)blockSize, nrow_global );
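/* e.g., with blockSize = 4, first_row = 6, last_row = 13, nrow_global = 20:
 * first_row_block = 4 and end_row_block = 16, i.e. this proc's rows touch
 * the blocks [4,8), [8,12), [12,16) */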
hypre_assert(blockSize == A->bdiag_size);
HYPRE_Complex *bdiaginv = A->bdiaginv;
hypre_ParCSRCommPkg *comm_pkg = A->bdiaginv_comm_pkg;
HYPRE_Complex *dense = bdiaginv;
/* local vector of b */
hypre_Vector *b_local = hypre_ParVectorLocalVector(b);
HYPRE_Complex *b_local_data = hypre_VectorData(b_local);
/* number of sends (#procs) */
HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
/* number of rows to send */
HYPRE_Int num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
/* number of recvs (#procs) */
HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
/* number of rows to recv */
HYPRE_Int num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
hypre_ParCSRCommHandle *comm_handle;
hypre_ParVector *bnew = hypre_ParVectorCreate( hypre_ParVectorComm(b),
hypre_ParVectorGlobalSize(b), hypre_ParVectorPartitioning(b) );
hypre_ParVectorInitialize(bnew);
hypre_Vector *bnew_local = hypre_ParVectorLocalVector(bnew);
HYPRE_Complex *bnew_local_data = hypre_VectorData(bnew_local);
/* send and recv b */
HYPRE_Complex *send_b = hypre_TAlloc(HYPRE_Complex, num_rows_send, HYPRE_MEMORY_HOST);
HYPRE_Complex *recv_b = hypre_TAlloc(HYPRE_Complex, num_rows_recv, HYPRE_MEMORY_HOST);
for (i = 0; i < num_rows_send; i++)
{
j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
send_b[i] = b_local_data[j];
}
comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, send_b, recv_b);
/* (destroying the handle waits for the communication to finish) */
hypre_ParCSRCommHandleDestroy(comm_handle);
for (block_start = first_row_block; block_start < end_row_block; block_start += blockSize)
{
HYPRE_BigInt big_i;
block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
s = (HYPRE_Int)(block_end - block_start);
for (big_i = block_start; big_i < block_end; big_i++)
{
if (big_i < first_row || big_i >= end_row)
{
continue;
}
HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
bnew_local_data[local_i] = 0.0;
for (j = 0; j < s; j++)
{
HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
HYPRE_Complex val = dense[block_i + j * blockSize];
if (val == 0.0)
{
continue;
}
if (global_rid >= first_row && global_rid < end_row)
{
HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
bnew_local_data[local_i] += val * b_local_data[rid];
}
else
{
HYPRE_Int rid;
if (global_rid < first_row)
{
rid = (HYPRE_Int)(global_rid - first_row_block);
}
else
{
rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
}
bnew_local_data[local_i] += val * recv_b[rid];
}
}
}
dense += blockSize * blockSize;
}
hypre_TFree(send_b, HYPRE_MEMORY_HOST);
hypre_TFree(recv_b, HYPRE_MEMORY_HOST);
*bs = bnew;
return hypre_error_flag;
}
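/* A minimal usage sketch (illustrative only; assumes hypre_ParcsrBdiagInvScal
 * below has already been called on `A` with the same blockSize, so that
 * A->bdiaginv and A->bdiaginv_comm_pkg are set up):
 *
 *    hypre_ParVector *bs = NULL;
 *    hypre_ParvecBdiagInvScal(b, blockSize, &bs, A);   // bs = inv(B) * b
 *    ...
 *    hypre_ParVectorDestroy(bs);
 */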
/**
 * @brief Compute As = B^{-1}*A, where B is the block diagonal of A
 * @param[in]  A : input matrix
 * @param[in]  blockSize : block size
 * @param[out] As : output matrix B^{-1}*A
 * @return hypre_error_flag
 */
HYPRE_Int
hypre_ParcsrBdiagInvScal( hypre_ParCSRMatrix *A,
HYPRE_Int blockSize,
hypre_ParCSRMatrix **As)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Int num_procs, my_id;
hypre_MPI_Comm_rank(comm, &my_id);
hypre_MPI_Comm_size(comm, &num_procs);
HYPRE_Int i, j, k, s;
HYPRE_BigInt block_start, block_end;
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_BigInt last_row = hypre_ParCSRMatrixLastRowIndex(A);
HYPRE_BigInt end_row = first_row + (HYPRE_BigInt)nrow_local; /* one past-the-last */
HYPRE_Int ncol_local = hypre_CSRMatrixNumCols(A_diag);
HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
/* HYPRE_Int last_col = hypre_ParCSRMatrixLastColDiag(A); */
HYPRE_BigInt end_col = first_col + (HYPRE_BigInt)ncol_local;
HYPRE_BigInt nrow_global = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt ncol_global = hypre_ParCSRMatrixGlobalNumCols(A);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
void *request;
/* if square globally and locally */
HYPRE_Int square2 = (nrow_global == ncol_global) && (nrow_local == ncol_local) &&
(first_row == first_col);
if (nrow_global != ncol_global)
{
hypre_printf("hypre_ParcsrBdiagInvScal: only support N_ROW == N_COL\n");
return hypre_error_flag;
}
/* row range of the diagonal blocks this proc spans */
HYPRE_BigInt first_row_block = first_row / (HYPRE_BigInt)blockSize * (HYPRE_BigInt)blockSize;
HYPRE_BigInt end_row_block = hypre_min( (last_row / (HYPRE_BigInt)blockSize + 1) *
(HYPRE_BigInt)blockSize, nrow_global );
HYPRE_Int num_blocks = (HYPRE_Int)(last_row / (HYPRE_BigInt)blockSize + 1 - first_row /
(HYPRE_BigInt)blockSize);
/* number of external rows */
HYPRE_Int num_ext_rows = (HYPRE_Int)(end_row_block - first_row_block - (end_row - first_row));
HYPRE_BigInt *ext_indices;
HYPRE_Int A_ext_nnz;
hypre_CSRMatrix *A_ext = NULL;
HYPRE_Complex *A_ext_a = NULL;
HYPRE_Int *A_ext_i = NULL;
HYPRE_BigInt *A_ext_j = NULL;
HYPRE_Real *dense_all = hypre_CTAlloc(HYPRE_Complex, num_blocks * blockSize * blockSize,
HYPRE_MEMORY_HOST);
HYPRE_Real *dense = dense_all;
HYPRE_Int *IPIV = hypre_TAlloc(HYPRE_Int, blockSize, HYPRE_MEMORY_HOST);
HYPRE_Complex *dgetri_work = NULL;
HYPRE_Int dgetri_lwork = -1, lapack_info;
HYPRE_Int num_cols_A_offd_new;
HYPRE_BigInt *col_map_offd_A_new;
HYPRE_BigInt big_i;
HYPRE_Int *offd2new = NULL;
HYPRE_Int *marker_diag, *marker_newoffd;
HYPRE_Int nnz_diag = A_diag_i[nrow_local];
HYPRE_Int nnz_offd = A_offd_i[nrow_local];
HYPRE_Int nnz_diag_new = 0, nnz_offd_new = 0;
HYPRE_Int *A_diag_i_new, *A_diag_j_new, *A_offd_i_new, *A_offd_j_new;
HYPRE_Complex *A_diag_a_new, *A_offd_a_new;
/* heuristic */
HYPRE_Int nnz_diag_alloc = 2 * nnz_diag;
HYPRE_Int nnz_offd_alloc = 2 * nnz_offd;
A_diag_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST);
A_diag_j_new = hypre_CTAlloc(HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_offd_i_new = hypre_CTAlloc(HYPRE_Int, nrow_local + 1, HYPRE_MEMORY_HOST);
A_offd_j_new = hypre_CTAlloc(HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
A_offd_a_new = hypre_CTAlloc(HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
hypre_ParCSRMatrix *Anew;
hypre_CSRMatrix *Anew_diag;
hypre_CSRMatrix *Anew_offd;
HYPRE_Real eps = 2.2e-16; /* ~ machine epsilon for double precision */
/* Start with extracting the external rows */
HYPRE_BigInt *ext_offd;
ext_indices = hypre_CTAlloc(HYPRE_BigInt, num_ext_rows, HYPRE_MEMORY_HOST);
j = 0;
for (big_i = first_row_block; big_i < first_row; big_i++)
{
ext_indices[j++] = big_i;
}
for (big_i = end_row; big_i < end_row_block; big_i++)
{
ext_indices[j++] = big_i;
}
hypre_assert(j == num_ext_rows);
/* create CommPkg for external rows */
hypre_ParCSRFindExtendCommPkg(comm, nrow_global, first_row, nrow_local, row_starts,
hypre_ParCSRMatrixAssumedPartition(A),
num_ext_rows, ext_indices, &A->bdiaginv_comm_pkg);
hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, A->bdiaginv_comm_pkg, 1, &request);
A_ext = hypre_ParcsrGetExternalRowsWait(request);
hypre_TFree(ext_indices, HYPRE_MEMORY_HOST);
A_ext_i = hypre_CSRMatrixI(A_ext);
A_ext_j = hypre_CSRMatrixBigJ(A_ext);
A_ext_a = hypre_CSRMatrixData(A_ext);
A_ext_nnz = A_ext_i[num_ext_rows];
ext_offd = hypre_CTAlloc(HYPRE_BigInt, A_ext_nnz, HYPRE_MEMORY_HOST);
/* find the offd indices in A_ext */
for (i = 0, j = 0; i < A_ext_nnz; i++)
{
/* global index */
HYPRE_BigInt cid = A_ext_j[i];
/* keep the offd indices */
if (cid < first_col || cid >= end_col)
{
ext_offd[j++] = cid;
}
}
/* remove duplicates after sorting (TODO better ways?) */
hypre_BigQsort0(ext_offd, 0, j - 1);
for (i = 0, k = 0; i < j; i++)
{
if (i == 0 || ext_offd[i] != ext_offd[i - 1])
{
ext_offd[k++] = ext_offd[i];
}
}
/* union these `k' new indices into col_map_offd_A */
col_map_offd_A_new = hypre_CTAlloc(HYPRE_BigInt, num_cols_A_offd + k, HYPRE_MEMORY_HOST);
if (k)
{
/* map offd to offd_new */
offd2new = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
}
hypre_union2(num_cols_A_offd, col_map_offd_A, k, ext_offd,
&num_cols_A_offd_new, col_map_offd_A_new, offd2new, NULL);
hypre_TFree(ext_offd, HYPRE_MEMORY_HOST);
/*
* adjust column indices in A_ext
*/
for (i = 0; i < A_ext_nnz; i++)
{
HYPRE_BigInt cid = A_ext_j[i];
if (cid < first_col || cid >= end_col)
{
j = hypre_BigBinarySearch(col_map_offd_A_new, cid, num_cols_A_offd_new);
/* searching must succeed */
hypre_assert(j >= 0 && j < num_cols_A_offd_new);
/* trick: save ncol_local + j back */
A_ext_j[i] = ncol_local + j;
}
else
{
/* save local index: [0, ncol_local-1] */
A_ext_j[i] = cid - first_col;
}
}
/* marker for diag */
marker_diag = hypre_TAlloc(HYPRE_Int, ncol_local, HYPRE_MEMORY_HOST);
for (i = 0; i < ncol_local; i++)
{
marker_diag[i] = -1;
}
/* marker for newoffd */
marker_newoffd = hypre_TAlloc(HYPRE_Int, num_cols_A_offd_new, HYPRE_MEMORY_HOST);
for (i = 0; i < num_cols_A_offd_new; i++)
{
marker_newoffd[i] = -1;
}
/* outermost loop over the blocks */
for (block_start = first_row_block; block_start < end_row_block;
block_start += (HYPRE_BigInt)blockSize)
{
HYPRE_BigInt big_i;
block_end = hypre_min(block_start + (HYPRE_BigInt)blockSize, nrow_global);
s = (HYPRE_Int)(block_end - block_start);
/* 1. fill the dense block diag matrix */
for (big_i = block_start; big_i < block_end; big_i++)
{
/* row index in this block */
HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
/* row index i: it can be local or external */
if (big_i >= first_row && big_i < end_row)
{
/* is a local row */
j = (HYPRE_Int)(big_i - first_row);
for (k = A_diag_i[j]; k < A_diag_i[j + 1]; k++)
{
HYPRE_BigInt cid = (HYPRE_BigInt)A_diag_j[k] + first_col;
if (cid >= block_start && cid < block_end)
{
dense[block_i + (HYPRE_Int)(cid - block_start)*blockSize] = A_diag_a[k];
}
}
if (num_cols_A_offd)
{
for (k = A_offd_i[j]; k < A_offd_i[j + 1]; k++)
{
HYPRE_BigInt cid = col_map_offd_A[A_offd_j[k]];
if (cid >= block_start && cid < block_end)
{
dense[block_i + (HYPRE_Int)(cid - block_start)*blockSize] = A_offd_a[k];
}
}
}
}
else
{
/* is an external row */
if (big_i < first_row)
{
j = (HYPRE_Int)(big_i - first_row_block);
}
else
{
j = (HYPRE_Int)(first_row - first_row_block + big_i - end_row);
}
for (k = A_ext_i[j]; k < A_ext_i[j + 1]; k++)
{
HYPRE_BigInt cid = A_ext_j[k];
/* recover the global index */
cid = cid < (HYPRE_BigInt)ncol_local ? cid + first_col : col_map_offd_A_new[cid - ncol_local];
if (cid >= block_start && cid < block_end)
{
dense[block_i + (HYPRE_Int)(cid - block_start)*blockSize] = A_ext_a[k];
}
}
}
}
/* 2. invert the dense matrix */
hypre_dgetrf(&s, &s, dense, &blockSize, IPIV, &lapack_info);
hypre_assert(lapack_info == 0);
if (lapack_info == 0)
{
HYPRE_Int query = -1;
HYPRE_Real lwork_opt;
/* query the optimal size of work */
hypre_dgetri(&s, dense, &blockSize, IPIV, &lwork_opt, &query, &lapack_info);
hypre_assert(lapack_info == 0);
if (lwork_opt > dgetri_lwork)
{
dgetri_lwork = lwork_opt;
dgetri_work = hypre_TReAlloc(dgetri_work, HYPRE_Complex, dgetri_lwork, HYPRE_MEMORY_HOST);
}
hypre_dgetri(&s, dense, &blockSize, IPIV, dgetri_work, &dgetri_lwork, &lapack_info);
hypre_assert(lapack_info == 0);
}
/* filter out *zeros* */
HYPRE_Real Fnorm = 0.0;
for (i = 0; i < s; i++)
{
for (j = 0; j < s; j++)
{
HYPRE_Complex t = dense[j + i * blockSize];
Fnorm += t * t;
}
}
Fnorm = sqrt(Fnorm);
for (i = 0; i < s; i++)
{
for (j = 0; j < s; j++)
{
if ( hypre_abs(dense[j + i * blockSize]) < eps * Fnorm )
{
dense[j + i * blockSize] = 0.0;
}
}
}
/* 3. premultiplication: one-pass dynamic allocation */
for (big_i = block_start; big_i < block_end; big_i++)
{
/* starting points of this row in j */
HYPRE_Int diag_i_start = nnz_diag_new;
HYPRE_Int offd_i_start = nnz_offd_new;
/* compute a new row with global index 'i' and local index 'local_i' */
HYPRE_Int local_i = (HYPRE_Int)(big_i - first_row);
/* row index in this block */
HYPRE_Int block_i = (HYPRE_Int)(big_i - block_start);
if (big_i < first_row || big_i >= end_row)
{
continue;
}
/* if square (globally and locally): reserve the first slot in the diag part for the diagonal entry */
if (square2)
{
marker_diag[local_i] = nnz_diag_new;
if (nnz_diag_new == nnz_diag_alloc)
{
nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
}
A_diag_j_new[nnz_diag_new] = local_i;
A_diag_a_new[nnz_diag_new] = 0.0;
nnz_diag_new ++;
}
/* combine s rows */
for (j = 0; j < s; j++)
{
/* row to combine: global row id */
HYPRE_BigInt global_rid = block_start + (HYPRE_BigInt)j;
/* the multiplier */
HYPRE_Complex val = dense[block_i + j * blockSize];
if (val == 0.0)
{
continue;
}
if (global_rid >= first_row && global_rid < end_row)
{
/* this row is local */
HYPRE_Int rid = (HYPRE_Int)(global_rid - first_row);
HYPRE_Int ii;
for (ii = A_diag_i[rid]; ii < A_diag_i[rid + 1]; ii++)
{
HYPRE_Int col = A_diag_j[ii];
HYPRE_Complex vv = A_diag_a[ii];
if (marker_diag[col] < diag_i_start)
{
/* this col has not been seen before, create new entry */
marker_diag[col] = nnz_diag_new;
if (nnz_diag_new == nnz_diag_alloc)
{
nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
}
A_diag_j_new[nnz_diag_new] = col;
A_diag_a_new[nnz_diag_new] = val * vv;
nnz_diag_new ++;
}
else
{
/* existing entry, update */
HYPRE_Int p = marker_diag[col];
hypre_assert(A_diag_j_new[p] == col);
A_diag_a_new[p] += val * vv;
}
}
for (ii = A_offd_i[rid]; ii < A_offd_i[rid + 1]; ii++)
{
HYPRE_Int col = A_offd_j[ii];
/* use the mapper to map to new offd */
HYPRE_Int col_new = offd2new ? offd2new[col] : col;
HYPRE_Complex vv = A_offd_a[ii];
if (marker_newoffd[col_new] < offd_i_start)
{
/* this col has not been seen before, create new entry */
marker_newoffd[col_new] = nnz_offd_new;
if (nnz_offd_new == nnz_offd_alloc)
{
nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
}
A_offd_j_new[nnz_offd_new] = col_new;
A_offd_a_new[nnz_offd_new] = val * vv;
nnz_offd_new ++;
}
else
{
/* existing entry, update */
HYPRE_Int p = marker_newoffd[col_new];
hypre_assert(A_offd_j_new[p] == col_new);
A_offd_a_new[p] += val * vv;
}
}
}
else
{
/* this is an external row: go to A_ext */
HYPRE_Int rid, ii;
if (global_rid < first_row)
{
rid = (HYPRE_Int)(global_rid - first_row_block);
}
else
{
rid = (HYPRE_Int)(first_row - first_row_block + global_rid - end_row);
}
for (ii = A_ext_i[rid]; ii < A_ext_i[rid + 1]; ii++)
{
HYPRE_Int col = (HYPRE_Int)A_ext_j[ii];
HYPRE_Complex vv = A_ext_a[ii];
if (col < ncol_local)
{
/* in diag part */
if (marker_diag[col] < diag_i_start)
{
/* this col has not been seen before, create new entry */
marker_diag[col] = nnz_diag_new;
if (nnz_diag_new == nnz_diag_alloc)
{
nnz_diag_alloc = nnz_diag_alloc * 2 + 1;
A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_alloc, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_alloc, HYPRE_MEMORY_HOST);
}
A_diag_j_new[nnz_diag_new] = col;
A_diag_a_new[nnz_diag_new] = val * vv;
nnz_diag_new ++;
}
else
{
/* existing entry, update */
HYPRE_Int p = marker_diag[col];
hypre_assert(A_diag_j_new[p] == col);
A_diag_a_new[p] += val * vv;
}
}
else
{
/* in offd part */
col -= ncol_local;
if (marker_newoffd[col] < offd_i_start)
{
/* this col has not been seen before, create new entry */
marker_newoffd[col] = nnz_offd_new;
if (nnz_offd_new == nnz_offd_alloc)
{
nnz_offd_alloc = nnz_offd_alloc * 2 + 1;
A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_alloc, HYPRE_MEMORY_HOST);
A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_alloc, HYPRE_MEMORY_HOST);
}
A_offd_j_new[nnz_offd_new] = col;
A_offd_a_new[nnz_offd_new] = val * vv;
nnz_offd_new ++;
}
else
{
/* existing entry, update */
HYPRE_Int p = marker_newoffd[col];
hypre_assert(A_offd_j_new[p] == col);
A_offd_a_new[p] += val * vv;
}
}
}
}
}
/* done for row local_i */
A_diag_i_new[local_i + 1] = nnz_diag_new;
A_offd_i_new[local_i + 1] = nnz_offd_new;
} /* for i, each row */
dense += blockSize * blockSize;
} /* for each block */
/* done with all rows */
/* resize properly */
A_diag_j_new = hypre_TReAlloc(A_diag_j_new, HYPRE_Int, nnz_diag_new, HYPRE_MEMORY_HOST);
A_diag_a_new = hypre_TReAlloc(A_diag_a_new, HYPRE_Complex, nnz_diag_new, HYPRE_MEMORY_HOST);
A_offd_j_new = hypre_TReAlloc(A_offd_j_new, HYPRE_Int, nnz_offd_new, HYPRE_MEMORY_HOST);
A_offd_a_new = hypre_TReAlloc(A_offd_a_new, HYPRE_Complex, nnz_offd_new, HYPRE_MEMORY_HOST);
/* readjust col_map_offd_new */
for (i = 0; i < num_cols_A_offd_new; i++)
{
marker_newoffd[i] = -1;
}
for (i = 0; i < nnz_offd_new; i++)
{
j = A_offd_j_new[i];
if (marker_newoffd[j] == -1)
{
marker_newoffd[j] = 1;
}
}
for (i = 0, j = 0; i < num_cols_A_offd_new; i++)
{
if (marker_newoffd[i] == 1)
{
col_map_offd_A_new[j] = col_map_offd_A_new[i];
marker_newoffd[i] = j++;
}
}
num_cols_A_offd_new = j;
for (i = 0; i < nnz_offd_new; i++)
{
j = marker_newoffd[A_offd_j_new[i]];
hypre_assert(j >= 0 && j < num_cols_A_offd_new);
A_offd_j_new[i] = j;
}
/* Now, we should have everything of Parcsr matrix As */
Anew = hypre_ParCSRMatrixCreate(comm,
nrow_global,
ncol_global,
hypre_ParCSRMatrixRowStarts(A),
hypre_ParCSRMatrixColStarts(A),
num_cols_A_offd_new,
nnz_diag_new,
nnz_offd_new);
Anew_diag = hypre_ParCSRMatrixDiag(Anew);
hypre_CSRMatrixData(Anew_diag) = A_diag_a_new;
hypre_CSRMatrixI(Anew_diag) = A_diag_i_new;
hypre_CSRMatrixJ(Anew_diag) = A_diag_j_new;
Anew_offd = hypre_ParCSRMatrixOffd(Anew);
hypre_CSRMatrixData(Anew_offd) = A_offd_a_new;
hypre_CSRMatrixI(Anew_offd) = A_offd_i_new;
hypre_CSRMatrixJ(Anew_offd) = A_offd_j_new;
hypre_ParCSRMatrixColMapOffd(Anew) = col_map_offd_A_new;
hypre_ParCSRMatrixSetNumNonzeros(Anew);
hypre_ParCSRMatrixDNumNonzeros(Anew) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(Anew);
/* create CommPkg of Anew */
hypre_MatvecCommPkgCreate(Anew);
*As = Anew;
/* save diagonal blocks in A */
A->bdiag_size = blockSize;
A->bdiaginv = dense_all;
/* free workspace */
hypre_TFree(IPIV, HYPRE_MEMORY_HOST);
hypre_TFree(dgetri_work, HYPRE_MEMORY_HOST);
hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
hypre_TFree(marker_newoffd, HYPRE_MEMORY_HOST);
hypre_TFree(offd2new, HYPRE_MEMORY_HOST);
hypre_CSRMatrixDestroy(A_ext);
return hypre_error_flag;
}
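/*
 * Illustrative usage sketch (editorial addition, not original hypre code):
 * a minimal call sequence for hypre_ParcsrBdiagInvScal defined above. The
 * matrix A is assumed to be a fully assembled hypre_ParCSRMatrix; the routine
 * allocates the scaled matrix and hands ownership back through *As.
 */
#if 0
hypre_ParCSRMatrix *As = NULL;
HYPRE_Int           blockSize = 4;   /* example block size */
/* As = B^{-1} * A, where B is the blockSize x blockSize block diagonal of A */
hypre_ParcsrBdiagInvScal(A, blockSize, &As);
/* ... use As ... */
hypre_ParCSRMatrixDestroy(As);
#endif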
/*--------------------------------------------------------------------------
* hypre_ParcsrGetExternalRowsInit
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParcsrGetExternalRowsInit( hypre_ParCSRMatrix *A,
HYPRE_Int indices_len,
HYPRE_BigInt *indices,
hypre_ParCSRCommPkg *comm_pkg,
HYPRE_Int want_data,
void **request_ptr)
{
HYPRE_Int i, j, k;
HYPRE_Int num_sends, num_rows_send, num_nnz_send, *send_i,
num_recvs, num_rows_recv, num_nnz_recv, *recv_i,
*send_jstarts, *recv_jstarts, *send_i_offset;
HYPRE_BigInt *send_j, *recv_j;
HYPRE_Complex *send_a = NULL, *recv_a = NULL;
hypre_ParCSRCommPkg *comm_pkg_j;
hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
/* HYPRE_Int global_num_rows = hypre_ParCSRMatrixGlobalNumRows(A); */
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* HYPRE_Int local_num_rows = hypre_CSRMatrixNumRows(A_diag); */
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
/* HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A); */
/* HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A); */
HYPRE_BigInt first_col = hypre_ParCSRMatrixFirstColDiag(A);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Int num_procs;
HYPRE_Int my_id;
void **vrequest;
hypre_CSRMatrix *A_ext;
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* number of sends (#procs) */
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
/* number of rows to send */
num_rows_send = hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends);
/* number of recvs (#procs) */
num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg);
/* number of rows to recv */
num_rows_recv = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, num_recvs);
/* must be true if indices contains proper offd indices */
hypre_assert(indices_len == num_rows_recv);
/* send_i/recv_i:
* the arrays to send and recv: we first send and recv the row lengths */
send_i = hypre_TAlloc(HYPRE_Int, num_rows_send, HYPRE_MEMORY_HOST);
recv_i = hypre_CTAlloc(HYPRE_Int, num_rows_recv + 1, HYPRE_MEMORY_HOST);
/* fill the send array with row lengths */
for (i = 0, num_nnz_send = 0; i < num_rows_send; i++)
{
/* j: row index to send */
j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
send_i[i] = A_diag_i[j + 1] - A_diag_i[j] + A_offd_i[j + 1] - A_offd_i[j];
num_nnz_send += send_i[i];
}
/* send this array out: note the shift in recv_i by one (async) */
comm_handle = hypre_ParCSRCommHandleCreate(11, comm_pkg, send_i, recv_i + 1);
/* prepare data to send out; overlap with the above communication */
send_j = hypre_TAlloc(HYPRE_BigInt, num_nnz_send, HYPRE_MEMORY_HOST);
if (want_data)
{
send_a = hypre_TAlloc(HYPRE_Complex, num_nnz_send, HYPRE_MEMORY_HOST);
}
send_i_offset = hypre_TAlloc(HYPRE_Int, num_rows_send + 1, HYPRE_MEMORY_HOST);
send_i_offset[0] = 0;
hypre_TMemcpy(send_i_offset + 1, send_i, HYPRE_Int, num_rows_send,
HYPRE_MEMORY_HOST, HYPRE_MEMORY_HOST);
/* prefix sum. TODO: OMP parallelization */
for (i = 1; i <= num_rows_send; i++)
{
send_i_offset[i] += send_i_offset[i - 1];
}
hypre_assert(send_i_offset[num_rows_send] == num_nnz_send);
/* pointers to each proc in send_j */
send_jstarts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i <= num_sends; i++)
{
send_jstarts[i] = send_i_offset[hypre_ParCSRCommPkgSendMapStart(comm_pkg, i)];
}
hypre_assert(send_jstarts[num_sends] == num_nnz_send);
/* fill the CSR matrix: j and a */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for HYPRE_SMP_SCHEDULE private(i,j,k)
#endif
for (i = 0; i < num_rows_send; i++)
{
HYPRE_Int i1 = send_i_offset[i];
j = hypre_ParCSRCommPkgSendMapElmt(comm_pkg, i);
/* open row j and fill ja and a to send */
for (k = A_diag_i[j]; k < A_diag_i[j + 1]; k++)
{
send_j[i1] = first_col + A_diag_j[k];
if (want_data)
{
send_a[i1] = A_diag_a[k];
}
i1++;
}
if (num_procs > 1)
{
for (k = A_offd_i[j]; k < A_offd_i[j + 1]; k++)
{
send_j[i1] = col_map_offd_A[A_offd_j[k]];
if (want_data)
{
send_a[i1] = A_offd_a[k];
}
i1++;
}
}
hypre_assert(send_i_offset[i + 1] == i1);
}
/* finish the above communication: send_i/recv_i */
hypre_ParCSRCommHandleDestroy(comm_handle);
/* adjust recv_i to ptrs */
for (i = 1; i <= num_rows_recv; i++)
{
recv_i[i] += recv_i[i - 1];
}
num_nnz_recv = recv_i[num_rows_recv];
recv_j = hypre_CTAlloc(HYPRE_BigInt, num_nnz_recv, HYPRE_MEMORY_HOST);
if (want_data)
{
recv_a = hypre_CTAlloc(HYPRE_Complex, num_nnz_recv, HYPRE_MEMORY_HOST);
}
recv_jstarts = hypre_CTAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
for (i = 1; i <= num_recvs; i++)
{
j = hypre_ParCSRCommPkgRecvVecStart(comm_pkg, i);
recv_jstarts[i] = recv_i[j];
}
/* ready to send and recv: create a communication package for data */
comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm (comm_pkg_j) = comm;
hypre_ParCSRCommPkgNumSends (comm_pkg_j) = num_sends;
hypre_ParCSRCommPkgSendProcs (comm_pkg_j) = hypre_ParCSRCommPkgSendProcs(comm_pkg);
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = send_jstarts;
hypre_ParCSRCommPkgNumRecvs (comm_pkg_j) = num_recvs;
hypre_ParCSRCommPkgRecvProcs (comm_pkg_j) = hypre_ParCSRCommPkgRecvProcs(comm_pkg);
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = recv_jstarts;
/* init communication */
/* ja */
comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, send_j, recv_j);
if (want_data)
{
/* a */
comm_handle_a = hypre_ParCSRCommHandleCreate(1, comm_pkg_j, send_a, recv_a);
}
else
{
comm_handle_a = NULL;
}
/* create A_ext */
A_ext = hypre_CSRMatrixCreate(num_rows_recv, hypre_ParCSRMatrixGlobalNumCols(A), num_nnz_recv);
hypre_CSRMatrixMemoryLocation(A_ext) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI (A_ext) = recv_i;
hypre_CSRMatrixBigJ(A_ext) = recv_j;
hypre_CSRMatrixData(A_ext) = recv_a;
/* output */
vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
vrequest[0] = (void *) comm_handle_j;
vrequest[1] = (void *) comm_handle_a;
vrequest[2] = (void *) A_ext;
vrequest[3] = (void *) comm_pkg_j;
*request_ptr = (void *) vrequest;
/* free */
hypre_TFree(send_i, HYPRE_MEMORY_HOST);
hypre_TFree(send_i_offset, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
hypre_CSRMatrix*
hypre_ParcsrGetExternalRowsWait(void *vrequest)
{
void **request = (void **) vrequest;
hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
hypre_CSRMatrix *A_ext = (hypre_CSRMatrix *) request[2];
hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3];
HYPRE_BigInt *send_j = (HYPRE_BigInt *) hypre_ParCSRCommHandleSendData(
comm_handle_j);
if (comm_handle_a)
{
HYPRE_Complex *send_a = (HYPRE_Complex *) hypre_ParCSRCommHandleSendData(comm_handle_a);
hypre_ParCSRCommHandleDestroy(comm_handle_a);
hypre_TFree(send_a, HYPRE_MEMORY_HOST);
}
hypre_ParCSRCommHandleDestroy(comm_handle_j);
hypre_TFree(send_j, HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
hypre_TFree(request, HYPRE_MEMORY_HOST);
return A_ext;
}
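/*
 * Illustrative sketch (editorial addition): the Init/Wait split above exists so
 * callers can overlap communication with independent local work. The names
 * `A`, `num_ext_rows`, `ext_indices`, and `comm_pkg` are placeholders for a
 * matrix and an extended communication package set up as in
 * hypre_ParcsrBdiagInvScal.
 */
#if 0
void *request;
hypre_ParcsrGetExternalRowsInit(A, num_ext_rows, ext_indices, comm_pkg,
                                1 /* want_data */, &request);
/* ... independent local computation proceeds while rows are in flight ... */
hypre_CSRMatrix *A_ext = hypre_ParcsrGetExternalRowsWait(request);
#endif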
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixAdd: performs C = alpha*A + beta*B
*
* A and B are assumed to have the same row and column partitionings
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixAddHost( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
HYPRE_Complex beta,
hypre_ParCSRMatrix *B,
hypre_ParCSRMatrix **C_ptr )
{
/* ParCSRMatrix data */
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_BigInt num_rows_A = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt num_cols_A = hypre_ParCSRMatrixGlobalNumCols(A);
/* HYPRE_BigInt num_rows_B = hypre_ParCSRMatrixGlobalNumRows(B); */
/* HYPRE_BigInt num_cols_B = hypre_ParCSRMatrixGlobalNumCols(B); */
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *rownnz_diag_A = hypre_CSRMatrixRownnz(A_diag);
HYPRE_Int num_rownnz_diag_A = hypre_CSRMatrixNumRownnz(A_diag);
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int num_cols_diag_A = hypre_CSRMatrixNumCols(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *rownnz_offd_A = hypre_CSRMatrixRownnz(A_offd);
HYPRE_Int num_rownnz_offd_A = hypre_CSRMatrixNumRownnz(A_offd);
HYPRE_Int num_rows_offd_A = hypre_CSRMatrixNumRows(A_offd);
HYPRE_Int num_cols_offd_A = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int *A2C_offd;
/* diag part of B */
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
HYPRE_Int *rownnz_diag_B = hypre_CSRMatrixRownnz(B_diag);
HYPRE_Int num_rownnz_diag_B = hypre_CSRMatrixNumRownnz(B_diag);
HYPRE_Int num_rows_diag_B = hypre_CSRMatrixNumRows(B_diag);
/* HYPRE_Int num_cols_diag_B = hypre_CSRMatrixNumCols(B_diag); */
/* off-diag part of B */
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int *rownnz_offd_B = hypre_CSRMatrixRownnz(B_offd);
HYPRE_Int num_rownnz_offd_B = hypre_CSRMatrixNumRownnz(B_offd);
HYPRE_Int num_rows_offd_B = hypre_CSRMatrixNumRows(B_offd);
HYPRE_Int num_cols_offd_B = hypre_CSRMatrixNumCols(B_offd);
HYPRE_BigInt *col_map_offd_B = hypre_ParCSRMatrixColMapOffd(B);
HYPRE_Int *B2C_offd;
/* C data */
hypre_ParCSRMatrix *C;
hypre_CSRMatrix *C_diag;
hypre_CSRMatrix *C_offd;
HYPRE_BigInt *col_map_offd_C;
HYPRE_Int *C_diag_i, *C_offd_i;
HYPRE_Int *rownnz_diag_C = NULL;
HYPRE_Int *rownnz_offd_C = NULL;
HYPRE_Int num_rownnz_diag_C;
HYPRE_Int num_rownnz_offd_C;
HYPRE_Int num_rows_diag_C = num_rows_diag_A;
HYPRE_Int num_cols_diag_C = num_cols_diag_A;
HYPRE_Int num_rows_offd_C = num_rows_offd_A;
HYPRE_Int num_cols_offd_C = num_cols_offd_A + num_cols_offd_B;
HYPRE_Int *twspace;
HYPRE_MemoryLocation memory_location_A = hypre_ParCSRMatrixMemoryLocation(A);
HYPRE_MemoryLocation memory_location_B = hypre_ParCSRMatrixMemoryLocation(B);
/* RL: TODO cannot guarantee, maybe should never assert
hypre_assert(memory_location_A == memory_location_B);
*/
/* RL: in the case of A=H, B=D, or A=D, B=H, let C = D,
* not sure if this is the right thing to do.
* Also, need something like this in other places
* TODO */
HYPRE_MemoryLocation memory_location_C = hypre_max(memory_location_A, memory_location_B);
HYPRE_ANNOTATE_FUNC_BEGIN;
/* Allocate memory */
twspace = hypre_TAlloc(HYPRE_Int, hypre_NumThreads(), HYPRE_MEMORY_HOST);
C_diag_i = hypre_CTAlloc(HYPRE_Int, num_rows_diag_A + 1, memory_location_C);
C_offd_i = hypre_CTAlloc(HYPRE_Int, num_rows_offd_A + 1, memory_location_C);
col_map_offd_C = hypre_TAlloc(HYPRE_BigInt, num_cols_offd_C, HYPRE_MEMORY_HOST);
/* Compute num_cols_offd_C, A2C_offd, and B2C_offd*/
A2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_A, HYPRE_MEMORY_HOST);
B2C_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_B, HYPRE_MEMORY_HOST);
hypre_union2(num_cols_offd_A, col_map_offd_A,
num_cols_offd_B, col_map_offd_B,
&num_cols_offd_C, col_map_offd_C,
A2C_offd, B2C_offd);
/* Set nonzero rows data of diag_C */
num_rownnz_diag_C = num_rows_diag_A;
if ((num_rownnz_diag_A < num_rows_diag_A) &&
(num_rownnz_diag_B < num_rows_diag_B))
{
hypre_MergeOrderedArrays( num_rownnz_diag_A, rownnz_diag_A,
num_rownnz_diag_B, rownnz_diag_B,
&num_rownnz_diag_C, &rownnz_diag_C);
}
/* Set nonzero rows data of offd_C */
num_rownnz_offd_C = num_rows_offd_A;
if ((num_rownnz_offd_A < num_rows_offd_A) &&
(num_rownnz_offd_B < num_rows_offd_B))
{
hypre_MergeOrderedArrays( num_rownnz_offd_A, rownnz_offd_A,
num_rownnz_offd_B, rownnz_offd_B,
&num_rownnz_offd_C, &rownnz_offd_C);
}
/* Set diag_C */
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int ii, num_threads;
HYPRE_Int size, rest, ns, ne;
HYPRE_Int *marker_diag;
HYPRE_Int *marker_offd;
ii = hypre_GetThreadNum();
num_threads = hypre_NumActiveThreads();
/*-----------------------------------------------------------------------
* Compute C_diag = alpha*A_diag + beta*B_diag
*-----------------------------------------------------------------------*/
size = num_rownnz_diag_C / num_threads;
rest = num_rownnz_diag_C - size * num_threads;
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
marker_diag = hypre_TAlloc(HYPRE_Int, num_cols_diag_A, HYPRE_MEMORY_HOST);
hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_diag,
NULL, NULL, A_diag, B_diag,
num_rows_diag_C, num_rownnz_diag_C,
num_cols_diag_C, rownnz_diag_C,
memory_location_C, C_diag_i, &C_diag);
hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_diag,
NULL, NULL, rownnz_diag_C,
alpha, beta, A_diag, B_diag, C_diag);
hypre_TFree(marker_diag, HYPRE_MEMORY_HOST);
/*-----------------------------------------------------------------------
* Compute C_offd = alpha*A_offd + beta*B_offd
*-----------------------------------------------------------------------*/
size = num_rownnz_offd_C / num_threads;
rest = num_rownnz_offd_C - size * num_threads;
if (ii < rest)
{
ns = ii * size + ii;
ne = (ii + 1) * size + ii + 1;
}
else
{
ns = ii * size + rest;
ne = (ii + 1) * size + rest;
}
marker_offd = hypre_TAlloc(HYPRE_Int, num_cols_offd_C, HYPRE_MEMORY_HOST);
hypre_CSRMatrixAddFirstPass(ns, ne, twspace, marker_offd,
A2C_offd, B2C_offd, A_offd, B_offd,
num_rows_offd_C, num_rownnz_offd_C,
num_cols_offd_C, rownnz_offd_C,
memory_location_C, C_offd_i, &C_offd);
hypre_CSRMatrixAddSecondPass(ns, ne, twspace, marker_offd,
A2C_offd, B2C_offd, rownnz_offd_C,
alpha, beta, A_offd, B_offd, C_offd);
hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);
} /* end of omp parallel region */
/* Free memory */
hypre_TFree(twspace, HYPRE_MEMORY_HOST);
hypre_TFree(A2C_offd, HYPRE_MEMORY_HOST);
hypre_TFree(B2C_offd, HYPRE_MEMORY_HOST);
/* Create ParCSRMatrix C */
C = hypre_ParCSRMatrixCreate(comm,
num_rows_A,
num_cols_A,
hypre_ParCSRMatrixRowStarts(A),
hypre_ParCSRMatrixColStarts(A),
num_cols_offd_C,
hypre_CSRMatrixNumNonzeros(C_diag),
hypre_CSRMatrixNumNonzeros(C_offd));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixDiag(C));
hypre_CSRMatrixDestroy(hypre_ParCSRMatrixOffd(C));
hypre_ParCSRMatrixDiag(C) = C_diag;
hypre_ParCSRMatrixOffd(C) = C_offd;
hypre_ParCSRMatrixColMapOffd(C) = col_map_offd_C;
hypre_ParCSRMatrixSetNumNonzeros(C);
hypre_ParCSRMatrixDNumNonzeros(C) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(C);
/* create CommPkg of C */
hypre_MatvecCommPkgCreate(C);
*C_ptr = C;
HYPRE_ANNOTATE_FUNC_END;
return hypre_error_flag;
}
HYPRE_Int
hypre_ParCSRMatrixAdd( HYPRE_Complex alpha,
hypre_ParCSRMatrix *A,
HYPRE_Complex beta,
hypre_ParCSRMatrix *B,
hypre_ParCSRMatrix **C_ptr )
{
hypre_assert(hypre_ParCSRMatrixGlobalNumRows(A) == hypre_ParCSRMatrixGlobalNumRows(B));
hypre_assert(hypre_ParCSRMatrixGlobalNumCols(A) == hypre_ParCSRMatrixGlobalNumCols(B));
hypre_assert(hypre_ParCSRMatrixNumRows(A) == hypre_ParCSRMatrixNumRows(B));
hypre_assert(hypre_ParCSRMatrixNumCols(A) == hypre_ParCSRMatrixNumCols(B));
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
if ( hypre_GetExecPolicy2( hypre_ParCSRMatrixMemoryLocation(A),
hypre_ParCSRMatrixMemoryLocation(B) ) == HYPRE_EXEC_DEVICE )
{
hypre_ParCSRMatrixAddDevice(alpha, A, beta, B, C_ptr);
}
else
#endif
{
hypre_ParCSRMatrixAddHost(alpha, A, beta, B, C_ptr);
}
return hypre_error_flag;
}
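/*
 * Illustrative sketch (editorial addition): forming a linear combination with
 * hypre_ParCSRMatrixAdd, which dispatches to the host or device kernel based
 * on the operands' memory locations. A and B are assumed to share row and
 * column partitionings, as required above.
 */
#if 0
hypre_ParCSRMatrix *C = NULL;
/* C = 2*A - B */
hypre_ParCSRMatrixAdd(2.0, A, -1.0, B, &C);
/* ... use C ... */
hypre_ParCSRMatrixDestroy(C);
#endif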
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixFnorm
*--------------------------------------------------------------------------*/
HYPRE_Real
hypre_ParCSRMatrixFnorm( hypre_ParCSRMatrix *A )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
HYPRE_Real f_diag, f_offd, local_result, result;
f_diag = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixDiag(A));
f_offd = hypre_CSRMatrixFnorm(hypre_ParCSRMatrixOffd(A));
local_result = f_diag * f_diag + f_offd * f_offd;
hypre_MPI_Allreduce(&local_result, &result, 1, HYPRE_MPI_REAL, hypre_MPI_SUM, comm);
return sqrt(result);
}
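/*
 * Editorial note: since the diag and offd parts partition the local nonzeros,
 * the reduction above computes
 *    ||A||_F = sqrt( sum_p ( ||A_diag^(p)||_F^2 + ||A_offd^(p)||_F^2 ) )
 * where p ranges over MPI ranks, i.e. the true global Frobenius norm.
 */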
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixInfNorm
*
* Computes the infinity norm of A:
*
* norm = max_{i} sum_{j} |A_{ij}|
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixInfNorm( hypre_ParCSRMatrix *A,
HYPRE_Real *norm )
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int num_rows_diag_A = hypre_CSRMatrixNumRows(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
/* Local variables */
HYPRE_Int i, j;
HYPRE_Real maxsum = 0.0;
HYPRE_Real rowsum;
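/* Editorial note: two OpenMP paths follow. MSVC only implements OpenMP 2.0,
 * which lacks the reduction(max:...) clause (added in OpenMP 3.1), so the
 * _MSC_VER branch keeps a per-thread maximum and merges it in a critical
 * section; other compilers use the max reduction directly. */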
#ifdef _MSC_VER
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel private(i,j,rowsum)
#endif
{
HYPRE_Real maxsum_local;
maxsum_local = 0.0;
#ifdef HYPRE_USING_OPENMP
#pragma omp for HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows_diag_A; i++)
{
rowsum = 0.0;
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
rowsum += hypre_cabs(A_diag_a[j]);
}
for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
{
rowsum += hypre_cabs(A_offd_a[j]);
}
maxsum_local = hypre_max(maxsum_local, rowsum);
}
#ifdef HYPRE_USING_OPENMP
#pragma omp critical
#endif
{
maxsum = hypre_max(maxsum, maxsum_local);
}
}
#else
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,rowsum) reduction(max:maxsum) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_rows_diag_A; i++)
{
rowsum = 0.0;
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
rowsum += hypre_cabs(A_diag_a[j]);
}
for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
{
rowsum += hypre_cabs(A_offd_a[j]);
}
maxsum = hypre_max(maxsum, rowsum);
}
#endif
hypre_MPI_Allreduce(&maxsum, norm, 1, HYPRE_MPI_REAL, hypre_MPI_MAX, comm);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ExchangeExternalRowsInit
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ExchangeExternalRowsInit( hypre_CSRMatrix *B_ext,
hypre_ParCSRCommPkg *comm_pkg_A,
void **request_ptr)
{
MPI_Comm comm = hypre_ParCSRCommPkgComm(comm_pkg_A);
HYPRE_Int num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg_A);
HYPRE_Int *recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg_A);
HYPRE_Int *recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_A);
HYPRE_Int num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg_A);
HYPRE_Int *send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg_A);
HYPRE_Int *send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg_A);
HYPRE_Int num_elmts_send = send_map_starts[num_sends];
HYPRE_Int num_elmts_recv = recv_vec_starts[num_recvs];
HYPRE_Int *B_ext_i = B_ext ? hypre_CSRMatrixI(B_ext) : NULL;
HYPRE_BigInt *B_ext_j = B_ext ? hypre_CSRMatrixBigJ(B_ext) : NULL;
HYPRE_Complex *B_ext_data = B_ext ? hypre_CSRMatrixData(B_ext) : NULL;
HYPRE_Int B_ext_ncols = B_ext ? hypre_CSRMatrixNumCols(B_ext) : 0;
HYPRE_Int B_ext_nrows = B_ext ? hypre_CSRMatrixNumRows(B_ext) : 0;
HYPRE_Int *B_ext_rownnz = hypre_CTAlloc(HYPRE_Int, B_ext_nrows, HYPRE_MEMORY_HOST);
hypre_assert(num_elmts_recv == B_ext_nrows);
/* output matrix */
hypre_CSRMatrix *B_int;
HYPRE_Int B_int_nrows = num_elmts_send;
HYPRE_Int B_int_ncols = B_ext_ncols;
HYPRE_Int *B_int_i = hypre_TAlloc(HYPRE_Int, B_int_nrows + 1, HYPRE_MEMORY_HOST);
HYPRE_BigInt *B_int_j = NULL;
HYPRE_Complex *B_int_data = NULL;
HYPRE_Int B_int_nnz;
hypre_ParCSRCommHandle *comm_handle, *comm_handle_j, *comm_handle_a;
hypre_ParCSRCommPkg *comm_pkg_j;
HYPRE_Int *jdata_recv_vec_starts;
HYPRE_Int *jdata_send_map_starts;
HYPRE_Int i;
HYPRE_Int num_procs;
void **vrequest;
hypre_MPI_Comm_size(comm, &num_procs);
jdata_send_map_starts = hypre_TAlloc(HYPRE_Int, num_sends + 1, HYPRE_MEMORY_HOST);
/*--------------------------------------------------------------------------
* B_ext_rownnz contains the number of elements of row j
* (to be determined through send_map_elmnts on the receiving end)
*--------------------------------------------------------------------------*/
for (i = 0; i < B_ext_nrows; i++)
{
B_ext_rownnz[i] = B_ext_i[i + 1] - B_ext_i[i];
}
/*--------------------------------------------------------------------------
* initialize communication: send/recv the row nnz
 * (note the use of comm_pkg_A, mode 12, as in transpose matvec)
*--------------------------------------------------------------------------*/
comm_handle = hypre_ParCSRCommHandleCreate(12, comm_pkg_A, B_ext_rownnz, B_int_i + 1);
jdata_recv_vec_starts = hypre_TAlloc(HYPRE_Int, num_recvs + 1, HYPRE_MEMORY_HOST);
jdata_recv_vec_starts[0] = 0;
for (i = 1; i <= num_recvs; i++)
{
jdata_recv_vec_starts[i] = B_ext_i[recv_vec_starts[i]];
}
comm_pkg_j = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST);
hypre_ParCSRCommPkgComm(comm_pkg_j) = comm;
hypre_ParCSRCommPkgNumSends(comm_pkg_j) = num_recvs;
hypre_ParCSRCommPkgNumRecvs(comm_pkg_j) = num_sends;
hypre_ParCSRCommPkgSendProcs(comm_pkg_j) = recv_procs;
hypre_ParCSRCommPkgRecvProcs(comm_pkg_j) = send_procs;
hypre_ParCSRCommHandleDestroy(comm_handle);
/*--------------------------------------------------------------------------
* compute B_int: row nnz to row ptrs
*--------------------------------------------------------------------------*/
B_int_i[0] = 0;
for (i = 1; i <= B_int_nrows; i++)
{
B_int_i[i] += B_int_i[i - 1];
}
B_int_nnz = B_int_i[B_int_nrows];
B_int_j = hypre_TAlloc(HYPRE_BigInt, B_int_nnz, HYPRE_MEMORY_HOST);
B_int_data = hypre_TAlloc(HYPRE_Complex, B_int_nnz, HYPRE_MEMORY_HOST);
for (i = 0; i <= num_sends; i++)
{
jdata_send_map_starts[i] = B_int_i[send_map_starts[i]];
}
/* note the order of send/recv is reversed */
hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j) = jdata_send_map_starts;
hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j) = jdata_recv_vec_starts;
/* send/recv CSR rows */
comm_handle_a = hypre_ParCSRCommHandleCreate( 1, comm_pkg_j, B_ext_data, B_int_data);
comm_handle_j = hypre_ParCSRCommHandleCreate(21, comm_pkg_j, B_ext_j, B_int_j);
/* create CSR */
B_int = hypre_CSRMatrixCreate(B_int_nrows, B_int_ncols, B_int_nnz);
hypre_CSRMatrixMemoryLocation(B_int) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixI(B_int) = B_int_i;
hypre_CSRMatrixBigJ(B_int) = B_int_j;
hypre_CSRMatrixData(B_int) = B_int_data;
/* output */
vrequest = hypre_TAlloc(void *, 4, HYPRE_MEMORY_HOST);
vrequest[0] = (void *) comm_handle_j;
vrequest[1] = (void *) comm_handle_a;
vrequest[2] = (void *) B_int;
vrequest[3] = (void *) comm_pkg_j;
*request_ptr = (void *) vrequest;
hypre_TFree(B_ext_rownnz, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ExchangeExternalRowsWait
*--------------------------------------------------------------------------*/
hypre_CSRMatrix*
hypre_ExchangeExternalRowsWait(void *vrequest)
{
void **request = (void **) vrequest;
hypre_ParCSRCommHandle *comm_handle_j = (hypre_ParCSRCommHandle *) request[0];
hypre_ParCSRCommHandle *comm_handle_a = (hypre_ParCSRCommHandle *) request[1];
hypre_CSRMatrix *B_int = (hypre_CSRMatrix *) request[2];
hypre_ParCSRCommPkg *comm_pkg_j = (hypre_ParCSRCommPkg *) request[3];
/* communication done */
hypre_ParCSRCommHandleDestroy(comm_handle_a);
hypre_ParCSRCommHandleDestroy(comm_handle_j);
hypre_TFree(hypre_ParCSRCommPkgSendMapStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(hypre_ParCSRCommPkgRecvVecStarts(comm_pkg_j), HYPRE_MEMORY_HOST);
hypre_TFree(comm_pkg_j, HYPRE_MEMORY_HOST);
hypre_TFree(request, HYPRE_MEMORY_HOST);
return B_int;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixExtractSubmatrixFC
*
* extract submatrix A_{FF}, A_{FC}, A_{CF} or A_{CC}
* char job[2] = "FF", "FC", "CF" or "CC"
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixExtractSubmatrixFC( hypre_ParCSRMatrix *A,
HYPRE_Int *CF_marker,
HYPRE_BigInt *cpts_starts,
const char *job,
hypre_ParCSRMatrix **B_ptr,
HYPRE_Real strength_thresh)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Complex *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Complex *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
//HYPRE_Int *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
hypre_ParCSRMatrix *B;
hypre_CSRMatrix *B_diag, *B_offd;
HYPRE_Real *B_maxel_row;
HYPRE_Int *B_diag_i, *B_diag_j, *B_offd_i, *B_offd_j;
HYPRE_Complex *B_diag_a, *B_offd_a;
HYPRE_Int num_cols_B_offd;
HYPRE_BigInt *col_map_offd_B;
HYPRE_Int i, j, k, k1, k2;
HYPRE_BigInt B_nrow_global, B_ncol_global;
HYPRE_Int A_nlocal, B_nrow_local, B_ncol_local,
B_nnz_diag, B_nnz_offd;
HYPRE_BigInt total_global_fpts, total_global_cpts, fpts_starts[2];
HYPRE_Int nf_local, nc_local;
HYPRE_BigInt big_nf_local;
HYPRE_Int row_set, col_set;
HYPRE_BigInt *B_row_starts, *B_col_starts, B_first_col;
HYPRE_Int my_id, num_procs;
HYPRE_Int *sub_idx_diag;
HYPRE_BigInt *sub_idx_offd;
HYPRE_Int num_sends;
HYPRE_BigInt *send_buf_data;
/* MPI size and rank*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
row_set = job[0] == 'F' ? -1 : 1;
col_set = job[1] == 'F' ? -1 : 1;
A_nlocal = hypre_CSRMatrixNumRows(A_diag);
/*-------------- global number of C points and local C points
* assuming cpts_starts is given */
if (row_set == 1 || col_set == 1)
{
if (my_id == (num_procs - 1))
{
total_global_cpts = cpts_starts[1];
}
hypre_MPI_Bcast(&total_global_cpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
nc_local = (HYPRE_Int)(cpts_starts[1] - cpts_starts[0]);
}
/*-------------- global number of F points, local F points, and F starts */
if (row_set == -1 || col_set == -1)
{
nf_local = 0;
for (i = 0; i < A_nlocal; i++)
{
if (CF_marker[i] < 0)
{
nf_local++;
}
}
big_nf_local = (HYPRE_BigInt) nf_local;
hypre_MPI_Scan(&big_nf_local, fpts_starts + 1, 1, HYPRE_MPI_BIG_INT, hypre_MPI_SUM, comm);
fpts_starts[0] = fpts_starts[1] - nf_local;
if (my_id == num_procs - 1)
{
total_global_fpts = fpts_starts[1];
}
hypre_MPI_Bcast(&total_global_fpts, 1, HYPRE_MPI_BIG_INT, num_procs - 1, comm);
}
if (row_set == -1 && col_set == -1)
{
/* FF */
B_nrow_local = nf_local;
B_ncol_local = nf_local;
B_nrow_global = total_global_fpts;
B_ncol_global = total_global_fpts;
B_row_starts = B_col_starts = fpts_starts;
}
else if (row_set == -1 && col_set == 1)
{
/* FC */
B_nrow_local = nf_local;
B_ncol_local = nc_local;
B_nrow_global = total_global_fpts;
B_ncol_global = total_global_cpts;
B_row_starts = fpts_starts;
B_col_starts = cpts_starts;
}
else if (row_set == 1 && col_set == -1)
{
/* CF */
B_nrow_local = nc_local;
B_ncol_local = nf_local;
B_nrow_global = total_global_cpts;
B_ncol_global = total_global_fpts;
B_row_starts = cpts_starts;
B_col_starts = fpts_starts;
}
else
{
/* CC */
B_nrow_local = nc_local;
B_ncol_local = nc_local;
B_nrow_global = total_global_cpts;
B_ncol_global = total_global_cpts;
B_row_starts = B_col_starts = cpts_starts;
}
/* global index of my first col */
B_first_col = B_col_starts[0];
/* sub_idx_diag: [local] mapping from F+C to F/C, if not selected, be -1 */
sub_idx_diag = hypre_TAlloc(HYPRE_Int, A_nlocal, HYPRE_MEMORY_HOST);
for (i = 0, k = 0; i < A_nlocal; i++)
{
HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
if (CF_i == col_set)
{
sub_idx_diag[i] = k++;
}
else
{
sub_idx_diag[i] = -1;
}
}
hypre_assert(k == B_ncol_local);
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
send_buf_data = hypre_TAlloc(HYPRE_BigInt, hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends),
HYPRE_MEMORY_HOST);
k = 0;
for (i = 0; i < num_sends; i++)
{
/* start pos of elements sent to send_proc[i] */
HYPRE_Int si = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
HYPRE_Int ei = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1);
/* loop through all elems to send_proc[i] */
for (j = si; j < ei; j++)
{
/* j1: local idx */
HYPRE_BigInt j1 = sub_idx_diag[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
if (j1 != -1)
{
/* adjust j1 to B global idx */
j1 += B_first_col;
}
send_buf_data[k++] = j1;
}
}
hypre_assert(k == hypre_ParCSRCommPkgSendMapStart(comm_pkg, num_sends));
/* recv buffer */
sub_idx_offd = hypre_TAlloc(HYPRE_BigInt, num_cols_A_offd, HYPRE_MEMORY_HOST);
/* create a handle to start communication. 21: for HYPRE_BigInt */
comm_handle = hypre_ParCSRCommHandleCreate(21, comm_pkg, send_buf_data, sub_idx_offd);
/* destroy the handle to finish communication */
hypre_ParCSRCommHandleDestroy(comm_handle);
for (i = 0, num_cols_B_offd = 0; i < num_cols_A_offd; i++)
{
if (sub_idx_offd[i] != -1)
{
num_cols_B_offd ++;
}
}
col_map_offd_B = hypre_TAlloc(HYPRE_BigInt, num_cols_B_offd, HYPRE_MEMORY_HOST);
for (i = 0, k = 0; i < num_cols_A_offd; i++)
{
if (sub_idx_offd[i] != -1)
{
col_map_offd_B[k] = sub_idx_offd[i];
sub_idx_offd[i] = k++;
}
}
hypre_assert(k == num_cols_B_offd);
/* count nnz and set ia */
B_nnz_diag = B_nnz_offd = 0;
B_maxel_row = hypre_TAlloc(HYPRE_Real, B_nrow_local, HYPRE_MEMORY_HOST);
B_diag_i = hypre_TAlloc(HYPRE_Int, B_nrow_local + 1, HYPRE_MEMORY_HOST);
B_offd_i = hypre_TAlloc(HYPRE_Int, B_nrow_local + 1, HYPRE_MEMORY_HOST);
B_diag_i[0] = B_offd_i[0] = 0;
for (i = 0, k = 0; i < A_nlocal; i++)
{
HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
if (CF_i != row_set)
{
continue;
}
k++;
// Get max abs-value element of this row
HYPRE_Real temp_max = 0;
if (strength_thresh > 0)
{
for (j = A_diag_i[i] + 1; j < A_diag_i[i + 1]; j++)
{
if (hypre_cabs(A_diag_a[j]) > temp_max)
{
temp_max = hypre_cabs(A_diag_a[j]);
}
}
for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
{
if (hypre_cabs(A_offd_a[j]) > temp_max)
{
temp_max = hypre_cabs(A_offd_a[j]);
}
}
}
B_maxel_row[k - 1] = temp_max;
// add one for diagonal element
j = A_diag_i[i];
if (sub_idx_diag[A_diag_j[j]] != -1)
{
B_nnz_diag++;
}
// Count nnzs larger than tolerance times max row element
for (j = A_diag_i[i] + 1; j < A_diag_i[i + 1]; j++)
{
if ( (sub_idx_diag[A_diag_j[j]] != -1) &&
(hypre_cabs(A_diag_a[j]) > (strength_thresh * temp_max)) )
{
B_nnz_diag++;
}
}
for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
{
if ( (sub_idx_offd[A_offd_j[j]] != -1) &&
(hypre_cabs(A_offd_a[j]) > (strength_thresh * temp_max)) )
{
B_nnz_offd++;
}
}
B_diag_i[k] = B_nnz_diag;
B_offd_i[k] = B_nnz_offd;
}
hypre_assert(k == B_nrow_local);
B_diag_j = hypre_TAlloc(HYPRE_Int, B_nnz_diag, HYPRE_MEMORY_HOST);
B_diag_a = hypre_TAlloc(HYPRE_Complex, B_nnz_diag, HYPRE_MEMORY_HOST);
B_offd_j = hypre_TAlloc(HYPRE_Int, B_nnz_offd, HYPRE_MEMORY_HOST);
B_offd_a = hypre_TAlloc(HYPRE_Complex, B_nnz_offd, HYPRE_MEMORY_HOST);
for (i = 0, k = 0, k1 = 0, k2 = 0; i < A_nlocal; i++)
{
HYPRE_Int CF_i = CF_marker[i] > 0 ? 1 : -1;
if (CF_i != row_set)
{
continue;
}
HYPRE_Real maxel = B_maxel_row[k];
k++;
for (j = A_diag_i[i]; j < A_diag_i[i + 1]; j++)
{
HYPRE_Int j1 = sub_idx_diag[A_diag_j[j]];
if ( (j1 != -1) && ( (hypre_cabs(A_diag_a[j]) > (strength_thresh * maxel)) || j == A_diag_i[i] ) )
{
B_diag_j[k1] = j1;
B_diag_a[k1] = A_diag_a[j];
k1++;
}
}
for (j = A_offd_i[i]; j < A_offd_i[i + 1]; j++)
{
HYPRE_Int j1 = sub_idx_offd[A_offd_j[j]];
if ((j1 != -1) && (hypre_cabs(A_offd_a[j]) > (strength_thresh * maxel)))
{
hypre_assert(j1 >= 0 && j1 < num_cols_B_offd);
B_offd_j[k2] = j1;
B_offd_a[k2] = A_offd_a[j];
k2++;
}
}
}
hypre_assert(k1 == B_nnz_diag && k2 == B_nnz_offd);
/* ready to create B = A(rowset, colset) */
B = hypre_ParCSRMatrixCreate(comm,
B_nrow_global,
B_ncol_global,
B_row_starts,
B_col_starts,
num_cols_B_offd,
B_nnz_diag,
B_nnz_offd);
B_diag = hypre_ParCSRMatrixDiag(B);
hypre_CSRMatrixMemoryLocation(B_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixData(B_diag) = B_diag_a;
hypre_CSRMatrixI(B_diag) = B_diag_i;
hypre_CSRMatrixJ(B_diag) = B_diag_j;
B_offd = hypre_ParCSRMatrixOffd(B);
hypre_CSRMatrixMemoryLocation(B_offd) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixData(B_offd) = B_offd_a;
hypre_CSRMatrixI(B_offd) = B_offd_i;
hypre_CSRMatrixJ(B_offd) = B_offd_j;
hypre_ParCSRMatrixColMapOffd(B) = col_map_offd_B;
hypre_ParCSRMatrixSetNumNonzeros(B);
hypre_ParCSRMatrixDNumNonzeros(B) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(B);
hypre_MatvecCommPkgCreate(B);
*B_ptr = B;
hypre_TFree(B_maxel_row, HYPRE_MEMORY_HOST);
hypre_TFree(send_buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(sub_idx_diag, HYPRE_MEMORY_HOST);
hypre_TFree(sub_idx_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
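/*
 * Illustrative sketch (editorial addition): extracting the F-F and F-C blocks
 * used in two-level / AMG-style methods. CF_marker[i] > 0 marks a C-point and
 * CF_marker[i] < 0 an F-point; cpts_starts is the usual 2-entry partition of
 * local C-points; a strength threshold of 0.0 keeps every entry.
 */
#if 0
hypre_ParCSRMatrix *AFF = NULL, *AFC = NULL;
hypre_ParCSRMatrixExtractSubmatrixFC(A, CF_marker, cpts_starts, "FF", &AFF, 0.0);
hypre_ParCSRMatrixExtractSubmatrixFC(A, CF_marker, cpts_starts, "FC", &AFC, 0.0);
#endif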
/* drop the entries that are not on the diagonal and whose magnitude is smaller than:
 * type 0: tol (TODO)
 * type 1: tol*(1-norm of row)
 * type 2: tol*(2-norm of row)
 * type -1: tol*(infinity norm of row) */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntriesHost( hypre_ParCSRMatrix *A,
HYPRE_Real tol,
HYPRE_Int type)
{
HYPRE_Int i, j, k, nnz_diag, nnz_offd, A_diag_i_i, A_offd_i_i;
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
/* diag part of A */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Real *A_diag_a = hypre_CSRMatrixData(A_diag);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
/* off-diag part of A */
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Real *A_offd_a = hypre_CSRMatrixData(A_offd);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Int num_cols_A_offd = hypre_CSRMatrixNumCols(A_offd);
HYPRE_BigInt *col_map_offd_A = hypre_ParCSRMatrixColMapOffd(A);
HYPRE_Int *marker_offd = NULL;
HYPRE_BigInt first_row = hypre_ParCSRMatrixFirstRowIndex(A);
HYPRE_Int nrow_local = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int my_id, num_procs;
/* MPI size and rank*/
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
marker_offd = hypre_CTAlloc(HYPRE_Int, num_cols_A_offd, HYPRE_MEMORY_HOST);
nnz_diag = nnz_offd = A_diag_i_i = A_offd_i_i = 0;
for (i = 0; i < nrow_local; i++)
{
/* compute row norm */
HYPRE_Real row_nrm = 0.0;
for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++)
{
HYPRE_Complex v = A_diag_a[j];
if (type == 1)
{
row_nrm += fabs(v);
}
else if (type == 2)
{
row_nrm += v * v;
}
else
{
row_nrm = hypre_max(row_nrm, fabs(v));
}
}
if (num_procs > 1)
{
for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++)
{
HYPRE_Complex v = A_offd_a[j];
if (type == 1)
{
row_nrm += fabs(v);
}
else if (type == 2)
{
row_nrm += v * v;
}
else
{
row_nrm = hypre_max(row_nrm, fabs(v));
}
}
}
if (type == 2)
{
row_nrm = sqrt(row_nrm);
}
/* drop small entries based on tol and row norm */
for (j = A_diag_i_i; j < A_diag_i[i + 1]; j++)
{
HYPRE_Int col = A_diag_j[j];
HYPRE_Complex val = A_diag_a[j];
if (i == col || fabs(val) >= tol * row_nrm)
{
A_diag_j[nnz_diag] = col;
A_diag_a[nnz_diag] = val;
nnz_diag ++;
}
}
if (num_procs > 1)
{
for (j = A_offd_i_i; j < A_offd_i[i + 1]; j++)
{
HYPRE_Int col = A_offd_j[j];
HYPRE_Complex val = A_offd_a[j];
/* normally the diagonal entry does not appear in A_offd,
 * though it is still possible */
if (i + first_row == col_map_offd_A[col] || fabs(val) >= tol * row_nrm)
{
if (0 == marker_offd[col])
{
marker_offd[col] = 1;
}
A_offd_j[nnz_offd] = col;
A_offd_a[nnz_offd] = val;
nnz_offd ++;
}
}
}
A_diag_i_i = A_diag_i[i + 1];
A_offd_i_i = A_offd_i[i + 1];
A_diag_i[i + 1] = nnz_diag;
A_offd_i[i + 1] = nnz_offd;
}
hypre_CSRMatrixNumNonzeros(A_diag) = nnz_diag;
hypre_CSRMatrixNumNonzeros(A_offd) = nnz_offd;
hypre_ParCSRMatrixSetNumNonzeros(A);
hypre_ParCSRMatrixDNumNonzeros(A) = (HYPRE_Real) hypre_ParCSRMatrixNumNonzeros(A);
for (i = 0, k = 0; i < num_cols_A_offd; i++)
{
if (marker_offd[i])
{
col_map_offd_A[k] = col_map_offd_A[i];
marker_offd[i] = k++;
}
}
/* num_cols_A_offd = k; */
hypre_CSRMatrixNumCols(A_offd) = k;
for (i = 0; i < nnz_offd; i++)
{
A_offd_j[i] = marker_offd[A_offd_j[i]];
}
if ( hypre_ParCSRMatrixCommPkg(A) )
{
hypre_MatvecCommPkgDestroy( hypre_ParCSRMatrixCommPkg(A) );
}
hypre_MatvecCommPkgCreate(A);
hypre_TFree(marker_offd, HYPRE_MEMORY_HOST);
return hypre_error_flag;
}
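/*
 * Editorial note with a worked example: with type == 2 and tol == 0.1, an
 * off-diagonal entry a_ij of row i survives only if
 *    |a_ij| >= 0.1 * ||row i||_2 ,
 * while the diagonal entry is always kept. The arrays are compacted in place
 * and the offd column map is rebuilt afterwards.
 */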
/* drop the entries that are not on the diagonal and whose magnitude is smaller than:
 * type 0: tol
 * type 1: tol*(1-norm of row)
 * type 2: tol*(2-norm of row)
 * type -1: tol*(infinity norm of row)
 * NOTE: some type options above are unavailable on either host or device */
HYPRE_Int
hypre_ParCSRMatrixDropSmallEntries( hypre_ParCSRMatrix *A,
HYPRE_Real tol,
HYPRE_Int type)
{
if (tol <= 0.0)
{
return hypre_error_flag;
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_GpuProfilingPushRange("ParCSRMatrixDropSmallEntries");
#endif
HYPRE_Int ierr = 0;
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy1( hypre_ParCSRMatrixMemoryLocation(A) );
if (exec == HYPRE_EXEC_DEVICE)
{
ierr = hypre_ParCSRMatrixDropSmallEntriesDevice(A, tol, type);
}
else
#endif
{
ierr = hypre_ParCSRMatrixDropSmallEntriesHost(A, tol, type);
}
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
hypre_GpuProfilingPopRange();
#endif
return ierr;
}
/* Scale a ParCSR matrix: A = scalar * A
 * A: the target ParCSR matrix
 * scalar: scaling factor
 */
HYPRE_Int
hypre_ParCSRMatrixScale(hypre_ParCSRMatrix *A,
HYPRE_Complex scalar)
{
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
hypre_CSRMatrixScale(A_diag, scalar);
hypre_CSRMatrixScale(A_offd, scalar);
return hypre_error_flag;
}
/*--------------------------------------------------------------------------
* hypre_ParCSRMatrixReorder:
*
 * Reorders the column and data arrays of the diagonal component of a square
* ParCSR matrix, such that the first entry in each row is the diagonal one.
*--------------------------------------------------------------------------*/
HYPRE_Int
hypre_ParCSRMatrixReorder(hypre_ParCSRMatrix *A)
{
HYPRE_BigInt nrows_A = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_BigInt ncols_A = hypre_ParCSRMatrixGlobalNumCols(A);
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
if (nrows_A != ncols_A)
{
hypre_error_w_msg(HYPRE_ERROR_GENERIC, " Error! Matrix should be square!\n");
return hypre_error_flag;
}
hypre_CSRMatrixReorder(A_diag);
return hypre_error_flag;
}
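/*
 * Editorial note: after hypre_ParCSRMatrixReorder the diagonal entry is stored
 * first in each row of the diag block, a layout assumed by kernels (e.g. some
 * relaxation routines) that read the diagonal via A_diag_a[A_diag_i[i]].
 */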
|
SpatialDilatedMaxPooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "THNN/generic/SpatialDilatedMaxPooling.c"
#else
#include <THNN/generic/pooling_shape.h>
#include <algorithm>
static inline void THNN_(SpatialDilatedMaxPooling_shapeCheck)(
THTensor *input, THTensor *gradOutput, THIndexTensor *indices,
int kH, int kW, int dH, int dW, int padH, int padW,
int dilationH, int dilationW, bool ceil_mode) {
THArgCheck(kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW);
THArgCheck(dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dH: %d dW: %d", dH, dW);
THArgCheck(dilationH > 0 && dilationW > 0, 12,
"dilation should be greater than zero, but got dilationH: %d dilationW: %d",
dilationH, dilationW);
int ndim = input->dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
THNN_ARGCHECK(!input->is_empty() && (ndim == 3 || ndim == 4), 2, input,
"non-empty 3D or 4D input tensor expected but got: %s");
THArgCheck(kW/2 >= padW && kH/2 >= padH, 2,
"pad should be smaller than half of kernel size, but got "
"padW = %d, padH = %d, kW = %d, kH = %d",
padW, padH, kW, kH);
int64_t nInputPlane = input->size(dimh-1);
int64_t inputHeight = input->size(dimh);
int64_t inputWidth = input->size(dimw);
int64_t nOutputPlane = nInputPlane;
int64_t outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
int64_t outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
if (outputWidth < 1 || outputHeight < 1)
THError("Given input size: (%dx%dx%d). "
"Calculated output size: (%dx%dx%d). Output size is too small",
nInputPlane,inputHeight,inputWidth,nInputPlane,outputHeight,outputWidth);
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, outputHeight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, outputWidth);
}
if (indices != NULL) {
THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimf, nOutputPlane);
THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimh, outputHeight);
THNN_CHECK_DIM_SIZE_INDICES(indices, ndim, dimw, outputWidth);
}
}
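/* Editorial note: pooling_output_shape<int64_t> used above computes the
 * standard (ceil_mode-aware) output size
 *    out = floor_or_ceil((in + 2*pad - dilation*(kernel-1) - 1) / stride) + 1
 * e.g. in=7, kernel=3, pad=0, stride=2, dilation=1 gives out = 3 with floor. */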
static void THNN_(SpatialDilatedMaxPooling_updateOutput_frame)(
scalar_t *input_p,
scalar_t *output_p,
THIndex_t *ind_p,
int64_t nslices,
int64_t iwidth,
int64_t iheight,
int64_t owidth,
int64_t oheight,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int dilationW,
int dilationH
)
{
int64_t k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
/* loop over output */
int64_t i, j;
scalar_t *ip = input_p + k*iwidth*iheight;
for(i = 0; i < oheight; i++)
{
for(j = 0; j < owidth; j++)
{
int64_t hstart = i * dH - padH;
int64_t wstart = j * dW - padW;
int64_t hend = std::min(hstart + (kH - 1) * dilationH + 1, iheight);
int64_t wend = std::min(wstart + (kW - 1) * dilationW + 1, iwidth);
while(hstart < 0)
hstart += dilationH;
while(wstart < 0)
wstart += dilationW;
/* local pointers */
scalar_t *op = output_p + k*owidth*oheight + i*owidth + j;
THIndex_t *indp = ind_p + k*owidth*oheight + i*owidth + j;
/* compute local max: */
int64_t maxindex = -1;
scalar_t maxval = -THInf;
int64_t tcntr = 0;
int64_t x,y;
for(y = hstart; y < hend; y += dilationH)
{
for(x = wstart; x < wend; x += dilationW)
{
tcntr = y*iwidth + x;
scalar_t val = *(ip + tcntr);
if ((val > maxval) || std::isnan(val))
{
maxval = val;
maxindex = tcntr;
}
}
}
/* set output to local max */
*op = maxval;
/* store location of max */
*indp = maxindex;
}
}
}
}
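/* Editorial note with a worked example of the window computation above:
 * with kH = 3, dH = 2, padH = 1, dilationH = 2 and output row i = 0,
 *   hstart = 0*2 - 1 = -1, hend = min(-1 + (3-1)*2 + 1, iheight) = min(4, iheight),
 * and the while loop advances hstart by dilationH to 1, so input rows {1, 3}
 * are scanned (row -1 lies in the padding). NaN inputs propagate: the test
 * (val > maxval) || std::isnan(val) makes a NaN win the max. */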
void THNN_(SpatialDilatedMaxPooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
THIndexTensor *indices,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int dilationW,
int dilationH,
bool ceil_mode)
{
int dimw = 2;
int dimh = 1;
int64_t nbatch = 1;
int64_t nInputPlane;
int64_t inputHeight;
int64_t inputWidth;
int64_t outputHeight;
int64_t outputWidth;
scalar_t *input_data;
scalar_t *output_data;
THIndex_t *indices_data;
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(input, NULL, NULL, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
if (input->dim() == 4)
{
nbatch = input->size(0);
dimw++;
dimh++;
}
/* sizes */
nInputPlane = input->size(dimh-1);
inputHeight = input->size(dimh);
inputWidth = input->size(dimw);
outputHeight = pooling_output_shape<int64_t>(inputHeight, kH, padH, dH, dilationH, ceil_mode);
outputWidth = pooling_output_shape<int64_t>(inputWidth, kW, padW, dW, dilationW, ceil_mode);
/* get contiguous input */
input = THTensor_(newContiguous)(input);
/* resize output */
if (input->dim() == 3)
{
THTensor_(resize3d)(output, nInputPlane, outputHeight, outputWidth);
/* indices will contain the locations for each output point */
THIndexTensor_(resize3d)(indices, nInputPlane, outputHeight, outputWidth);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
THNN_(SpatialDilatedMaxPooling_updateOutput_frame)
(input_data, output_data,
indices_data,
nInputPlane,
inputWidth, inputHeight,
outputWidth, outputHeight,
kW, kH, dW, dH,
padW, padH,
dilationW, dilationH
);
}
else
{
int64_t p;
THTensor_(resize4d)(output, nbatch, nInputPlane, outputHeight, outputWidth);
/* indices will contain the locations for each output point */
THIndexTensor_(resize4d)(indices, nbatch, nInputPlane, outputHeight, outputWidth);
input_data = input->data<scalar_t>();
output_data = output->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
THNN_(SpatialDilatedMaxPooling_updateOutput_frame)
(input_data+p*nInputPlane*inputWidth*inputHeight,
output_data+p*nInputPlane*outputWidth*outputHeight,
indices_data+p*nInputPlane*outputWidth*outputHeight,
nInputPlane,
inputWidth, inputHeight,
outputWidth, outputHeight,
kW, kH, dW, dH,
padW, padH,
dilationW, dilationH
);
}
}
/* cleanup */
c10::raw::intrusive_ptr::decref(input);
}
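/* Backward pass note (our annotation): max pooling routes each output
   gradient to the single input element that produced the forward max,
   using the indices tensor saved by updateOutput; -1 entries mark
   windows that contained no valid input element. */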
static void THNN_(SpatialDilatedMaxPooling_updateGradInput_frame)(
scalar_t *gradInput_p,
scalar_t *gradOutput_p,
THIndex_t *ind_p,
int64_t nInputPlane,
int64_t inputWidth,
int64_t inputHeight,
int64_t outputWidth,
int64_t outputHeight,
int dW,
int dH)
{
int64_t k;
#pragma omp parallel for private(k)
for (k = 0; k < nInputPlane; k++)
{
scalar_t *gradInput_p_k = gradInput_p + k*inputWidth*inputHeight;
scalar_t *gradOutput_p_k = gradOutput_p + k*outputWidth*outputHeight;
THIndex_t *ind_p_k = ind_p + k*outputWidth*outputHeight;
/* calculate max points */
int64_t i, j;
for(i = 0; i < outputHeight; i++)
{
for(j = 0; j < outputWidth; j++)
{
/* retrieve position of max */
int64_t maxp = ind_p_k[i*outputWidth + j];
if (maxp != -1) {
/* update gradient */
gradInput_p_k[maxp] += gradOutput_p_k[i*outputWidth + j];
}
}
}
}
}
void THNN_(SpatialDilatedMaxPooling_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
THIndexTensor *indices,
int kW,
int kH,
int dW,
int dH,
int padW,
int padH,
int dilationW,
int dilationH,
bool ceil_mode)
{
int dimw = 2;
int dimh = 1;
int64_t nbatch = 1;
int64_t nInputPlane;
int64_t inputHeight;
int64_t inputWidth;
int64_t outputHeight;
int64_t outputWidth;
scalar_t *gradInput_data;
scalar_t *gradOutput_data;
THIndex_t *indices_data;
THNN_(SpatialDilatedMaxPooling_shapeCheck)
(input, gradOutput, indices, kH, kW, dH, dW,
padH, padW, dilationH, dilationW, ceil_mode);
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
if (input->dim() == 4) {
nbatch = input->size(0);
dimw++;
dimh++;
}
/* sizes */
nInputPlane = input->size(dimh-1);
inputHeight = input->size(dimh);
inputWidth = input->size(dimw);
outputHeight = gradOutput->size(dimh);
outputWidth = gradOutput->size(dimw);
/* get raw pointers */
gradInput_data = gradInput->data<scalar_t>();
gradOutput_data = gradOutput->data<scalar_t>();
indices_data = THIndexTensor_(data)(indices);
/* backprop */
if (input->dim() == 3)
{
THNN_(SpatialDilatedMaxPooling_updateGradInput_frame)
(gradInput_data, gradOutput_data,
indices_data,
nInputPlane,
inputWidth, inputHeight,
outputWidth, outputHeight,
dW, dH);
}
else
{
int64_t p;
#pragma omp parallel for private(p)
for (p = 0; p < nbatch; p++)
{
THNN_(SpatialDilatedMaxPooling_updateGradInput_frame)
(gradInput_data+p*nInputPlane*inputWidth*inputHeight,
gradOutput_data+p*nInputPlane*outputWidth*outputHeight,
indices_data+p*nInputPlane*outputWidth*outputHeight,
nInputPlane,
inputWidth, inputHeight,
outputWidth, outputHeight,
dW, dH);
}
}
/* cleanup */
c10::raw::intrusive_ptr::decref(gradOutput);
}
#endif
|
sum_tree.h | #include <cmath>
#include <memory>
#include <mutex>
#include <vector>
#include <unordered_map>
template<typename>
class SumTreeTestProxy;
template<typename Precision>
struct SumTreeNode {
std::shared_ptr<SumTreeNode> left;
std::shared_ptr<SumTreeNode> right;
// Non-owning back-pointer; a shared_ptr here would form a reference
// cycle with the owning child pointers above and leak every node.
SumTreeNode* parent = nullptr;
long long index = -1;
Precision value{0.0};
std::mutex value_mutex;
};
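// Invariant (our annotation): each internal node's value is the sum of its
// children's values, so root_->value is the total mass and a prefix-sum
// query descends one level per step, i.e. O(log capacity) per lookup.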
template<typename Precision>
class SumTree {
public:
SumTree(size_t capacity)
{
root_->index = -2;
depth_ = static_cast<size_t>(std::ceil(std::log2(capacity)));
populateTree(root_, 0);
}
void updateValue(const size_t index, const Precision value) {
    const Precision diff = value - leaves_[index]->value;
    updateValue_(leaves_[index].get(), diff);
}
void updateValues(const std::vector<size_t>& indices,
                  const std::vector<Precision>& values) {
    // Per-node mutexes make concurrent leaf-to-root updates safe; two
    // parallel updates to the *same* leaf still race on the initial read.
    #pragma omp parallel for
    for (size_t idx = 0; idx < indices.size(); idx++) {
        updateValue(indices[idx], values[idx]);
    }
}
size_t getIndex(const Precision quantile) const {
const Precision query_value = quantile * root_->value;
return getIndex_(root_, query_value);
}
std::vector<size_t> getIndices(const std::vector<Precision>& query_values) const {
std::vector<size_t> indices(query_values.size());
#pragma omp parallel for
for (size_t idx = 0; idx < query_values.size(); idx++) {
indices[idx] = getIndex(query_values[idx]);
}
return indices;
}
Precision getValue(const size_t idx) const {
return leaves_.at(idx)->value;
}
std::vector<Precision> getValues(const std::vector<size_t>& indices) const {
std::vector<Precision> values(indices.size());
#pragma omp parallel for
for (size_t i = 0; i < indices.size(); i++) {
values[i] = getValue(indices[i]);
}
return values;
}
Precision getTotalVal() const { return root_->value; }
size_t getCapacity() const { return leaves_.size(); }
private:
void updateValue_(SumTreeNode<Precision>* node, const Precision diff) {
    {
        const std::lock_guard<std::mutex> lock(node->value_mutex);
        node->value += diff;
    }
    if (node->parent == nullptr) { // reached the root; nothing above to update
        return;
    }
    updateValue_(node->parent, diff);
}
size_t getIndex_(const std::shared_ptr<SumTreeNode<Precision>>& node,
                 Precision query_value) const {
    if (node->index > -1) { // leaf reached
        return node->index;
    }
    // Prefix-sum descent: queries below the left subtree's mass go left,
    // otherwise go right with the left mass subtracted.
    if (query_value < node->left->value) {
        return getIndex_(node->left, query_value);
    }
    return getIndex_(node->right, query_value - node->left->value);
}
void populateTree(std::shared_ptr<SumTreeNode<Precision>> node,
                  size_t current_depth) {
    if (current_depth == depth_) {
        node->index = leaves_.size();
        leaves_.insert({node->index, node});
        return;
    }
    node->left = std::make_shared<SumTreeNode<Precision>>();
    node->left->parent = node.get();
    node->right = std::make_shared<SumTreeNode<Precision>>();
    node->right->parent = node.get();
    populateTree(node->left, current_depth + 1);
    populateTree(node->right, current_depth + 1);
}
size_t depth_{0};
std::shared_ptr<SumTreeNode<Precision>> root_{
    std::make_shared<SumTreeNode<Precision>>()};
std::unordered_map<size_t, std::shared_ptr<SumTreeNode<Precision>>> leaves_;
friend SumTreeTestProxy<Precision>;
};
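// Illustrative usage sketch (our addition, not part of the original header).
// Builds a small tree, assigns priorities, and samples an index in
// proportion to priority mass; all names below are local to the example.
inline size_t sum_tree_example() {
    SumTree<double> tree(4);   // 4 leaves (capacity rounded to a power of two)
    tree.updateValue(0, 1.0);  // leaf 0 holds 1/4 of the total mass
    tree.updateValue(1, 3.0);  // leaf 1 holds 3/4 of the total mass
    // quantile 0.5 -> query value 0.5 * 4.0 = 2.0, which lands in leaf 1
    return tree.getIndex(0.5);
}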
|
cp-tree.h | /* Definitions for C++ parsing and type checking.
Copyright (C) 1987-2018 Free Software Foundation, Inc.
Contributed by Michael Tiemann (tiemann@cygnus.com)
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
#ifndef GCC_CP_TREE_H
#define GCC_CP_TREE_H
#include "tm.h"
#include "hard-reg-set.h"
#include "function.h"
/* In order for the format checking to accept the C++ front end
diagnostic framework extensions, you must include this file before
diagnostic-core.h, not after. We override the definition of GCC_DIAG_STYLE
in c-common.h. */
#undef GCC_DIAG_STYLE
#define GCC_DIAG_STYLE __gcc_cxxdiag__
#if defined(GCC_DIAGNOSTIC_CORE_H) || defined (GCC_C_COMMON_H)
#error \
In order for the format checking to accept the C++ front end diagnostic \
framework extensions, you must include this file before diagnostic-core.h and \
c-common.h, not after.
#endif
#include "c-family/c-common.h"
#include "diagnostic.h"
/* A tree node, together with a location, so that we can track locations
(and ranges) during parsing.
The location is redundant for node kinds that have locations,
but not all node kinds do (e.g. constants, and references to
params, locals, etc), so we stash a copy here. */
class cp_expr
{
public:
cp_expr () :
m_value (NULL), m_loc (UNKNOWN_LOCATION) {}
cp_expr (tree value) :
m_value (value), m_loc (EXPR_LOCATION (m_value)) {}
cp_expr (tree value, location_t loc):
m_value (value), m_loc (loc) {}
cp_expr (const cp_expr &other) :
m_value (other.m_value), m_loc (other.m_loc) {}
/* Implicit conversions to tree. */
operator tree () const { return m_value; }
tree & operator* () { return m_value; }
tree operator* () const { return m_value; }
tree & operator-> () { return m_value; }
tree operator-> () const { return m_value; }
tree get_value () const { return m_value; }
location_t get_location () const { return m_loc; }
location_t get_start () const
{
source_range src_range = get_range_from_loc (line_table, m_loc);
return src_range.m_start;
}
location_t get_finish () const
{
source_range src_range = get_range_from_loc (line_table, m_loc);
return src_range.m_finish;
}
void set_location (location_t loc)
{
protected_set_expr_location (m_value, loc);
m_loc = loc;
}
void set_range (location_t start, location_t finish)
{
set_location (make_location (m_loc, start, finish));
}
cp_expr& maybe_add_location_wrapper ()
{
m_value = maybe_wrap_with_location (m_value, m_loc);
return *this;
}
private:
tree m_value;
location_t m_loc;
};
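/* Illustrative sketch (ours, not from the GCC sources): carrying a source
   range through parsing with cp_expr. `expr', `start' and `finish' are
   assumed to be a valid tree and two location_t values.
     cp_expr e (expr, start);
     e.set_range (start, finish);  // widen to cover the whole construct
     tree t = e;                   // implicit conversion back to tree  */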
inline bool
operator == (const cp_expr &lhs, tree rhs)
{
return lhs.get_value () == rhs;
}
enum cp_tree_index
{
CPTI_WCHAR_DECL,
CPTI_VTABLE_ENTRY_TYPE,
CPTI_DELTA_TYPE,
CPTI_VTABLE_INDEX_TYPE,
CPTI_CLEANUP_TYPE,
CPTI_VTT_PARM_TYPE,
CPTI_CLASS_TYPE,
CPTI_UNKNOWN_TYPE,
CPTI_INIT_LIST_TYPE,
CPTI_VTBL_TYPE,
CPTI_VTBL_PTR_TYPE,
CPTI_STD,
CPTI_ABI,
CPTI_GLOBAL,
CPTI_GLOBAL_TYPE,
CPTI_CONST_TYPE_INFO_TYPE,
CPTI_TYPE_INFO_PTR_TYPE,
CPTI_ABORT_FNDECL,
CPTI_AGGR_TAG,
CPTI_CONV_OP_MARKER,
CPTI_CTOR_IDENTIFIER,
CPTI_COMPLETE_CTOR_IDENTIFIER,
CPTI_BASE_CTOR_IDENTIFIER,
CPTI_DTOR_IDENTIFIER,
CPTI_COMPLETE_DTOR_IDENTIFIER,
CPTI_BASE_DTOR_IDENTIFIER,
CPTI_DELETING_DTOR_IDENTIFIER,
CPTI_CONV_OP_IDENTIFIER,
CPTI_DELTA_IDENTIFIER,
CPTI_IN_CHARGE_IDENTIFIER,
CPTI_VTT_PARM_IDENTIFIER,
CPTI_THIS_IDENTIFIER,
CPTI_PFN_IDENTIFIER,
CPTI_VPTR_IDENTIFIER,
CPTI_GLOBAL_IDENTIFIER,
CPTI_STD_IDENTIFIER,
CPTI_ANON_IDENTIFIER,
CPTI_AUTO_IDENTIFIER,
CPTI_DECLTYPE_AUTO_IDENTIFIER,
CPTI_INIT_LIST_IDENTIFIER,
CPTI_LANG_NAME_C,
CPTI_LANG_NAME_CPLUSPLUS,
CPTI_EMPTY_EXCEPT_SPEC,
CPTI_NOEXCEPT_TRUE_SPEC,
CPTI_NOEXCEPT_FALSE_SPEC,
CPTI_NOEXCEPT_DEFERRED_SPEC,
CPTI_TERMINATE_FN,
CPTI_CALL_UNEXPECTED_FN,
CPTI_GET_EXCEPTION_PTR_FN,
CPTI_BEGIN_CATCH_FN,
CPTI_END_CATCH_FN,
CPTI_ALLOCATE_EXCEPTION_FN,
CPTI_FREE_EXCEPTION_FN,
CPTI_THROW_FN,
CPTI_RETHROW_FN,
CPTI_ATEXIT_FN_PTR_TYPE,
CPTI_ATEXIT,
CPTI_DSO_HANDLE,
CPTI_DCAST,
CPTI_NULLPTR,
CPTI_NULLPTR_TYPE,
CPTI_ALIGN_TYPE,
CPTI_ANY_TARG,
CPTI_MAX
};
extern GTY(()) tree cp_global_trees[CPTI_MAX];
#define wchar_decl_node cp_global_trees[CPTI_WCHAR_DECL]
#define vtable_entry_type cp_global_trees[CPTI_VTABLE_ENTRY_TYPE]
/* The type used to represent an offset by which to adjust the `this'
pointer in pointer-to-member types. */
#define delta_type_node cp_global_trees[CPTI_DELTA_TYPE]
/* The type used to represent an index into the vtable. */
#define vtable_index_type cp_global_trees[CPTI_VTABLE_INDEX_TYPE]
#define class_type_node cp_global_trees[CPTI_CLASS_TYPE]
#define unknown_type_node cp_global_trees[CPTI_UNKNOWN_TYPE]
#define init_list_type_node cp_global_trees[CPTI_INIT_LIST_TYPE]
#define vtbl_type_node cp_global_trees[CPTI_VTBL_TYPE]
#define vtbl_ptr_type_node cp_global_trees[CPTI_VTBL_PTR_TYPE]
#define std_node cp_global_trees[CPTI_STD]
#define abi_node cp_global_trees[CPTI_ABI]
#define global_namespace cp_global_trees[CPTI_GLOBAL]
#define global_type_node cp_global_trees[CPTI_GLOBAL_TYPE]
#define const_type_info_type_node cp_global_trees[CPTI_CONST_TYPE_INFO_TYPE]
#define type_info_ptr_type cp_global_trees[CPTI_TYPE_INFO_PTR_TYPE]
#define conv_op_marker cp_global_trees[CPTI_CONV_OP_MARKER]
#define abort_fndecl cp_global_trees[CPTI_ABORT_FNDECL]
#define current_aggr cp_global_trees[CPTI_AGGR_TAG]
#define nullptr_node cp_global_trees[CPTI_NULLPTR]
#define nullptr_type_node cp_global_trees[CPTI_NULLPTR_TYPE]
/* std::align_val_t */
#define align_type_node cp_global_trees[CPTI_ALIGN_TYPE]
/* We cache these tree nodes so as to call get_identifier less frequently.
For identifiers for functions, including special member functions such
as ctors and assignment operators, the nodes can be used (among other
things) to iterate over their overloads defined by/for a type. For
example:
tree ovlid = assign_op_identifier;
tree overloads = get_class_binding (type, ovlid);
for (ovl_iterator it (overloads); it; ++it) { ... }
iterates over the set of implicitly and explicitly defined overloads
of the assignment operator for type (including the copy and move
assignment operators, whether deleted or not). */
/* The name of a constructor that takes an in-charge parameter to
decide whether or not to construct virtual base classes. */
#define ctor_identifier cp_global_trees[CPTI_CTOR_IDENTIFIER]
/* The name of a constructor that constructs virtual base classes. */
#define complete_ctor_identifier cp_global_trees[CPTI_COMPLETE_CTOR_IDENTIFIER]
/* The name of a constructor that does not construct virtual base classes. */
#define base_ctor_identifier cp_global_trees[CPTI_BASE_CTOR_IDENTIFIER]
/* The name of a destructor that takes an in-charge parameter to
decide whether or not to destroy virtual base classes and whether
or not to delete the object. */
#define dtor_identifier cp_global_trees[CPTI_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes. */
#define complete_dtor_identifier cp_global_trees[CPTI_COMPLETE_DTOR_IDENTIFIER]
/* The name of a destructor that does not destroy virtual base
classes. */
#define base_dtor_identifier cp_global_trees[CPTI_BASE_DTOR_IDENTIFIER]
/* The name of a destructor that destroys virtual base classes, and
then deletes the entire object. */
#define deleting_dtor_identifier cp_global_trees[CPTI_DELETING_DTOR_IDENTIFIER]
#define ovl_op_identifier(ISASS, CODE) (OVL_OP_INFO(ISASS, CODE)->identifier)
#define assign_op_identifier (ovl_op_info[true][OVL_OP_NOP_EXPR].identifier)
#define call_op_identifier (ovl_op_info[false][OVL_OP_CALL_EXPR].identifier)
/* The name used for conversion operators -- but note that actual
conversion functions use special identifiers outside the identifier
table. */
#define conv_op_identifier cp_global_trees[CPTI_CONV_OP_IDENTIFIER]
#define delta_identifier cp_global_trees[CPTI_DELTA_IDENTIFIER]
#define in_charge_identifier cp_global_trees[CPTI_IN_CHARGE_IDENTIFIER]
/* The name of the parameter that contains a pointer to the VTT to use
for this subobject constructor or destructor. */
#define vtt_parm_identifier cp_global_trees[CPTI_VTT_PARM_IDENTIFIER]
#define this_identifier cp_global_trees[CPTI_THIS_IDENTIFIER]
#define pfn_identifier cp_global_trees[CPTI_PFN_IDENTIFIER]
#define vptr_identifier cp_global_trees[CPTI_VPTR_IDENTIFIER]
/* The name of the ::, std & anon namespaces. */
#define global_identifier cp_global_trees[CPTI_GLOBAL_IDENTIFIER]
#define std_identifier cp_global_trees[CPTI_STD_IDENTIFIER]
#define anon_identifier cp_global_trees[CPTI_ANON_IDENTIFIER]
/* auto and decltype(auto) identifiers. */
#define auto_identifier cp_global_trees[CPTI_AUTO_IDENTIFIER]
#define decltype_auto_identifier cp_global_trees[CPTI_DECLTYPE_AUTO_IDENTIFIER]
#define init_list_identifier cp_global_trees[CPTI_INIT_LIST_IDENTIFIER]
#define lang_name_c cp_global_trees[CPTI_LANG_NAME_C]
#define lang_name_cplusplus cp_global_trees[CPTI_LANG_NAME_CPLUSPLUS]
/* Exception specifiers used for throw(), noexcept(true),
noexcept(false) and deferred noexcept. We rely on these being
uncloned. */
#define empty_except_spec cp_global_trees[CPTI_EMPTY_EXCEPT_SPEC]
#define noexcept_true_spec cp_global_trees[CPTI_NOEXCEPT_TRUE_SPEC]
#define noexcept_false_spec cp_global_trees[CPTI_NOEXCEPT_FALSE_SPEC]
#define noexcept_deferred_spec cp_global_trees[CPTI_NOEXCEPT_DEFERRED_SPEC]
/* Exception handling function declarations. */
#define terminate_fn cp_global_trees[CPTI_TERMINATE_FN]
#define call_unexpected_fn cp_global_trees[CPTI_CALL_UNEXPECTED_FN]
#define get_exception_ptr_fn cp_global_trees[CPTI_GET_EXCEPTION_PTR_FN]
#define begin_catch_fn cp_global_trees[CPTI_BEGIN_CATCH_FN]
#define end_catch_fn cp_global_trees[CPTI_END_CATCH_FN]
#define allocate_exception_fn cp_global_trees[CPTI_ALLOCATE_EXCEPTION_FN]
#define free_exception_fn cp_global_trees[CPTI_FREE_EXCEPTION_FN]
#define throw_fn cp_global_trees[CPTI_THROW_FN]
#define rethrow_fn cp_global_trees[CPTI_RETHROW_FN]
/* The type of the function-pointer argument to "__cxa_atexit" (or
"std::atexit", if "__cxa_atexit" is not being used). */
#define atexit_fn_ptr_type_node cp_global_trees[CPTI_ATEXIT_FN_PTR_TYPE]
/* A pointer to `std::atexit'. */
#define atexit_node cp_global_trees[CPTI_ATEXIT]
/* A pointer to `__dso_handle'. */
#define dso_handle_node cp_global_trees[CPTI_DSO_HANDLE]
/* The declaration of the dynamic_cast runtime. */
#define dynamic_cast_node cp_global_trees[CPTI_DCAST]
/* The type of a destructor. */
#define cleanup_type cp_global_trees[CPTI_CLEANUP_TYPE]
/* The type of the vtt parameter passed to subobject constructors and
destructors. */
#define vtt_parm_type cp_global_trees[CPTI_VTT_PARM_TYPE]
/* A node which matches any template argument. */
#define any_targ_node cp_global_trees[CPTI_ANY_TARG]
/* Node to indicate default access. This must be distinct from the
access nodes in tree.h. */
#define access_default_node null_node
#include "name-lookup.h"
/* Usage of TREE_LANG_FLAG_?:
0: IDENTIFIER_KIND_BIT_0 (in IDENTIFIER_NODE)
NEW_EXPR_USE_GLOBAL (in NEW_EXPR).
COND_EXPR_IS_VEC_DELETE (in COND_EXPR).
DELETE_EXPR_USE_GLOBAL (in DELETE_EXPR).
COMPOUND_EXPR_OVERLOADED (in COMPOUND_EXPR).
CLEANUP_P (in TRY_BLOCK)
AGGR_INIT_VIA_CTOR_P (in AGGR_INIT_EXPR)
PTRMEM_OK_P (in ADDR_EXPR, OFFSET_REF, SCOPE_REF)
PAREN_STRING_LITERAL (in STRING_CST)
CP_DECL_THREAD_LOCAL_P (in VAR_DECL)
KOENIG_LOOKUP_P (in CALL_EXPR)
STATEMENT_LIST_NO_SCOPE (in STATEMENT_LIST).
EXPR_STMT_STMT_EXPR_RESULT (in EXPR_STMT)
STMT_EXPR_NO_SCOPE (in STMT_EXPR)
BIND_EXPR_TRY_BLOCK (in BIND_EXPR)
TYPENAME_IS_ENUM_P (in TYPENAME_TYPE)
OMP_FOR_GIMPLIFYING_P (in OMP_FOR, OMP_SIMD, OMP_DISTRIBUTE,
and OMP_TASKLOOP)
BASELINK_QUALIFIED_P (in BASELINK)
TARGET_EXPR_IMPLICIT_P (in TARGET_EXPR)
TEMPLATE_PARM_PARAMETER_PACK (in TEMPLATE_PARM_INDEX)
ATTR_IS_DEPENDENT (in the TREE_LIST for an attribute)
ABI_TAG_IMPLICIT (in the TREE_LIST for the argument of abi_tag)
LAMBDA_CAPTURE_EXPLICIT_P (in a TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST)
CONSTRUCTOR_IS_DIRECT_INIT (in CONSTRUCTOR)
LAMBDA_EXPR_CAPTURES_THIS_P (in LAMBDA_EXPR)
DECLTYPE_FOR_LAMBDA_CAPTURE (in DECLTYPE_TYPE)
VEC_INIT_EXPR_IS_CONSTEXPR (in VEC_INIT_EXPR)
DECL_OVERRIDE_P (in FUNCTION_DECL)
IMPLICIT_CONV_EXPR_DIRECT_INIT (in IMPLICIT_CONV_EXPR)
TRANSACTION_EXPR_IS_STMT (in TRANSACTION_EXPR)
CONVERT_EXPR_VBASE_PATH (in CONVERT_EXPR)
PACK_EXPANSION_LOCAL_P (in *_PACK_EXPANSION)
TINFO_HAS_ACCESS_ERRORS (in TEMPLATE_INFO)
SIZEOF_EXPR_TYPE_P (in SIZEOF_EXPR)
COMPOUND_REQ_NOEXCEPT_P (in COMPOUND_REQ)
WILDCARD_PACK_P (in WILDCARD_DECL)
BLOCK_OUTER_CURLY_BRACE_P (in BLOCK)
FOLD_EXPR_MODOP_P (*_FOLD_EXPR)
IF_STMT_CONSTEXPR_P (IF_STMT)
TEMPLATE_TYPE_PARM_FOR_CLASS (TEMPLATE_TYPE_PARM)
DECL_NAMESPACE_INLINE_P (in NAMESPACE_DECL)
SWITCH_STMT_ALL_CASES_P (in SWITCH_STMT)
REINTERPRET_CAST_P (in NOP_EXPR)
ALIGNOF_EXPR_STD_P (in ALIGNOF_EXPR)
1: IDENTIFIER_KIND_BIT_1 (in IDENTIFIER_NODE)
TI_PENDING_TEMPLATE_FLAG.
TEMPLATE_PARMS_FOR_INLINE.
DELETE_EXPR_USE_VEC (in DELETE_EXPR).
(TREE_CALLS_NEW) (in _EXPR or _REF) (commented-out).
ICS_ELLIPSIS_FLAG (in _CONV)
DECL_INITIALIZED_P (in VAR_DECL)
TYPENAME_IS_CLASS_P (in TYPENAME_TYPE)
STMT_IS_FULL_EXPR_P (in _STMT)
TARGET_EXPR_LIST_INIT_P (in TARGET_EXPR)
LAMBDA_EXPR_MUTABLE_P (in LAMBDA_EXPR)
DECL_FINAL_P (in FUNCTION_DECL)
QUALIFIED_NAME_IS_TEMPLATE (in SCOPE_REF)
DECLTYPE_FOR_INIT_CAPTURE (in DECLTYPE_TYPE)
CONSTRUCTOR_NO_IMPLICIT_ZERO (in CONSTRUCTOR)
TINFO_USED_TEMPLATE_ID (in TEMPLATE_INFO)
PACK_EXPANSION_SIZEOF_P (in *_PACK_EXPANSION)
OVL_USING_P (in OVERLOAD)
IMPLICIT_CONV_EXPR_NONTYPE_ARG (in IMPLICIT_CONV_EXPR)
2: IDENTIFIER_KIND_BIT_2 (in IDENTIFIER_NODE)
ICS_THIS_FLAG (in _CONV)
DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (in VAR_DECL)
STATEMENT_LIST_TRY_BLOCK (in STATEMENT_LIST)
TYPENAME_IS_RESOLVING_P (in TYPENAME_TYPE)
TARGET_EXPR_DIRECT_INIT_P (in TARGET_EXPR)
FNDECL_USED_AUTO (in FUNCTION_DECL)
DECLTYPE_FOR_LAMBDA_PROXY (in DECLTYPE_TYPE)
REF_PARENTHESIZED_P (in COMPONENT_REF, INDIRECT_REF, SCOPE_REF)
AGGR_INIT_ZERO_FIRST (in AGGR_INIT_EXPR)
CONSTRUCTOR_MUTABLE_POISON (in CONSTRUCTOR)
OVL_HIDDEN_P (in OVERLOAD)
SWITCH_STMT_NO_BREAK_P (in SWITCH_STMT)
LAMBDA_EXPR_CAPTURE_OPTIMIZED (in LAMBDA_EXPR)
3: (TREE_REFERENCE_EXPR) (in NON_LVALUE_EXPR) (commented-out).
ICS_BAD_FLAG (in _CONV)
FN_TRY_BLOCK_P (in TRY_BLOCK)
BIND_EXPR_BODY_BLOCK (in BIND_EXPR)
DECL_NONTRIVIALLY_INITIALIZED_P (in VAR_DECL)
CALL_EXPR_ORDERED_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
DECLTYPE_FOR_REF_CAPTURE (in DECLTYPE_TYPE)
CONSTRUCTOR_C99_COMPOUND_LITERAL (in CONSTRUCTOR)
OVL_NESTED_P (in OVERLOAD)
4: IDENTIFIER_MARKED (IDENTIFIER_NODEs)
TREE_HAS_CONSTRUCTOR (in INDIRECT_REF, SAVE_EXPR, CONSTRUCTOR,
CALL_EXPR, or FIELD_DECL).
DECL_TINFO_P (in VAR_DECL)
FUNCTION_REF_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
OVL_LOOKUP_P (in OVERLOAD)
LOOKUP_FOUND_P (in RECORD_TYPE, UNION_TYPE, NAMESPACE_DECL)
5: IDENTIFIER_VIRTUAL_P (in IDENTIFIER_NODE)
DECL_VTABLE_OR_VTT_P (in VAR_DECL)
FUNCTION_RVALUE_QUALIFIED (in FUNCTION_TYPE, METHOD_TYPE)
CALL_EXPR_REVERSE_ARGS (in CALL_EXPR, AGGR_INIT_EXPR)
CONSTRUCTOR_PLACEHOLDER_BOUNDARY (in CONSTRUCTOR)
6: IDENTIFIER_REPO_CHOSEN (in IDENTIFIER_NODE)
DECL_CONSTRUCTION_VTABLE_P (in VAR_DECL)
TYPE_MARKED_P (in _TYPE)
RANGE_FOR_IVDEP (in RANGE_FOR_STMT)
CALL_EXPR_OPERATOR_SYNTAX (in CALL_EXPR, AGGR_INIT_EXPR)
Usage of TYPE_LANG_FLAG_?:
0: TYPE_DEPENDENT_P
1: TYPE_HAS_USER_CONSTRUCTOR.
2: TYPE_HAS_LATE_RETURN_TYPE (in FUNCTION_TYPE, METHOD_TYPE)
TYPE_PTRMEMFUNC_FLAG (in RECORD_TYPE)
4: TYPE_HAS_NONTRIVIAL_DESTRUCTOR
5: CLASS_TYPE_P (in RECORD_TYPE and UNION_TYPE)
ENUM_FIXED_UNDERLYING_TYPE_P (in ENUMERAL_TYPE)
AUTO_IS_DECLTYPE (in TEMPLATE_TYPE_PARM)
REFERENCE_VLA_OK (in REFERENCE_TYPE)
6: TYPE_DEPENDENT_P_VALID
Usage of DECL_LANG_FLAG_?:
0: DECL_ERROR_REPORTED (in VAR_DECL).
DECL_TEMPLATE_PARM_P (in PARM_DECL, CONST_DECL, TYPE_DECL, or TEMPLATE_DECL)
DECL_LOCAL_FUNCTION_P (in FUNCTION_DECL)
DECL_MUTABLE_P (in FIELD_DECL)
DECL_DEPENDENT_P (in USING_DECL)
LABEL_DECL_BREAK (in LABEL_DECL)
1: C_TYPEDEF_EXPLICITLY_SIGNED (in TYPE_DECL).
DECL_TEMPLATE_INSTANTIATED (in a VAR_DECL or a FUNCTION_DECL)
DECL_MEMBER_TEMPLATE_P (in TEMPLATE_DECL)
USING_DECL_TYPENAME_P (in USING_DECL)
DECL_VLA_CAPTURE_P (in FIELD_DECL)
DECL_ARRAY_PARAMETER_P (in PARM_DECL)
LABEL_DECL_CONTINUE (in LABEL_DECL)
2: DECL_THIS_EXTERN (in VAR_DECL or FUNCTION_DECL).
DECL_IMPLICIT_TYPEDEF_P (in a TYPE_DECL)
DECL_CONSTRAINT_VAR_P (in a PARM_DECL)
TEMPLATE_DECL_COMPLEX_ALIAS_P (in TEMPLATE_DECL)
DECL_INSTANTIATING_NSDMI_P (in a FIELD_DECL)
LABEL_DECL_CDTOR (in LABEL_DECL)
3: DECL_IN_AGGR_P.
4: DECL_C_BIT_FIELD (in a FIELD_DECL)
DECL_ANON_UNION_VAR_P (in a VAR_DECL)
DECL_SELF_REFERENCE_P (in a TYPE_DECL)
DECL_INVALID_OVERRIDER_P (in a FUNCTION_DECL)
5: DECL_INTERFACE_KNOWN.
6: DECL_THIS_STATIC (in VAR_DECL or FUNCTION_DECL).
DECL_FIELD_IS_BASE (in FIELD_DECL)
TYPE_DECL_ALIAS_P (in TYPE_DECL)
7: DECL_DEAD_FOR_LOCAL (in VAR_DECL).
DECL_THUNK_P (in a member FUNCTION_DECL)
DECL_NORMAL_CAPTURE_P (in FIELD_DECL)
8: DECL_DECLARED_CONSTEXPR_P (in VAR_DECL, FUNCTION_DECL)
Usage of language-independent fields in a language-dependent manner:
TYPE_ALIAS_SET
This field is used by TYPENAME_TYPEs, TEMPLATE_TYPE_PARMs, and so
forth as a substitute for the mark bits provided in `lang_type'.
At present, only the six low-order bits are used.
TYPE_LANG_SLOT_1
For a FUNCTION_TYPE or METHOD_TYPE, this is TYPE_RAISES_EXCEPTIONS.
For a POINTER_TYPE (to a METHOD_TYPE), this is TYPE_PTRMEMFUNC_TYPE.
For an ENUMERAL_TYPE, BOUND_TEMPLATE_TEMPLATE_PARM_TYPE,
RECORD_TYPE or UNION_TYPE this is TYPE_TEMPLATE_INFO.
BINFO_VIRTUALS
For a binfo, this is a TREE_LIST. There is an entry for each
virtual function declared either in BINFO or its direct and
indirect primary bases.
The BV_DELTA of each node gives the amount by which to adjust the
`this' pointer when calling the function. If the method is an
overridden version of a base class method, then it is assumed
that, prior to adjustment, the this pointer points to an object
of the base class.
The BV_VCALL_INDEX of each node, if non-NULL, gives the vtable
index of the vcall offset for this entry.
The BV_FN is the declaration for the virtual function itself.
If BV_LOST_PRIMARY is set, it means that this entry is for a lost
primary virtual base and can be left null in the vtable.
BINFO_VTABLE
This is an expression with POINTER_TYPE that gives the value
to which the vptr should be initialized. Use get_vtbl_decl_for_binfo
to extract the VAR_DECL for the complete vtable.
DECL_VINDEX
This field is NULL for a non-virtual function. For a virtual
function, it is eventually set to an INTEGER_CST indicating the
index in the vtable at which this function can be found. When
a virtual function is declared, but before it is known what
function is overridden, this field is the error_mark_node.
Temporarily, it may be set to a TREE_LIST whose TREE_VALUE is
the virtual function this one overrides, and whose TREE_CHAIN is
the old DECL_VINDEX. */
/* Language-specific tree checkers. */
#define VAR_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK2(NODE,VAR_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,TYPE_DECL,TEMPLATE_DECL,FUNCTION_DECL)
#define TYPE_FUNCTION_OR_TEMPLATE_DECL_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == FUNCTION_DECL)
#define VAR_FUNCTION_OR_PARM_DECL_CHECK(NODE) \
TREE_CHECK3(NODE,VAR_DECL,FUNCTION_DECL,PARM_DECL)
#define VAR_TEMPL_TYPE_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK4(NODE,VAR_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK(NODE) \
TREE_CHECK5(NODE,VAR_DECL,FIELD_DECL,FUNCTION_DECL,TYPE_DECL,TEMPLATE_DECL)
#define BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK(NODE) \
TREE_CHECK(NODE,BOUND_TEMPLATE_TEMPLATE_PARM)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define THUNK_FUNCTION_CHECK(NODE) __extension__ \
({ __typeof (NODE) const __t = (NODE); \
if (TREE_CODE (__t) != FUNCTION_DECL || !__t->decl_common.lang_specific \
|| !__t->decl_common.lang_specific->u.fn.thunk_p) \
tree_check_failed (__t, __FILE__, __LINE__, __FUNCTION__, 0); \
__t; })
#else
#define THUNK_FUNCTION_CHECK(NODE) (NODE)
#endif
/* Language-dependent contents of an identifier. */
struct GTY(()) lang_identifier {
struct c_common_identifier c_common;
cxx_binding *bindings;
};
/* Return a typed pointer version of T if it designates a
C++ front-end identifier. */
inline lang_identifier*
identifier_p (tree t)
{
if (TREE_CODE (t) == IDENTIFIER_NODE)
return (lang_identifier*) t;
return NULL;
}
#define LANG_IDENTIFIER_CAST(NODE) \
((struct lang_identifier*)IDENTIFIER_NODE_CHECK (NODE))
struct GTY(()) template_parm_index {
struct tree_common common;
int index;
int level;
int orig_level;
tree decl;
};
struct GTY(()) ptrmem_cst {
struct tree_common common;
tree member;
};
typedef struct ptrmem_cst * ptrmem_cst_t;
#define CLEANUP_P(NODE) TREE_LANG_FLAG_0 (TRY_BLOCK_CHECK (NODE))
#define BIND_EXPR_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_0 (BIND_EXPR_CHECK (NODE))
/* Used to mark the block around the member initializers and cleanups. */
#define BIND_EXPR_BODY_BLOCK(NODE) \
TREE_LANG_FLAG_3 (BIND_EXPR_CHECK (NODE))
#define FUNCTION_NEEDS_BODY_BLOCK(NODE) \
(DECL_CONSTRUCTOR_P (NODE) || DECL_DESTRUCTOR_P (NODE) \
|| LAMBDA_FUNCTION_P (NODE))
#define STATEMENT_LIST_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STATEMENT_LIST_CHECK (NODE))
#define STATEMENT_LIST_TRY_BLOCK(NODE) \
TREE_LANG_FLAG_2 (STATEMENT_LIST_CHECK (NODE))
/* Mark the outer curly brace BLOCK. */
#define BLOCK_OUTER_CURLY_BRACE_P(NODE) TREE_LANG_FLAG_0 (BLOCK_CHECK (NODE))
/* Nonzero if this statement should be considered a full-expression,
i.e., if temporaries created during this statement should have
their destructors run at the end of this statement. */
#define STMT_IS_FULL_EXPR_P(NODE) TREE_LANG_FLAG_1 ((NODE))
/* Marks the result of a statement expression. */
#define EXPR_STMT_STMT_EXPR_RESULT(NODE) \
TREE_LANG_FLAG_0 (EXPR_STMT_CHECK (NODE))
/* Nonzero if this statement-expression does not have an associated scope. */
#define STMT_EXPR_NO_SCOPE(NODE) \
TREE_LANG_FLAG_0 (STMT_EXPR_CHECK (NODE))
#define COND_EXPR_IS_VEC_DELETE(NODE) \
TREE_LANG_FLAG_0 (COND_EXPR_CHECK (NODE))
/* Nonzero if this NOP_EXPR is a reinterpret_cast. Such conversions
are not constexprs. Other NOP_EXPRs are. */
#define REINTERPRET_CAST_P(NODE) \
TREE_LANG_FLAG_0 (NOP_EXPR_CHECK (NODE))
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, in the usual
sense of `same'. */
#define same_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_STRICT)
/* Returns nonzero iff NODE is a declaration for the global function
`main'. */
#define DECL_MAIN_P(NODE) \
(DECL_EXTERN_C_FUNCTION_P (NODE) \
&& DECL_NAME (NODE) != NULL_TREE \
&& MAIN_NAME_P (DECL_NAME (NODE)) \
&& flag_hosted)
/* Lookup walker marking. */
#define LOOKUP_SEEN_P(NODE) TREE_VISITED(NODE)
#define LOOKUP_FOUND_P(NODE) \
TREE_LANG_FLAG_4 (TREE_CHECK3(NODE,RECORD_TYPE,UNION_TYPE,NAMESPACE_DECL))
/* These two accessors should only be used by OVL manipulators.
Other users should use iterators and convenience functions. */
#define OVL_FUNCTION(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->function)
#define OVL_CHAIN(NODE) \
(((struct tree_overload*)OVERLOAD_CHECK (NODE))->common.chain)
/* If set, this was imported in a using declaration. */
#define OVL_USING_P(NODE) TREE_LANG_FLAG_1 (OVERLOAD_CHECK (NODE))
/* If set, this overload is a hidden decl. */
#define OVL_HIDDEN_P(NODE) TREE_LANG_FLAG_2 (OVERLOAD_CHECK (NODE))
/* If set, this overload contains a nested overload. */
#define OVL_NESTED_P(NODE) TREE_LANG_FLAG_3 (OVERLOAD_CHECK (NODE))
/* If set, this overload was constructed during lookup. */
#define OVL_LOOKUP_P(NODE) TREE_LANG_FLAG_4 (OVERLOAD_CHECK (NODE))
/* If set, this is a persistent lookup. */
#define OVL_USED_P(NODE) TREE_USED (OVERLOAD_CHECK (NODE))
/* The first decl of an overload. */
#define OVL_FIRST(NODE) ovl_first (NODE)
/* The name of the overload set. */
#define OVL_NAME(NODE) DECL_NAME (OVL_FIRST (NODE))
/* Whether this is a set of overloaded functions. TEMPLATE_DECLS are
always wrapped in an OVERLOAD, so we don't need to check them
here. */
#define OVL_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || TREE_CODE (NODE) == OVERLOAD)
/* Whether this is a single member overload. */
#define OVL_SINGLE_P(NODE) \
(TREE_CODE (NODE) != OVERLOAD || !OVL_CHAIN (NODE))
/* OVL_HIDDEN_P nodes come first, then OVL_USING_P nodes, then regular
fns. */
struct GTY(()) tree_overload {
struct tree_common common;
tree function;
};
/* Iterator for a 1 dimensional overload. Permits iterating over the
outer level of a 2-d overload when explicitly enabled. */
class ovl_iterator
{
tree ovl;
const bool allow_inner; /* Only used when checking. */
public:
explicit ovl_iterator (tree o, bool allow = false)
: ovl (o), allow_inner (allow)
{
}
private:
/* Do not duplicate. */
ovl_iterator &operator= (const ovl_iterator &);
ovl_iterator (const ovl_iterator &);
public:
operator bool () const
{
return ovl;
}
ovl_iterator &operator++ ()
{
ovl = TREE_CODE (ovl) != OVERLOAD ? NULL_TREE : OVL_CHAIN (ovl);
return *this;
}
tree operator* () const
{
tree fn = TREE_CODE (ovl) != OVERLOAD ? ovl : OVL_FUNCTION (ovl);
/* Check this is not an unexpected 2-dimensional overload. */
gcc_checking_assert (allow_inner || TREE_CODE (fn) != OVERLOAD);
return fn;
}
public:
/* Whether this overload was introduced by a using decl. */
bool using_p () const
{
return TREE_CODE (ovl) == OVERLOAD && OVL_USING_P (ovl);
}
bool hidden_p () const
{
return TREE_CODE (ovl) == OVERLOAD && OVL_HIDDEN_P (ovl);
}
public:
tree remove_node (tree head)
{
return remove_node (head, ovl);
}
tree reveal_node (tree head)
{
return reveal_node (head, ovl);
}
protected:
/* If we have a nested overload, point at the inner overload and
return the next link on the outer one. */
tree maybe_push ()
{
tree r = NULL_TREE;
if (ovl && TREE_CODE (ovl) == OVERLOAD && OVL_NESTED_P (ovl))
{
r = OVL_CHAIN (ovl);
ovl = OVL_FUNCTION (ovl);
}
return r;
}
/* Restore an outer nested overload. */
void pop (tree outer)
{
gcc_checking_assert (!ovl);
ovl = outer;
}
private:
/* We make these static functions to avoid the address of the
iterator escaping the local context. */
static tree remove_node (tree head, tree node);
static tree reveal_node (tree ovl, tree node);
};
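/* Illustrative sketch (ours): iterating the overloads bound to a name in a
   class, using get_class_binding as in the comment further above; `type'
   and `name' are assumed to be a class type and an identifier.
     tree fns = get_class_binding (type, name);
     for (ovl_iterator it (fns); it; ++it)
       if (TREE_CODE (*it) == FUNCTION_DECL)
         ; // inspect each candidate here  */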
/* Iterator over a (potentially) 2 dimensional overload, which is
produced by name lookup. */
class lkp_iterator : public ovl_iterator
{
typedef ovl_iterator parent;
tree outer;
public:
explicit lkp_iterator (tree o)
: parent (o, true), outer (maybe_push ())
{
}
public:
lkp_iterator &operator++ ()
{
bool repush = !outer;
if (!parent::operator++ () && !repush)
{
pop (outer);
repush = true;
}
if (repush)
outer = maybe_push ();
return *this;
}
};
/* hash traits for declarations. Hashes potential overload sets via
DECL_NAME. */
struct named_decl_hash : ggc_remove <tree>
{
typedef tree value_type; /* A DECL or OVERLOAD */
typedef tree compare_type; /* An identifier. */
inline static hashval_t hash (const value_type decl);
inline static bool equal (const value_type existing, compare_type candidate);
static inline void mark_empty (value_type &p) {p = NULL_TREE;}
static inline bool is_empty (value_type p) {return !p;}
/* Nothing is deletable. Everything is insertable. */
static bool is_deleted (value_type) { return false; }
static void mark_deleted (value_type) { gcc_unreachable (); }
};
struct GTY(()) tree_template_decl {
struct tree_decl_common common;
tree arguments;
tree result;
};
/* Returns true iff NODE is a BASELINK. */
#define BASELINK_P(NODE) \
(TREE_CODE (NODE) == BASELINK)
/* The BINFO indicating the base in which lookup found the
BASELINK_FUNCTIONS. */
#define BASELINK_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->binfo)
/* The functions referred to by the BASELINK; either a FUNCTION_DECL,
a TEMPLATE_DECL, an OVERLOAD, or a TEMPLATE_ID_EXPR. */
#define BASELINK_FUNCTIONS(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->functions)
/* If T is a BASELINK, grab the functions, otherwise just T, which is
expected to already be a (list of) functions. */
#define MAYBE_BASELINK_FUNCTIONS(T) \
(BASELINK_P (T) ? BASELINK_FUNCTIONS (T) : T)
/* The BINFO in which the search for the functions indicated by this baselink
began. This base is used to determine the accessibility of functions
selected by overload resolution. */
#define BASELINK_ACCESS_BINFO(NODE) \
(((struct tree_baselink*) BASELINK_CHECK (NODE))->access_binfo)
/* For a type-conversion operator, the BASELINK_OPTYPE indicates the type
to which the conversion should occur. This value is important if
the BASELINK_FUNCTIONS include a template conversion operator --
the BASELINK_OPTYPE can be used to determine what type the user
requested. */
#define BASELINK_OPTYPE(NODE) \
(TREE_CHAIN (BASELINK_CHECK (NODE)))
/* Nonzero if this baselink was from a qualified lookup. */
#define BASELINK_QUALIFIED_P(NODE) \
TREE_LANG_FLAG_0 (BASELINK_CHECK (NODE))
struct GTY(()) tree_baselink {
struct tree_common common;
tree binfo;
tree functions;
tree access_binfo;
};
/* The different kinds of ids that we encounter. */
enum cp_id_kind
{
/* Not an id at all. */
CP_ID_KIND_NONE,
/* An unqualified-id that is not a template-id. */
CP_ID_KIND_UNQUALIFIED,
/* An unqualified-id that is a dependent name. */
CP_ID_KIND_UNQUALIFIED_DEPENDENT,
/* An unqualified template-id. */
CP_ID_KIND_TEMPLATE_ID,
/* A qualified-id. */
CP_ID_KIND_QUALIFIED
};
/* The various kinds of C++0x warnings we encounter. */
enum cpp0x_warn_str
{
/* extended initializer lists */
CPP0X_INITIALIZER_LISTS,
/* explicit conversion operators */
CPP0X_EXPLICIT_CONVERSION,
/* variadic templates */
CPP0X_VARIADIC_TEMPLATES,
/* lambda expressions */
CPP0X_LAMBDA_EXPR,
/* C++0x auto */
CPP0X_AUTO,
/* scoped enums */
CPP0X_SCOPED_ENUMS,
/* defaulted and deleted functions */
CPP0X_DEFAULTED_DELETED,
/* inline namespaces */
CPP0X_INLINE_NAMESPACES,
/* override controls, override/final */
CPP0X_OVERRIDE_CONTROLS,
/* non-static data member initializers */
CPP0X_NSDMI,
/* user defined literals */
CPP0X_USER_DEFINED_LITERALS,
/* delegating constructors */
CPP0X_DELEGATING_CTORS,
/* inheriting constructors */
CPP0X_INHERITING_CTORS,
/* C++11 attributes */
CPP0X_ATTRIBUTES,
/* ref-qualified member functions */
CPP0X_REF_QUALIFIER
};
/* The various kinds of operation used by composite_pointer_type. */
enum composite_pointer_operation
{
/* comparison */
CPO_COMPARISON,
/* conversion */
CPO_CONVERSION,
/* conditional expression */
CPO_CONDITIONAL_EXPR
};
/* Possible cases of expression list used by build_x_compound_expr_from_list. */
enum expr_list_kind {
ELK_INIT, /* initializer */
ELK_MEM_INIT, /* member initializer */
ELK_FUNC_CAST /* functional cast */
};
/* Possible cases of implicit bad rhs conversions. */
enum impl_conv_rhs {
ICR_DEFAULT_ARGUMENT, /* default argument */
ICR_CONVERTING, /* converting */
ICR_INIT, /* initialization */
ICR_ARGPASS, /* argument passing */
ICR_RETURN, /* return */
ICR_ASSIGN /* assignment */
};
/* Possible cases of implicit or explicit bad conversions to void. */
enum impl_conv_void {
ICV_CAST, /* (explicit) conversion to void */
ICV_SECOND_OF_COND, /* second operand of conditional expression */
ICV_THIRD_OF_COND, /* third operand of conditional expression */
ICV_RIGHT_OF_COMMA, /* right operand of comma operator */
ICV_LEFT_OF_COMMA, /* left operand of comma operator */
ICV_STATEMENT, /* statement */
ICV_THIRD_IN_FOR /* for increment expression */
};
/* Possible invalid uses of an abstract class that might not have a
specific associated declaration. */
enum GTY(()) abstract_class_use {
ACU_UNKNOWN, /* unknown or decl provided */
ACU_CAST, /* cast to abstract class */
ACU_NEW, /* new-expression of abstract class */
ACU_THROW, /* throw-expression of abstract class */
ACU_CATCH, /* catch-parameter of abstract class */
ACU_ARRAY, /* array of abstract class */
ACU_RETURN, /* return type of abstract class */
ACU_PARM /* parameter type of abstract class */
};
/* Macros for access to language-specific slots in an identifier. */
/* The IDENTIFIER_BINDING is the innermost cxx_binding for the
identifier. Its PREVIOUS is the next outermost binding. Each
VALUE field is a DECL for the associated declaration. Thus,
name lookup consists simply of pulling off the node at the front
of the list (modulo oddities for looking up the names of types,
and such.) You can use SCOPE field to determine the scope
that bound the name. */
#define IDENTIFIER_BINDING(NODE) \
(LANG_IDENTIFIER_CAST (NODE)->bindings)
/* TREE_TYPE indicates the current type only at local and class scope.
   For namespace scope, the presence of a type in any namespace is
   indicated with global_type_node, and the real type behind it must
   be found through lookup. */
#define IDENTIFIER_TYPE_VALUE(NODE) identifier_type_value (NODE)
#define REAL_IDENTIFIER_TYPE_VALUE(NODE) TREE_TYPE (NODE)
#define SET_IDENTIFIER_TYPE_VALUE(NODE,TYPE) (TREE_TYPE (NODE) = (TYPE))
#define IDENTIFIER_HAS_TYPE_VALUE(NODE) (IDENTIFIER_TYPE_VALUE (NODE) ? 1 : 0)
/* Kinds of identifiers. Values are carefully chosen. */
enum cp_identifier_kind {
cik_normal = 0, /* Not a special identifier. */
cik_keyword = 1, /* A keyword. */
cik_ctor = 2, /* Constructor (in-chg, complete or base). */
cik_dtor = 3, /* Destructor (in-chg, deleting, complete or
base). */
cik_simple_op = 4, /* Non-assignment operator name. */
cik_assign_op = 5, /* An assignment operator name. */
cik_conv_op = 6, /* Conversion operator name. */
cik_reserved_for_udlit = 7, /* Not yet in use */
cik_max
};
/* Kind bits. */
#define IDENTIFIER_KIND_BIT_0(NODE) \
TREE_LANG_FLAG_0 (IDENTIFIER_NODE_CHECK (NODE))
#define IDENTIFIER_KIND_BIT_1(NODE) \
TREE_LANG_FLAG_1 (IDENTIFIER_NODE_CHECK (NODE))
#define IDENTIFIER_KIND_BIT_2(NODE) \
TREE_LANG_FLAG_2 (IDENTIFIER_NODE_CHECK (NODE))
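/* Note (our annotation): the three kind bits spell out the
   cp_identifier_kind value directly, kind = 4*BIT_2 + 2*BIT_1 + BIT_0;
   e.g. cik_conv_op (6) has bits (1,1,0), which is exactly the pattern
   IDENTIFIER_CONV_OP_P tests below. */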
/* Used by various search routines. */
#define IDENTIFIER_MARKED(NODE) \
TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (NODE))
/* Nonzero if this identifier is used as a virtual function name somewhere
(optimizes searches). */
#define IDENTIFIER_VIRTUAL_P(NODE) \
TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (NODE))
/* True iff NAME is the DECL_ASSEMBLER_NAME for an entity with vague
linkage which the prelinker has assigned to this translation
unit. */
#define IDENTIFIER_REPO_CHOSEN(NAME) \
(TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (NAME)))
/* True if this identifier is a reserved word. C_RID_CODE (node) is
then the RID_* value of the keyword. Value 1. */
#define IDENTIFIER_KEYWORD_P(NODE) \
((!IDENTIFIER_KIND_BIT_2 (NODE)) \
& (!IDENTIFIER_KIND_BIT_1 (NODE)) \
& IDENTIFIER_KIND_BIT_0 (NODE))
/* True if this identifier is the name of a constructor or
destructor. Value 2 or 3. */
#define IDENTIFIER_CDTOR_P(NODE) \
((!IDENTIFIER_KIND_BIT_2 (NODE)) \
& IDENTIFIER_KIND_BIT_1 (NODE))
/* True if this identifier is the name of a constructor. Value 2. */
#define IDENTIFIER_CTOR_P(NODE) \
(IDENTIFIER_CDTOR_P(NODE) \
& (!IDENTIFIER_KIND_BIT_0 (NODE)))
/* True if this identifier is the name of a destructor. Value 3. */
#define IDENTIFIER_DTOR_P(NODE) \
(IDENTIFIER_CDTOR_P(NODE) \
& IDENTIFIER_KIND_BIT_0 (NODE))
/* True if this identifier is for any operator name (including
conversions). Value 4, 5, 6 or 7. */
#define IDENTIFIER_ANY_OP_P(NODE) \
(IDENTIFIER_KIND_BIT_2 (NODE))
/* True if this identifier is for an overloaded operator. Values 4, 5. */
#define IDENTIFIER_OVL_OP_P(NODE) \
(IDENTIFIER_ANY_OP_P (NODE) \
& (!IDENTIFIER_KIND_BIT_1 (NODE)))
/* True if this identifier is for any assignment. Values 5. */
#define IDENTIFIER_ASSIGN_OP_P(NODE) \
(IDENTIFIER_OVL_OP_P (NODE) \
& IDENTIFIER_KIND_BIT_0 (NODE))
/* True if this identifier is the name of a type-conversion
operator. Value 7. */
#define IDENTIFIER_CONV_OP_P(NODE) \
(IDENTIFIER_ANY_OP_P (NODE) \
& IDENTIFIER_KIND_BIT_1 (NODE) \
& (!IDENTIFIER_KIND_BIT_0 (NODE)))
/* True if this identifier is a new or delete operator. */
#define IDENTIFIER_NEWDEL_OP_P(NODE) \
(IDENTIFIER_OVL_OP_P (NODE) \
&& IDENTIFIER_OVL_OP_FLAGS (NODE) & OVL_OP_FLAG_ALLOC)
/* True if this identifier is a new operator. */
#define IDENTIFIER_NEW_OP_P(NODE) \
(IDENTIFIER_OVL_OP_P (NODE) \
&& (IDENTIFIER_OVL_OP_FLAGS (NODE) \
& (OVL_OP_FLAG_ALLOC | OVL_OP_FLAG_DELETE)) == OVL_OP_FLAG_ALLOC)
/* Access a C++-specific index for identifier NODE.
Used to optimize operator mappings etc. */
#define IDENTIFIER_CP_INDEX(NODE) \
(IDENTIFIER_NODE_CHECK(NODE)->base.u.bits.address_space)
/* In a RECORD_TYPE or UNION_TYPE, nonzero if any component is read-only. */
#define C_TYPE_FIELDS_READONLY(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->fields_readonly)
/* The tokens stored in the default argument. */
#define DEFARG_TOKENS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->tokens)
#define DEFARG_INSTANTIATIONS(NODE) \
(((struct tree_default_arg *)DEFAULT_ARG_CHECK (NODE))->instantiations)
struct GTY (()) tree_default_arg {
struct tree_common common;
struct cp_token_cache *tokens;
vec<tree, va_gc> *instantiations;
};
#define DEFERRED_NOEXCEPT_PATTERN(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->pattern)
#define DEFERRED_NOEXCEPT_ARGS(NODE) \
(((struct tree_deferred_noexcept *)DEFERRED_NOEXCEPT_CHECK (NODE))->args)
#define DEFERRED_NOEXCEPT_SPEC_P(NODE) \
((NODE) && (TREE_PURPOSE (NODE)) \
&& (TREE_CODE (TREE_PURPOSE (NODE)) == DEFERRED_NOEXCEPT))
#define UNEVALUATED_NOEXCEPT_SPEC_P(NODE) \
(DEFERRED_NOEXCEPT_SPEC_P (NODE) \
&& DEFERRED_NOEXCEPT_PATTERN (TREE_PURPOSE (NODE)) == NULL_TREE)
struct GTY (()) tree_deferred_noexcept {
struct tree_base base;
tree pattern;
tree args;
};
/* The condition associated with the static assertion. This must be
an integral constant expression. */
#define STATIC_ASSERT_CONDITION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->condition)
/* The message associated with the static assertion. This must be a
string constant, which will be emitted as an error message when the
static assert condition is false. */
#define STATIC_ASSERT_MESSAGE(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->message)
/* Source location information for a static assertion. */
#define STATIC_ASSERT_SOURCE_LOCATION(NODE) \
(((struct tree_static_assert *)STATIC_ASSERT_CHECK (NODE))->location)
struct GTY (()) tree_static_assert {
struct tree_common common;
tree condition;
tree message;
location_t location;
};
struct GTY (()) tree_argument_pack_select {
struct tree_common common;
tree argument_pack;
int index;
};
/* The different kinds of traits that we encounter. */
enum cp_trait_kind
{
CPTK_BASES,
CPTK_DIRECT_BASES,
CPTK_HAS_NOTHROW_ASSIGN,
CPTK_HAS_NOTHROW_CONSTRUCTOR,
CPTK_HAS_NOTHROW_COPY,
CPTK_HAS_TRIVIAL_ASSIGN,
CPTK_HAS_TRIVIAL_CONSTRUCTOR,
CPTK_HAS_TRIVIAL_COPY,
CPTK_HAS_TRIVIAL_DESTRUCTOR,
CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS,
CPTK_HAS_VIRTUAL_DESTRUCTOR,
CPTK_IS_ABSTRACT,
CPTK_IS_AGGREGATE,
CPTK_IS_BASE_OF,
CPTK_IS_CLASS,
CPTK_IS_EMPTY,
CPTK_IS_ENUM,
CPTK_IS_FINAL,
CPTK_IS_LITERAL_TYPE,
CPTK_IS_POD,
CPTK_IS_POLYMORPHIC,
CPTK_IS_SAME_AS,
CPTK_IS_STD_LAYOUT,
CPTK_IS_TRIVIAL,
CPTK_IS_TRIVIALLY_ASSIGNABLE,
CPTK_IS_TRIVIALLY_CONSTRUCTIBLE,
CPTK_IS_TRIVIALLY_COPYABLE,
CPTK_IS_UNION,
CPTK_UNDERLYING_TYPE,
CPTK_IS_ASSIGNABLE,
CPTK_IS_CONSTRUCTIBLE
};
/* The types that we are processing. */
#define TRAIT_EXPR_TYPE1(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type1)
#define TRAIT_EXPR_TYPE2(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->type2)
/* The specific trait that we are processing. */
#define TRAIT_EXPR_KIND(NODE) \
(((struct tree_trait_expr *)TRAIT_EXPR_CHECK (NODE))->kind)
struct GTY (()) tree_trait_expr {
struct tree_common common;
tree type1;
tree type2;
enum cp_trait_kind kind;
};
/* Based off of TYPE_UNNAMED_P. */
#define LAMBDA_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_LAMBDA_EXPR (NODE))
/* Test if FUNCTION_DECL is a lambda function. */
#define LAMBDA_FUNCTION_P(FNDECL) \
(DECL_DECLARES_FUNCTION_P (FNDECL) \
&& DECL_OVERLOADED_OPERATOR_P (FNDECL) \
&& DECL_OVERLOADED_OPERATOR_IS (FNDECL, CALL_EXPR) \
&& LAMBDA_TYPE_P (CP_DECL_CONTEXT (FNDECL)))
enum cp_lambda_default_capture_mode_type {
CPLD_NONE,
CPLD_COPY,
CPLD_REFERENCE
};
/* The method of default capture, if any. */
#define LAMBDA_EXPR_DEFAULT_CAPTURE_MODE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->default_capture_mode)
/* The capture-list, including `this'. Each capture is stored as a FIELD_DECL
* so that the name, type, and field are all together, whether or not it has
* been added to the lambda's class type.
TREE_LIST:
TREE_PURPOSE: The FIELD_DECL for this capture.
TREE_VALUE: The initializer. This is part of a GNU extension. */
#define LAMBDA_EXPR_CAPTURE_LIST(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->capture_list)
/* During parsing of the lambda-introducer, the node in the capture-list
that holds the 'this' capture. During parsing of the body, the
capture proxy for that node. */
#define LAMBDA_EXPR_THIS_CAPTURE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->this_capture)
/* Predicate tracking whether `this' is in the effective capture set. */
#define LAMBDA_EXPR_CAPTURES_THIS_P(NODE) \
LAMBDA_EXPR_THIS_CAPTURE(NODE)
/* Predicate tracking whether the lambda was declared 'mutable'. */
#define LAMBDA_EXPR_MUTABLE_P(NODE) \
TREE_LANG_FLAG_1 (LAMBDA_EXPR_CHECK (NODE))
/* True iff uses of a const variable capture were optimized away. */
#define LAMBDA_EXPR_CAPTURE_OPTIMIZED(NODE) \
TREE_LANG_FLAG_2 (LAMBDA_EXPR_CHECK (NODE))
/* True if this TREE_LIST in LAMBDA_EXPR_CAPTURE_LIST is for an explicit
capture. */
#define LAMBDA_CAPTURE_EXPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* The source location of the lambda. */
#define LAMBDA_EXPR_LOCATION(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->locus)
/* The mangling scope for the lambda: FUNCTION_DECL, PARM_DECL, VAR_DECL,
FIELD_DECL or NULL_TREE. If this is NULL_TREE, we have no linkage. */
#define LAMBDA_EXPR_EXTRA_SCOPE(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->extra_scope)
/* If EXTRA_SCOPE, this is the number of the lambda within that scope. */
#define LAMBDA_EXPR_DISCRIMINATOR(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->discriminator)
/* During parsing of the lambda, a vector of capture proxies which need
to be pushed once we're done processing a nested lambda. */
#define LAMBDA_EXPR_PENDING_PROXIES(NODE) \
(((struct tree_lambda_expr *)LAMBDA_EXPR_CHECK (NODE))->pending_proxies)
/* The closure type of the lambda, which is also the type of the
LAMBDA_EXPR. */
#define LAMBDA_EXPR_CLOSURE(NODE) \
(TREE_TYPE (LAMBDA_EXPR_CHECK (NODE)))
struct GTY (()) tree_lambda_expr
{
struct tree_typed typed;
tree capture_list;
tree this_capture;
tree extra_scope;
vec<tree, va_gc> *pending_proxies;
location_t locus;
enum cp_lambda_default_capture_mode_type default_capture_mode;
int discriminator;
};
/* A (typedef,context,usage location) triplet.
It represents a typedef used through a
context at a given source location.
e.g.
struct foo {
typedef int myint;
};
struct bar {
foo::myint v; // #1<-- this location.
};
In bar, the triplet will be (myint, foo, #1).
*/
struct GTY(()) qualified_typedef_usage_s {
tree typedef_decl;
tree context;
location_t locus;
};
typedef struct qualified_typedef_usage_s qualified_typedef_usage_t;
/* Non-zero if this template specialization has access violations that
should be rechecked when the function is instantiated outside argument
deduction. */
#define TINFO_HAS_ACCESS_ERRORS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_INFO_CHECK (NODE)))
#define FNDECL_HAS_ACCESS_ERRORS(NODE) \
(TINFO_HAS_ACCESS_ERRORS (DECL_TEMPLATE_INFO (NODE)))
/* Non-zero if this variable template specialization was specified using a
template-id, so it's a partial or full specialization and not a definition
of the member template of a particular class specialization. */
#define TINFO_USED_TEMPLATE_ID(NODE) \
(TREE_LANG_FLAG_1 (TEMPLATE_INFO_CHECK (NODE)))
struct GTY(()) tree_template_info {
struct tree_common common;
vec<qualified_typedef_usage_t, va_gc> *typedefs_needing_access_checking;
};
// Constraint information for a C++ declaration. Constraint information is
// comprised of:
//
// - a constraint expression introduced by the template header
// - a constraint expression introduced by a function declarator
// - the associated constraints, which are the conjunction of those,
// and used for declaration matching
//
// The template and declarator requirements are kept to support pretty
// printing constrained declarations.
struct GTY(()) tree_constraint_info {
struct tree_base base;
tree template_reqs;
tree declarator_reqs;
tree associated_constr;
};
// Require that pointer P is non-null before returning.
template<typename T>
inline T*
check_nonnull (T* p)
{
gcc_assert (p);
return p;
}
// Returns true iff T is non-null and represents constraint info.
inline tree_constraint_info *
check_constraint_info (tree t)
{
if (t && TREE_CODE (t) == CONSTRAINT_INFO)
return (tree_constraint_info *)t;
return NULL;
}
// Access the expression describing the template constraints. This may be
// null if no constraints were introduced in the template parameter list,
// a requirements clause after the template parameter list, or constraints
// through a constrained-type-specifier.
#define CI_TEMPLATE_REQS(NODE) \
check_constraint_info (check_nonnull(NODE))->template_reqs
// Access the expression describing the trailing constraints. This is non-null
// for any implicit instantiation of a constrained declaration. For a
// templated declaration it is non-null only when a trailing requires-clause
// was specified.
#define CI_DECLARATOR_REQS(NODE) \
check_constraint_info (check_nonnull(NODE))->declarator_reqs
// The computed associated constraint expression for a declaration.
#define CI_ASSOCIATED_CONSTRAINTS(NODE) \
check_constraint_info (check_nonnull(NODE))->associated_constr
// Access the logical constraints on the template parameters introduced
// at a given template parameter list level indicated by NODE.
#define TEMPLATE_PARMS_CONSTRAINTS(NODE) \
TREE_TYPE (TREE_LIST_CHECK (NODE))
// Access the logical constraints on the template parameter declaration
// indicated by NODE.
#define TEMPLATE_PARM_CONSTRAINTS(NODE) \
TREE_TYPE (TREE_LIST_CHECK (NODE))
/* Non-zero if the noexcept is present in a compound requirement. */
#define COMPOUND_REQ_NOEXCEPT_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK (NODE, COMPOUND_REQ))
/* The constraints on an 'auto' placeholder type, used in an argument deduction
constraint. */
#define PLACEHOLDER_TYPE_CONSTRAINTS(NODE) \
DECL_SIZE_UNIT (TYPE_NAME (NODE))
/* The expression evaluated by the predicate constraint. */
#define PRED_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PRED_CONSTR), 0)
/* The concept of a concept check. */
#define CHECK_CONSTR_CONCEPT(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 0)
/* The template arguments of a concept check. */
#define CHECK_CONSTR_ARGS(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, CHECK_CONSTR), 1)
/* The expression validated by the predicate constraint. */
#define EXPR_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, EXPR_CONSTR), 0)
/* The type validated by the predicate constraint. */
#define TYPE_CONSTR_TYPE(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, TYPE_CONSTR), 0)
/* In an implicit conversion constraint, the source expression. */
#define ICONV_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 0)
/* In an implicit conversion constraint, the target type. */
#define ICONV_CONSTR_TYPE(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, ICONV_CONSTR), 1)
/* In an argument deduction constraint, the source expression. */
#define DEDUCT_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 0)
/* In an argument deduction constraint, the target type pattern. */
#define DEDUCT_CONSTR_PATTERN(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 1)
/* In an argument deduction constraint, the list of placeholder nodes. */
#define DEDUCT_CONSTR_PLACEHOLDER(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, DEDUCT_CONSTR), 2)
/* The expression of an exception constraint. */
#define EXCEPT_CONSTR_EXPR(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, EXCEPT_CONSTR), 0)
/* In a parameterized constraint, the local parameters. */
#define PARM_CONSTR_PARMS(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 0)
/* In a parameterized constraint, the operand. */
#define PARM_CONSTR_OPERAND(NODE) \
TREE_OPERAND (TREE_CHECK (NODE, PARM_CONSTR), 1)
/* Whether a PARM_DECL represents a local parameter in a
requires-expression. */
#define CONSTRAINT_VAR_P(NODE) \
DECL_LANG_FLAG_2 (TREE_CHECK (NODE, PARM_DECL))
/* The concept constraining this constrained template-parameter. */
#define CONSTRAINED_PARM_CONCEPT(NODE) \
DECL_SIZE_UNIT (TYPE_DECL_CHECK (NODE))
/* Any extra template arguments specified for a constrained
template-parameter. */
#define CONSTRAINED_PARM_EXTRA_ARGS(NODE) \
DECL_SIZE (TYPE_DECL_CHECK (NODE))
/* The first template parameter of CONSTRAINED_PARM_CONCEPT to be used as a
prototype for the constrained parameter in finish_shorthand_constraint,
attached for convenience. */
#define CONSTRAINED_PARM_PROTOTYPE(NODE) \
DECL_INITIAL (TYPE_DECL_CHECK (NODE))
enum cp_tree_node_structure_enum {
TS_CP_GENERIC,
TS_CP_IDENTIFIER,
TS_CP_TPI,
TS_CP_PTRMEM,
TS_CP_OVERLOAD,
TS_CP_BASELINK,
TS_CP_TEMPLATE_DECL,
TS_CP_DEFAULT_ARG,
TS_CP_DEFERRED_NOEXCEPT,
TS_CP_STATIC_ASSERT,
TS_CP_ARGUMENT_PACK_SELECT,
TS_CP_TRAIT_EXPR,
TS_CP_LAMBDA_EXPR,
TS_CP_TEMPLATE_INFO,
TS_CP_CONSTRAINT_INFO,
TS_CP_USERDEF_LITERAL
};
/* The resulting tree type. */
union GTY((desc ("cp_tree_node_structure (&%h)"),
chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node {
union tree_node GTY ((tag ("TS_CP_GENERIC"),
desc ("tree_node_structure (&%h)"))) generic;
struct template_parm_index GTY ((tag ("TS_CP_TPI"))) tpi;
struct ptrmem_cst GTY ((tag ("TS_CP_PTRMEM"))) ptrmem;
struct tree_overload GTY ((tag ("TS_CP_OVERLOAD"))) overload;
struct tree_baselink GTY ((tag ("TS_CP_BASELINK"))) baselink;
struct tree_template_decl GTY ((tag ("TS_CP_TEMPLATE_DECL"))) template_decl;
struct tree_default_arg GTY ((tag ("TS_CP_DEFAULT_ARG"))) default_arg;
struct tree_deferred_noexcept GTY ((tag ("TS_CP_DEFERRED_NOEXCEPT"))) deferred_noexcept;
struct lang_identifier GTY ((tag ("TS_CP_IDENTIFIER"))) identifier;
struct tree_static_assert GTY ((tag ("TS_CP_STATIC_ASSERT")))
static_assertion;
struct tree_argument_pack_select GTY ((tag ("TS_CP_ARGUMENT_PACK_SELECT")))
argument_pack_select;
struct tree_trait_expr GTY ((tag ("TS_CP_TRAIT_EXPR")))
trait_expression;
struct tree_lambda_expr GTY ((tag ("TS_CP_LAMBDA_EXPR")))
lambda_expression;
struct tree_template_info GTY ((tag ("TS_CP_TEMPLATE_INFO")))
template_info;
struct tree_constraint_info GTY ((tag ("TS_CP_CONSTRAINT_INFO")))
constraint_info;
struct tree_userdef_literal GTY ((tag ("TS_CP_USERDEF_LITERAL")))
userdef_literal;
};
/* Global state. */
struct GTY(()) saved_scope {
vec<cxx_saved_binding, va_gc> *old_bindings;
tree old_namespace;
vec<tree, va_gc> *decl_ns_list;
tree class_name;
tree class_type;
tree access_specifier;
tree function_decl;
vec<tree, va_gc> *lang_base;
tree lang_name;
tree template_parms;
cp_binding_level *x_previous_class_level;
tree x_saved_tree;
/* Only used for uses of `this' in a trailing return type. */
tree x_current_class_ptr;
tree x_current_class_ref;
int x_processing_template_decl;
int x_processing_specialization;
BOOL_BITFIELD x_processing_explicit_instantiation : 1;
BOOL_BITFIELD need_pop_function_context : 1;
/* Nonzero if we are parsing the discarded statement of a constexpr
if-statement. */
BOOL_BITFIELD discarded_stmt : 1;
int unevaluated_operand;
int inhibit_evaluation_warnings;
int noexcept_operand;
/* If non-zero, an implicit "omp declare target" attribute is added to
attribute lists. */
int omp_declare_target_attribute;
struct stmt_tree_s x_stmt_tree;
cp_binding_level *class_bindings;
cp_binding_level *bindings;
hash_map<tree, tree> *GTY((skip)) x_local_specializations;
struct saved_scope *prev;
};
extern GTY(()) struct saved_scope *scope_chain;
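/* Illustrative sketch (editor's addition, not part of the original
header): saved_scope records form a stack through their `prev' field,
with scope_chain pointing at the innermost record:

  for (saved_scope *s = scope_chain; s; s = s->prev)
    ;  // walk outward through the saved scopes
*/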
/* The current open namespace. */
#define current_namespace scope_chain->old_namespace
/* The stack for namespaces of current declarations. */
#define decl_namespace_list scope_chain->decl_ns_list
/* IDENTIFIER_NODE: name of current class */
#define current_class_name scope_chain->class_name
/* _TYPE: the type of the current class */
#define current_class_type scope_chain->class_type
/* When parsing a class definition, the access specifier most recently
given by the user, or, if no access specifier was given, the
default value appropriate for the kind of class (i.e., struct,
class, or union). */
#define current_access_specifier scope_chain->access_specifier
/* Pointer to the top of the language name stack. */
#define current_lang_base scope_chain->lang_base
#define current_lang_name scope_chain->lang_name
/* When parsing a template declaration, a TREE_LIST represents the
active template parameters. Each node in the list represents one
level of template parameters. The innermost level is first in the
list. The depth of each level is stored as an INTEGER_CST in the
TREE_PURPOSE of each node. The parameters for that level are
stored in the TREE_VALUE. */
#define current_template_parms scope_chain->template_parms
#define processing_template_decl scope_chain->x_processing_template_decl
#define processing_specialization scope_chain->x_processing_specialization
#define processing_explicit_instantiation scope_chain->x_processing_explicit_instantiation
#define in_discarded_stmt scope_chain->discarded_stmt
/* RAII sentinel to handle clearing processing_template_decl and restoring
it when done. */
struct processing_template_decl_sentinel
{
int saved;
processing_template_decl_sentinel (bool reset = true)
: saved (processing_template_decl)
{
if (reset)
processing_template_decl = 0;
}
~processing_template_decl_sentinel()
{
processing_template_decl = saved;
}
};
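/* Illustrative usage sketch (editor's addition, not part of the original
header): temporarily leaving template-processing mode for the duration
of a scope:

  {
    processing_template_decl_sentinel ptds;
    // processing_template_decl is 0 in here ...
  }   // ... and restored to its saved value on scope exit.

Passing `false' keeps the current value but still restores it at the
end of the scope. */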
/* RAII sentinel to disable certain warnings during template substitution
and elsewhere. */
struct warning_sentinel
{
int &flag;
int val;
warning_sentinel(int& flag, bool suppress=true)
: flag(flag), val(flag) { if (suppress) flag = 0; }
~warning_sentinel() { flag = val; }
};
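/* Illustrative usage sketch (editor's addition): suppressing a warning
flag within a scope; warn_conversion here stands for any `int' warning
flag variable:

  {
    warning_sentinel w (warn_conversion);
    // warn_conversion is 0 in here ...
  }   // ... and restored on scope exit.
*/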
/* RAII sentinel that saves the value of a variable, optionally
overrides it right away, and restores its value when the sentinel
is destructed. */
template <typename T>
class temp_override
{
T& overridden_variable;
T saved_value;
public:
temp_override(T& var) : overridden_variable (var), saved_value (var) {}
temp_override(T& var, T overrider)
: overridden_variable (var), saved_value (var)
{
overridden_variable = overrider;
}
~temp_override() { overridden_variable = saved_value; }
};
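/* Illustrative usage sketch (editor's addition): overriding a global for
the duration of a scope:

  {
    temp_override<tree> to (current_function_decl, NULL_TREE);
    // current_function_decl is NULL_TREE in here ...
  }   // ... and restored on scope exit.
*/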
/* The cached class binding level, from the most recently exited
class, or NULL if none. */
#define previous_class_level scope_chain->x_previous_class_level
/* A map from local variable declarations in the body of the template
presently being instantiated to the corresponding instantiated
local variables. */
#define local_specializations scope_chain->x_local_specializations
/* Nonzero if we are parsing the operand of a noexcept operator. */
#define cp_noexcept_operand scope_chain->noexcept_operand
/* A list of private types mentioned, for deferred access checking. */
struct GTY((for_user)) cxx_int_tree_map {
unsigned int uid;
tree to;
};
struct cxx_int_tree_map_hasher : ggc_ptr_hash<cxx_int_tree_map>
{
static hashval_t hash (cxx_int_tree_map *);
static bool equal (cxx_int_tree_map *, cxx_int_tree_map *);
};
struct named_label_entry; /* Defined in decl.c. */
struct named_label_hash : ggc_remove <named_label_entry *>
{
typedef named_label_entry *value_type;
typedef tree compare_type; /* An identifier. */
inline static hashval_t hash (value_type);
inline static bool equal (const value_type, compare_type);
inline static void mark_empty (value_type &p) {p = NULL;}
inline static bool is_empty (value_type p) {return !p;}
/* Nothing is deletable. Everything is insertable. */
inline static bool is_deleted (value_type) { return false; }
inline static void mark_deleted (value_type) { gcc_unreachable (); }
};
/* Global state pertinent to the current function. */
struct GTY(()) language_function {
struct c_language_function base;
tree x_cdtor_label;
tree x_current_class_ptr;
tree x_current_class_ref;
tree x_eh_spec_block;
tree x_in_charge_parm;
tree x_vtt_parm;
tree x_return_value;
tree x_auto_return_pattern;
BOOL_BITFIELD returns_value : 1;
BOOL_BITFIELD returns_null : 1;
BOOL_BITFIELD returns_abnormally : 1;
BOOL_BITFIELD infinite_loop: 1;
BOOL_BITFIELD x_in_function_try_handler : 1;
BOOL_BITFIELD x_in_base_initializer : 1;
/* True if this function can throw an exception. */
BOOL_BITFIELD can_throw : 1;
BOOL_BITFIELD invalid_constexpr : 1;
hash_table<named_label_hash> *x_named_labels;
cp_binding_level *bindings;
vec<tree, va_gc> *x_local_names;
/* Tracking possibly infinite loops. This is a vec<tree> only because
vec<bool> doesn't work with gengtype. */
vec<tree, va_gc> *infinite_loops;
hash_table<cxx_int_tree_map_hasher> *extern_decl_map;
};
/* The current C++-specific per-function global variables. */
#define cp_function_chain (cfun->language)
/* In a constructor or destructor, the point at which all derived-class
construction or destruction has been done. I.e., just before a
constructor returns, or before any base-class destruction will be done
in a destructor. */
#define cdtor_label cp_function_chain->x_cdtor_label
/* When we're processing a member function, current_class_ptr is the
PARM_DECL for the `this' pointer. The current_class_ref is an
expression for `*this'. */
#define current_class_ptr \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ptr \
: &scope_chain->x_current_class_ptr))
#define current_class_ref \
(*(cfun && cp_function_chain \
? &cp_function_chain->x_current_class_ref \
: &scope_chain->x_current_class_ref))
/* The EH_SPEC_BLOCK for the exception-specifiers for the current
function, if any. */
#define current_eh_spec_block cp_function_chain->x_eh_spec_block
/* The `__in_chrg' parameter for the current function. Only used for
constructors and destructors. */
#define current_in_charge_parm cp_function_chain->x_in_charge_parm
/* The `__vtt_parm' parameter for the current function. Only used for
constructors and destructors. */
#define current_vtt_parm cp_function_chain->x_vtt_parm
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement that specifies a return value is seen. */
#define current_function_returns_value cp_function_chain->returns_value
/* Set to 0 at beginning of a function definition, set to 1 if
a return statement with no argument is seen. */
#define current_function_returns_null cp_function_chain->returns_null
/* Set to 0 at beginning of a function definition, set to 1 if
a call to a noreturn function is seen. */
#define current_function_returns_abnormally \
cp_function_chain->returns_abnormally
/* Set to 0 at beginning of a function definition, set to 1 if we see an
obvious infinite loop. This can have false positives and false
negatives, so it should only be used as a heuristic. */
#define current_function_infinite_loop cp_function_chain->infinite_loop
/* Nonzero if we are processing a base initializer. Zero elsewhere. */
#define in_base_initializer cp_function_chain->x_in_base_initializer
#define in_function_try_handler cp_function_chain->x_in_function_try_handler
/* Expression always returned from function, or error_mark_node
otherwise, for use by the automatic named return value optimization. */
#define current_function_return_value \
(cp_function_chain->x_return_value)
/* A type involving 'auto' to be used for return type deduction. */
#define current_function_auto_return_pattern \
(cp_function_chain->x_auto_return_pattern)
/* In parser.c. */
extern tree cp_literal_operator_id (const char *);
/* TRUE if a tree code represents a statement. */
extern bool statement_code_p[MAX_TREE_CODES];
#define STATEMENT_CODE_P(CODE) statement_code_p[(int) (CODE)]
enum languages { lang_c, lang_cplusplus };
/* Macros to make error reporting functions' lives easier. */
#define TYPE_LINKAGE_IDENTIFIER(NODE) \
(TYPE_IDENTIFIER (TYPE_MAIN_VARIANT (NODE)))
#define TYPE_NAME_STRING(NODE) (IDENTIFIER_POINTER (TYPE_IDENTIFIER (NODE)))
#define TYPE_NAME_LENGTH(NODE) (IDENTIFIER_LENGTH (TYPE_IDENTIFIER (NODE)))
/* Nonzero if NODE has no name for linkage purposes. */
#define TYPE_UNNAMED_P(NODE) \
(OVERLOAD_TYPE_P (NODE) && anon_aggrname_p (TYPE_LINKAGE_IDENTIFIER (NODE)))
/* The _DECL for this _TYPE. */
#define TYPE_MAIN_DECL(NODE) (TYPE_STUB_DECL (TYPE_MAIN_VARIANT (NODE)))
/* Nonzero if T is a type that could resolve to any kind of concrete type
at instantiation time. */
#define WILDCARD_TYPE_P(T) \
(TREE_CODE (T) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (T) == TYPENAME_TYPE \
|| TREE_CODE (T) == TYPEOF_TYPE \
|| TREE_CODE (T) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| TREE_CODE (T) == DECLTYPE_TYPE)
/* Nonzero if T is a class (or struct or union) type. Also nonzero
for template type parameters, typename types, and instantiated
template template parameters. Keep these checks in ascending code
order. */
#define MAYBE_CLASS_TYPE_P(T) (WILDCARD_TYPE_P (T) || CLASS_TYPE_P (T))
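/* For example (editor's addition), the parameter T of `template <class T>
void f ()' satisfies WILDCARD_TYPE_P and hence MAYBE_CLASS_TYPE_P, even
though it may instantiate to a non-class type such as `int'. */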
/* Set CLASS_TYPE_P for T to VAL. T must be a class, struct, or
union type. */
#define SET_CLASS_TYPE_P(T, VAL) \
(TYPE_LANG_FLAG_5 (RECORD_OR_UNION_CHECK (T)) = (VAL))
/* Nonzero if T is a class type. Zero for template type parameters,
typename types, and so forth. */
#define CLASS_TYPE_P(T) \
(RECORD_OR_UNION_CODE_P (TREE_CODE (T)) && TYPE_LANG_FLAG_5 (T))
/* Nonzero if T is a class type but not a union. */
#define NON_UNION_CLASS_TYPE_P(T) \
(TREE_CODE (T) == RECORD_TYPE && TYPE_LANG_FLAG_5 (T))
/* Keep these checks in ascending code order. */
#define RECORD_OR_UNION_CODE_P(T) \
((T) == RECORD_TYPE || (T) == UNION_TYPE)
#define OVERLOAD_TYPE_P(T) \
(CLASS_TYPE_P (T) || TREE_CODE (T) == ENUMERAL_TYPE)
/* True if this type is dependent. This predicate is only valid if
TYPE_DEPENDENT_P_VALID is true. */
#define TYPE_DEPENDENT_P(NODE) TYPE_LANG_FLAG_0 (NODE)
/* True if dependent_type_p has been called for this type, with the
result that TYPE_DEPENDENT_P is valid. */
#define TYPE_DEPENDENT_P_VALID(NODE) TYPE_LANG_FLAG_6 (NODE)
/* Nonzero if this type is const-qualified. */
#define CP_TYPE_CONST_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_CONST) != 0)
/* Nonzero if this type is volatile-qualified. */
#define CP_TYPE_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_VOLATILE) != 0)
/* Nonzero if this type is restrict-qualified. */
#define CP_TYPE_RESTRICT_P(NODE) \
((cp_type_quals (NODE) & TYPE_QUAL_RESTRICT) != 0)
/* Nonzero if this type is const-qualified, but not
volatile-qualified. Other qualifiers are ignored. This macro is
used to test whether or not it is OK to bind an rvalue to a
reference. */
#define CP_TYPE_CONST_NON_VOLATILE_P(NODE) \
((cp_type_quals (NODE) & (TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE)) \
== TYPE_QUAL_CONST)
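/* For example (editor's addition), `const int' satisfies this predicate,
while plain `int' and `const volatile int' do not. */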
#define FUNCTION_ARG_CHAIN(NODE) \
TREE_CHAIN (TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Given a FUNCTION_DECL, returns the first TREE_LIST out of TYPE_ARG_TYPES
which refers to a user-written parameter. */
#define FUNCTION_FIRST_USER_PARMTYPE(NODE) \
skip_artificial_parms_for ((NODE), TYPE_ARG_TYPES (TREE_TYPE (NODE)))
/* Similarly, but for DECL_ARGUMENTS. */
#define FUNCTION_FIRST_USER_PARM(NODE) \
skip_artificial_parms_for ((NODE), DECL_ARGUMENTS (NODE))
/* Nonzero iff TYPE is derived from PARENT. Ignores accessibility and
ambiguity issues. */
#define DERIVED_FROM_P(PARENT, TYPE) \
(lookup_base ((TYPE), (PARENT), ba_any, NULL, tf_none) != NULL_TREE)
/* Gives the visibility specification for a class type. */
#define CLASSTYPE_VISIBILITY(TYPE) \
DECL_VISIBILITY (TYPE_MAIN_DECL (TYPE))
#define CLASSTYPE_VISIBILITY_SPECIFIED(TYPE) \
DECL_VISIBILITY_SPECIFIED (TYPE_MAIN_DECL (TYPE))
struct GTY (()) tree_pair_s {
tree purpose;
tree value;
};
typedef tree_pair_s *tree_pair_p;
/* This structure provides additional information above and beyond
what is provided in the ordinary tree_type. In the past, we used it
for the types of class types, template parameter types, typename
types, and so forth. However, there can be many (tens to hundreds
of thousands) of template parameter types in a compilation, and
there's no need for this additional information in that case.
Therefore, we now use this data structure only for class types.
In the past, it was thought that there would be relatively few
class types. However, in the presence of heavy use of templates,
many (i.e., thousands) of classes can easily be generated.
Therefore, we should endeavor to keep the size of this structure to
a minimum. */
struct GTY(()) lang_type {
unsigned char align;
unsigned has_type_conversion : 1;
unsigned has_copy_ctor : 1;
unsigned has_default_ctor : 1;
unsigned const_needs_init : 1;
unsigned ref_needs_init : 1;
unsigned has_const_copy_assign : 1;
unsigned use_template : 2;
unsigned has_mutable : 1;
unsigned com_interface : 1;
unsigned non_pod_class : 1;
unsigned nearly_empty_p : 1;
unsigned user_align : 1;
unsigned has_copy_assign : 1;
unsigned has_new : 1;
unsigned has_array_new : 1;
unsigned gets_delete : 2;
unsigned interface_only : 1;
unsigned interface_unknown : 1;
unsigned contains_empty_class_p : 1;
unsigned anon_aggr : 1;
unsigned non_zero_init : 1;
unsigned empty_p : 1;
/* 32 bits allocated. */
unsigned vec_new_uses_cookie : 1;
unsigned declared_class : 1;
unsigned diamond_shaped : 1;
unsigned repeated_base : 1;
unsigned being_defined : 1;
unsigned debug_requested : 1;
unsigned fields_readonly : 1;
unsigned ptrmemfunc_flag : 1;
unsigned was_anonymous : 1;
unsigned lazy_default_ctor : 1;
unsigned lazy_copy_ctor : 1;
unsigned lazy_copy_assign : 1;
unsigned lazy_destructor : 1;
unsigned has_const_copy_ctor : 1;
unsigned has_complex_copy_ctor : 1;
unsigned has_complex_copy_assign : 1;
unsigned non_aggregate : 1;
unsigned has_complex_dflt : 1;
unsigned has_list_ctor : 1;
unsigned non_std_layout : 1;
unsigned is_literal : 1;
unsigned lazy_move_ctor : 1;
unsigned lazy_move_assign : 1;
unsigned has_complex_move_ctor : 1;
unsigned has_complex_move_assign : 1;
unsigned has_constexpr_ctor : 1;
unsigned unique_obj_representations : 1;
unsigned unique_obj_representations_set : 1;
/* When adding a flag here, consider whether or not it ought to
apply to a template instance if it applies to the template. If
so, make sure to copy it in instantiate_class_template! */
/* There are some bits left to fill out a 32-bit word. Keep track
of this by updating the size of this bitfield whenever you add or
remove a flag. */
unsigned dummy : 4;
tree primary_base;
vec<tree_pair_s, va_gc> *vcall_indices;
tree vtables;
tree typeinfo_var;
vec<tree, va_gc> *vbases;
binding_table nested_udts;
tree as_base;
vec<tree, va_gc> *pure_virtuals;
tree friend_classes;
vec<tree, va_gc> * GTY((reorder ("resort_type_member_vec"))) members;
tree key_method;
tree decl_list;
tree befriending_classes;
/* In a RECORD_TYPE, information specific to Objective-C++, such
as a list of adopted protocols or a pointer to a corresponding
@interface. See objc/objc-act.h for details. */
tree objc_info;
/* FIXME reuse another field? */
tree lambda_expr;
};
/* We used to have a variant type for lang_type. Keep the name of the
checking accessor for the sole survivor. */
#define LANG_TYPE_CLASS_CHECK(NODE) (TYPE_LANG_SPECIFIC (NODE))
/* Nonzero for _CLASSTYPE means that operator delete is defined. */
#define TYPE_GETS_DELETE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->gets_delete)
#define TYPE_GETS_REG_DELETE(NODE) (TYPE_GETS_DELETE (NODE) & 1)
/* Nonzero if `new NODE[x]' should cause the allocation of extra
storage to indicate how many array elements are in use. */
#define TYPE_VEC_NEW_USES_COOKIE(NODE) \
(CLASS_TYPE_P (NODE) \
&& LANG_TYPE_CLASS_CHECK (NODE)->vec_new_uses_cookie)
/* Nonzero means that this _CLASSTYPE node defines ways of converting
itself to other types. */
#define TYPE_HAS_CONVERSION(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_type_conversion)
/* Nonzero means that NODE (a class type) has a default constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_DEFAULT_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_default_ctor)
/* Nonzero means that NODE (a class type) has a copy constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_ctor)
/* Nonzero means that NODE (a class type) has a move constructor --
but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_ctor)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_copy_assign)
/* Nonzero means that NODE (a class type) has an assignment operator
-- but that it has not yet been declared. */
#define CLASSTYPE_LAZY_MOVE_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_move_assign)
/* Nonzero means that NODE (a class type) has a destructor -- but that
it has not yet been declared. */
#define CLASSTYPE_LAZY_DESTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lazy_destructor)
/* Nonzero means that NODE (a class type) is final. */
#define CLASSTYPE_FINAL(NODE) \
TYPE_FINAL_P (NODE)
/* Nonzero means that this _CLASSTYPE node overloads operator=(X&). */
#define TYPE_HAS_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_assign)
/* True iff the class type NODE has an "operator =" that takes a
parameter of type "const X&". */
#define TYPE_HAS_CONST_COPY_ASSIGN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_assign)
/* Nonzero means that this _CLASSTYPE node has an X(X&) constructor. */
#define TYPE_HAS_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_copy_ctor)
#define TYPE_HAS_CONST_COPY_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_const_copy_ctor)
/* Nonzero if this class has an X(initializer_list<T>) constructor. */
#define TYPE_HAS_LIST_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_list_ctor)
/* Nonzero if this class has a constexpr constructor other than a copy/move
constructor. Note that a class can have constexpr constructors for
static initialization even if it isn't a literal class. */
#define TYPE_HAS_CONSTEXPR_CTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_constexpr_ctor)
/* Nonzero if this class defines an overloaded operator new. (An
operator new [] doesn't count.) */
#define TYPE_HAS_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_new)
/* Nonzero if this class defines an overloaded operator new[]. */
#define TYPE_HAS_ARRAY_NEW_OPERATOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_array_new)
/* Nonzero means that this type is being defined. I.e., the left brace
starting the definition of this type has been seen. */
#define TYPE_BEING_DEFINED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->being_defined)
/* Nonzero means that this type is either complete or being defined, so we
can do lookup in it. */
#define COMPLETE_OR_OPEN_TYPE_P(NODE) \
(COMPLETE_TYPE_P (NODE) || (CLASS_TYPE_P (NODE) && TYPE_BEING_DEFINED (NODE)))
/* Mark bits for repeated base checks. */
#define TYPE_MARKED_P(NODE) TREE_LANG_FLAG_6 (TYPE_CHECK (NODE))
/* Nonzero if the class NODE has multiple paths to the same (virtual)
base object. */
#define CLASSTYPE_DIAMOND_SHAPED_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->diamond_shaped)
/* Nonzero if the class NODE has multiple instances of the same base
type. */
#define CLASSTYPE_REPEATED_BASE_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->repeated_base)
/* The member function with which the vtable will be emitted:
the first noninline non-pure-virtual member function. NULL_TREE
if there is no key function or if this is a class template. */
#define CLASSTYPE_KEY_METHOD(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->key_method)
/* Vector of members. During definition, it is unordered and only
member functions are present. After completion it is sorted and
contains both member functions and non-functions. STAT_HACK is
involved to preserve the one-slot-per-name invariant. */
#define CLASSTYPE_MEMBER_VEC(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->members)
/* For class templates, this is a TREE_LIST of all member data,
functions, types, and friends in the order of declaration.
The TREE_PURPOSE of each TREE_LIST is NULL_TREE for a friend,
and the RECORD_TYPE for the class template otherwise. */
#define CLASSTYPE_DECL_LIST(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->decl_list)
/* A FUNCTION_DECL or OVERLOAD for the constructors for NODE. These
are the constructors that take an in-charge parameter. */
#define CLASSTYPE_CONSTRUCTORS(NODE) \
(get_class_binding_direct (NODE, ctor_identifier))
/* A FUNCTION_DECL for the destructor for NODE. This is the
destructor that takes an in-charge parameter. If
CLASSTYPE_LAZY_DESTRUCTOR is true, then this entry will be NULL
until the destructor is created with lazily_declare_fn. */
#define CLASSTYPE_DESTRUCTOR(NODE) \
(get_class_binding_direct (NODE, dtor_identifier))
/* A dictionary of the nested user-defined-types (class-types, or enums)
found within this class. This table includes nested member class
templates. */
#define CLASSTYPE_NESTED_UTDS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nested_udts)
/* Nonzero if NODE has a primary base class, i.e., a base class with
which it shares the virtual function table pointer. */
#define CLASSTYPE_HAS_PRIMARY_BASE_P(NODE) \
(CLASSTYPE_PRIMARY_BINFO (NODE) != NULL_TREE)
/* If non-NULL, this is the binfo for the primary base class, i.e.,
the base class which contains the virtual function table pointer
for this class. */
#define CLASSTYPE_PRIMARY_BINFO(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->primary_base)
/* A vector of BINFOs for the direct and indirect virtual base classes
that this type uses in a post-order depth-first left-to-right
order. (In other words, these bases appear in the order that they
should be initialized.) */
#define CLASSTYPE_VBASECLASSES(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->vbases)
/* The type corresponding to NODE when NODE is used as a base class,
i.e., NODE without virtual base classes or tail padding. */
#define CLASSTYPE_AS_BASE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->as_base)
/* True iff NODE is the CLASSTYPE_AS_BASE version of some type. */
#define IS_FAKE_BASE_TYPE(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_CONTEXT (NODE) && CLASS_TYPE_P (TYPE_CONTEXT (NODE)) \
&& CLASSTYPE_AS_BASE (TYPE_CONTEXT (NODE)) == (NODE))
/* These are the size and alignment of the type without its virtual
base classes, for when we use this type as a base itself. */
#define CLASSTYPE_SIZE(NODE) TYPE_SIZE (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_SIZE_UNIT(NODE) TYPE_SIZE_UNIT (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_ALIGN(NODE) TYPE_ALIGN (CLASSTYPE_AS_BASE (NODE))
#define CLASSTYPE_USER_ALIGN(NODE) TYPE_USER_ALIGN (CLASSTYPE_AS_BASE (NODE))
/* The alignment of NODE, without its virtual bases, in bytes. */
#define CLASSTYPE_ALIGN_UNIT(NODE) \
(CLASSTYPE_ALIGN (NODE) / BITS_PER_UNIT)
/* A vec<tree> of virtual functions which cannot be inherited by
derived classes. When deriving from this type, the derived
class must provide its own definition for each of these functions. */
#define CLASSTYPE_PURE_VIRTUALS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->pure_virtuals)
/* Nonzero means that this type is an abstract class type. */
#define ABSTRACT_CLASS_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_PURE_VIRTUALS (NODE))
/* Nonzero means that this type has an X() constructor. */
#define TYPE_HAS_DEFAULT_CONSTRUCTOR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->has_default_ctor)
/* Nonzero means that this type contains a mutable member. */
#define CLASSTYPE_HAS_MUTABLE(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_mutable)
#define TYPE_HAS_MUTABLE_P(NODE) (cp_has_mutable_p (NODE))
/* Nonzero means that this class type is not POD for the purpose of layout
(as defined in the ABI). This is different from the language's POD. */
#define CLASSTYPE_NON_LAYOUT_POD_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_pod_class)
/* Nonzero means that this class type is a non-standard-layout class. */
#define CLASSTYPE_NON_STD_LAYOUT(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_std_layout)
/* Nonzero means that this class type does have unique object
representations. */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations)
/* Nonzero means that this class type has
CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS computed. */
#define CLASSTYPE_UNIQUE_OBJ_REPRESENTATIONS_SET(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->unique_obj_representations_set)
/* Nonzero means that this class contains pod types whose default
initialization is not a zero initialization (namely, pointers to
data members). */
#define CLASSTYPE_NON_ZERO_INIT_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_zero_init)
/* Nonzero if this class is "empty" in the sense of the C++ ABI. */
#define CLASSTYPE_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->empty_p)
/* Nonzero if this class is "nearly empty", i.e., contains only a
virtual function table pointer. */
#define CLASSTYPE_NEARLY_EMPTY_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->nearly_empty_p)
/* Nonzero if this class contains an empty subobject. */
#define CLASSTYPE_CONTAINS_EMPTY_CLASS_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->contains_empty_class_p)
/* A list of class types of which this type is a friend. The
TREE_VALUE is normally a TYPE, but will be a TEMPLATE_DECL in the
case of a template friend. */
#define CLASSTYPE_FRIEND_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->friend_classes)
/* A list of the classes which grant friendship to this class. */
#define CLASSTYPE_BEFRIENDING_CLASSES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->befriending_classes)
/* The associated LAMBDA_EXPR that made this class. */
#define CLASSTYPE_LAMBDA_EXPR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->lambda_expr)
/* The extra mangling scope for this closure type. */
#define LAMBDA_TYPE_EXTRA_SCOPE(NODE) \
(LAMBDA_EXPR_EXTRA_SCOPE (CLASSTYPE_LAMBDA_EXPR (NODE)))
/* Say whether this node was declared as a "class" or a "struct". */
#define CLASSTYPE_DECLARED_CLASS(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->declared_class)
/* Nonzero if this class has const members
which have no specified initialization. */
#define CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init : 0)
#define SET_CLASSTYPE_READONLY_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->const_needs_init = (VALUE))
/* Nonzero if this class has ref members
which have no specified initialization. */
#define CLASSTYPE_REF_FIELDS_NEED_INIT(NODE) \
(TYPE_LANG_SPECIFIC (NODE) \
? LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init : 0)
#define SET_CLASSTYPE_REF_FIELDS_NEED_INIT(NODE, VALUE) \
(LANG_TYPE_CLASS_CHECK (NODE)->ref_needs_init = (VALUE))
/* Nonzero if this class is included from a header file which employs
`#pragma interface', and it is not included in its implementation file. */
#define CLASSTYPE_INTERFACE_ONLY(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_only)
/* True if we have already determined whether or not vtables, VTTs,
typeinfo, and other similar per-class data should be emitted in
this translation unit. This flag does not indicate whether or not
these items should be emitted; it only indicates that we know one
way or the other. */
#define CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown == 0)
/* The opposite of CLASSTYPE_INTERFACE_KNOWN. */
#define CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown)
#define SET_CLASSTYPE_INTERFACE_UNKNOWN_X(NODE,X) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = !!(X))
#define SET_CLASSTYPE_INTERFACE_UNKNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 1)
#define SET_CLASSTYPE_INTERFACE_KNOWN(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->interface_unknown = 0)
/* Nonzero if a _DECL node requires us to output debug info for this class. */
#define CLASSTYPE_DEBUG_REQUESTED(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->debug_requested)
/* Additional macros for inheritance information. */
/* Nonzero means that this class is on a path leading to a new vtable. */
#define BINFO_VTABLE_PATH_MARKED(NODE) BINFO_FLAG_1 (NODE)
/* Nonzero means B (a BINFO) has its own vtable. Any copies will not
have this flag set. */
#define BINFO_NEW_VTABLE_MARKED(B) (BINFO_FLAG_2 (B))
/* Compare a BINFO_TYPE with another type for equality. For a binfo,
this is functionally equivalent to using same_type_p, but
measurably faster. At least one of the arguments must be a
BINFO_TYPE. The other can be a BINFO_TYPE or a regular type. If
BINFO_TYPE(T) ever stops being the main variant of the class the
binfo is for, this macro must change. */
#define SAME_BINFO_TYPE_P(A, B) ((A) == (B))
/* Any subobject that needs a new vtable must have a vptr and must not
be a non-virtual primary base (since it would then use the vtable from a
derived class and never become non-primary.) */
#define SET_BINFO_NEW_VTABLE_MARKED(B) \
(BINFO_NEW_VTABLE_MARKED (B) = 1, \
gcc_assert (!BINFO_PRIMARY_P (B) || BINFO_VIRTUAL_P (B)), \
gcc_assert (TYPE_VFIELD (BINFO_TYPE (B))))
/* Nonzero if this binfo is for a dependent base - one that should not
be searched. */
#define BINFO_DEPENDENT_BASE_P(NODE) BINFO_FLAG_3 (NODE)
/* Nonzero if this binfo has lost its primary base binfo (because that
is a nearly-empty virtual base that has been taken by some other
base in the complete hierarchy). */
#define BINFO_LOST_PRIMARY_P(NODE) BINFO_FLAG_4 (NODE)
/* Nonzero if this BINFO is a primary base class. */
#define BINFO_PRIMARY_P(NODE) BINFO_FLAG_5 (NODE)
/* A vec<tree_pair_s> of the vcall indices associated with the class
NODE. The PURPOSE of each element is a FUNCTION_DECL for a virtual
function. The VALUE is the index into the virtual table where the
vcall offset for that function is stored, when NODE is a virtual
base. */
#define CLASSTYPE_VCALL_INDICES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vcall_indices)
/* The various vtables for the class NODE. The primary vtable will be
first, followed by the construction vtables and VTT, if any. */
#define CLASSTYPE_VTABLES(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->vtables)
/* The std::type_info variable representing this class, or NULL if no
such variable has been created. This field is only set for the
TYPE_MAIN_VARIANT of the class. */
#define CLASSTYPE_TYPEINFO_VAR(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->typeinfo_var)
/* Accessor macros for the BINFO_VIRTUALS list. */
/* The number of bytes by which to adjust the `this' pointer when
calling this virtual function. Subtract this value from the this
pointer. Always non-NULL, might be constant zero though. */
#define BV_DELTA(NODE) (TREE_PURPOSE (NODE))
/* If non-NULL, the vtable index at which to find the vcall offset
when calling this virtual function. Add the value at that vtable
index to the this pointer. */
#define BV_VCALL_INDEX(NODE) (TREE_TYPE (NODE))
/* The function to call. */
#define BV_FN(NODE) (TREE_VALUE (NODE))
/* Whether or not this entry is for a lost primary virtual base. */
#define BV_LOST_PRIMARY(NODE) (TREE_LANG_FLAG_0 (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, a list of the exceptions that
this type can raise. Each TREE_VALUE is a _TYPE. The TREE_VALUE
will be NULL_TREE to indicate a throw specification of `()', or
no exceptions allowed. For a noexcept specification, TREE_VALUE
is NULL_TREE and TREE_PURPOSE is the constant-expression. For
a deferred noexcept-specification, TREE_PURPOSE is a DEFERRED_NOEXCEPT
(for templates) or an OVERLOAD list of functions (for implicitly
declared functions). */
#define TYPE_RAISES_EXCEPTIONS(NODE) \
TYPE_LANG_SLOT_1 (FUNC_OR_METHOD_CHECK (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, return 1 iff it is declared `throw()'
or noexcept(true). */
#define TYPE_NOTHROW_P(NODE) nothrow_spec_p (TYPE_RAISES_EXCEPTIONS (NODE))
/* For FUNCTION_TYPE or METHOD_TYPE, true if NODE is noexcept. This is the
case for things declared noexcept(true) and, with -fnothrow-opt, for
throw() functions. */
#define TYPE_NOEXCEPT_P(NODE) type_noexcept_p (NODE)
/* The binding level associated with the namespace. */
#define NAMESPACE_LEVEL(NODE) \
(LANG_DECL_NS_CHECK (NODE)->level)
/* Discriminator values for lang_decl. */
enum lang_decl_selector
{
lds_min,
lds_fn,
lds_ns,
lds_parm,
lds_decomp
};
/* Flags shared by all forms of DECL_LANG_SPECIFIC.
Some of the flags live here only to make lang_decl_min/fn smaller. Do
not make this struct larger than 32 bits; instead, make sel smaller. */
struct GTY(()) lang_decl_base {
/* Larger than necessary for faster access. */
ENUM_BITFIELD(lang_decl_selector) selector : 16;
ENUM_BITFIELD(languages) language : 1;
unsigned use_template : 2;
unsigned not_really_extern : 1; /* var or fn */
unsigned initialized_in_class : 1; /* var or fn */
unsigned repo_available_p : 1; /* var or fn */
unsigned threadprivate_or_deleted_p : 1; /* var or fn */
unsigned anticipated_p : 1; /* fn, type or template */
/* anticipated_p reused as DECL_OMP_PRIVATIZED_MEMBER in var */
unsigned friend_or_tls : 1; /* var, fn, type or template */
unsigned unknown_bound_p : 1; /* var */
unsigned odr_used : 1; /* var or fn */
unsigned u2sel : 1;
unsigned concept_p : 1; /* applies to vars and functions */
unsigned var_declared_inline_p : 1; /* var */
unsigned dependent_init_p : 1; /* var */
/* 1 spare bit */
};
/* True for DECL codes which have template info and access. */
#define LANG_DECL_HAS_MIN(NODE) \
(VAR_OR_FUNCTION_DECL_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL \
|| TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL \
|| TREE_CODE (NODE) == USING_DECL)
/* DECL_LANG_SPECIFIC for the above codes. */
struct GTY(()) lang_decl_min {
struct lang_decl_base base;
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_ALIAS.
In a FUNCTION_DECL for which DECL_THUNK_P does not hold,
VAR_DECL, TYPE_DECL, or TEMPLATE_DECL, this is
DECL_TEMPLATE_INFO. */
tree template_info;
union lang_decl_u2 {
/* In a FUNCTION_DECL for which DECL_THUNK_P holds, this is
THUNK_VIRTUAL_OFFSET.
In a VAR_DECL for which DECL_HAS_VALUE_EXPR_P holds,
this is DECL_CAPTURED_VARIABLE.
Otherwise this is DECL_ACCESS. */
tree GTY ((tag ("0"))) access;
/* For TREE_STATIC VAR_DECL in function, this is DECL_DISCRIMINATOR. */
int GTY ((tag ("1"))) discriminator;
} GTY ((desc ("%0.u.base.u2sel"))) u2;
};
/* Additional DECL_LANG_SPECIFIC information for functions. */
struct GTY(()) lang_decl_fn {
struct lang_decl_min min;
/* In an overloaded operator, this is the compressed operator code. */
unsigned ovl_op_code : 6;
unsigned global_ctor_p : 1;
unsigned global_dtor_p : 1;
unsigned static_function : 1;
unsigned pure_virtual : 1;
unsigned defaulted_p : 1;
unsigned has_in_charge_parm_p : 1;
unsigned has_vtt_parm_p : 1;
unsigned pending_inline_p : 1;
unsigned nonconverting : 1;
unsigned thunk_p : 1;
unsigned this_thunk_p : 1;
unsigned hidden_friend_p : 1;
unsigned omp_declare_reduction_p : 1;
unsigned spare : 13;
/* 32-bits padding on 64-bit host. */
/* For a non-thunk function decl, this is a tree list of
friendly classes. For a thunk function decl, it is the
thunked to function decl. */
tree befriending_classes;
/* For a non-virtual FUNCTION_DECL, this is
DECL_FRIEND_CONTEXT. For a virtual FUNCTION_DECL for which
DECL_THIS_THUNK_P does not hold, this is DECL_THUNKS. Both
this pointer and result pointer adjusting thunks are
chained here. This pointer thunks to return pointer thunks
will be chained on the return pointer thunk. */
tree context;
union lang_decl_u5
{
/* In a non-thunk FUNCTION_DECL or TEMPLATE_DECL, this is
DECL_CLONED_FUNCTION. */
tree GTY ((tag ("0"))) cloned_function;
/* In a FUNCTION_DECL for which THUNK_P holds this is the
THUNK_FIXED_OFFSET. */
HOST_WIDE_INT GTY ((tag ("1"))) fixed_offset;
} GTY ((desc ("%1.thunk_p"))) u5;
union lang_decl_u3
{
struct cp_token_cache * GTY ((tag ("1"))) pending_inline_info;
struct language_function * GTY ((tag ("0")))
saved_language_function;
} GTY ((desc ("%1.pending_inline_p"))) u;
};
/* DECL_LANG_SPECIFIC for namespaces. */
struct GTY(()) lang_decl_ns {
struct lang_decl_base base;
cp_binding_level *level;
/* using directives and inline children. These need to be va_gc,
because of PCH. */
vec<tree, va_gc> *usings;
vec<tree, va_gc> *inlinees;
/* Hash table of bound decls. It'd be nice to have this inline, but
as the hash_map has a dtor, we can't then put this struct into a
union (until moving to c++11). */
hash_table<named_decl_hash> *bindings;
};
/* DECL_LANG_SPECIFIC for parameters. */
struct GTY(()) lang_decl_parm {
struct lang_decl_base base;
int level;
int index;
};
/* Additional DECL_LANG_SPECIFIC information for structured bindings. */
struct GTY(()) lang_decl_decomp {
struct lang_decl_min min;
/* The artificial underlying "e" variable of the structured binding
variable. */
tree base;
};
/* DECL_LANG_SPECIFIC for all types. It would be nice to just make this a
union rather than a struct containing a union as its only field, but
tree.h declares it as a struct. */
struct GTY(()) lang_decl {
union GTY((desc ("%h.base.selector"))) lang_decl_u {
/* No object of only the base type exists. */
struct lang_decl_base GTY ((default)) base;
struct lang_decl_min GTY((tag ("lds_min"))) min;
struct lang_decl_fn GTY ((tag ("lds_fn"))) fn;
struct lang_decl_ns GTY((tag ("lds_ns"))) ns;
struct lang_decl_parm GTY((tag ("lds_parm"))) parm;
struct lang_decl_decomp GTY((tag ("lds_decomp"))) decomp;
} u;
};
/* Looks through a template (if present) to find what it declares. */
#define STRIP_TEMPLATE(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL ? DECL_TEMPLATE_RESULT (NODE) : NODE)
#if defined ENABLE_TREE_CHECKING && (GCC_VERSION >= 2007)
#define LANG_DECL_MIN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE)) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.min; })
/* We want to be able to check DECL_CONSTRUCTOR_P and such on a function
template, not just on a FUNCTION_DECL. So when looking for things in
lang_decl_fn, look down through a TEMPLATE_DECL into its result. */
#define LANG_DECL_FN_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE)); \
if (!DECL_DECLARES_FUNCTION_P (NODE) \
|| lt->u.base.selector != lds_fn) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.fn; })
#define LANG_DECL_NS_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != NAMESPACE_DECL \
|| lt->u.base.selector != lds_ns) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.ns; })
#define LANG_DECL_PARM_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (TREE_CODE (NODE) != PARM_DECL \
|| lt->u.base.selector != lds_parm) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.parm; })
#define LANG_DECL_DECOMP_CHECK(NODE) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!VAR_P (NODE) \
|| lt->u.base.selector != lds_decomp) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.decomp; })
#define LANG_DECL_U2_CHECK(NODE, TF) __extension__ \
({ struct lang_decl *lt = DECL_LANG_SPECIFIC (NODE); \
if (!LANG_DECL_HAS_MIN (NODE) || lt->u.base.u2sel != TF) \
lang_check_failed (__FILE__, __LINE__, __FUNCTION__); \
&lt->u.min.u2; })
#else
#define LANG_DECL_MIN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.min)
#define LANG_DECL_FN_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (STRIP_TEMPLATE (NODE))->u.fn)
#define LANG_DECL_NS_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.ns)
#define LANG_DECL_PARM_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.parm)
#define LANG_DECL_DECOMP_CHECK(NODE) \
(&DECL_LANG_SPECIFIC (NODE)->u.decomp)
#define LANG_DECL_U2_CHECK(NODE, TF) \
(&DECL_LANG_SPECIFIC (NODE)->u.min.u2)
#endif /* ENABLE_TREE_CHECKING */
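/* Illustrative sketch (editor's addition): the checked accessors above
read like plain field accesses, e.g.

  bool
  example_static_p (tree fndecl)
  {
    return LANG_DECL_FN_CHECK (fndecl)->static_function;
  }

With ENABLE_TREE_CHECKING, passing a node whose lang_decl does not use
the lds_fn selector aborts via lang_check_failed instead of silently
reading the wrong union member. */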
/* For a FUNCTION_DECL or a VAR_DECL, the language linkage for the
declaration. Some entities (like a member function in a local
class, or a local variable) do not have linkage at all, and this
macro should not be used in those cases.
Implementation note: A FUNCTION_DECL without DECL_LANG_SPECIFIC was
created by language-independent code, and has C linkage. Most
VAR_DECLs have C++ linkage, and do not have DECL_LANG_SPECIFIC, but
we do create DECL_LANG_SPECIFIC for variables with non-C++ linkage. */
#define DECL_LANGUAGE(NODE) \
(DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.language \
: (TREE_CODE (NODE) == FUNCTION_DECL \
? lang_c : lang_cplusplus))
/* Set the language linkage for NODE to LANGUAGE. */
#define SET_DECL_LANGUAGE(NODE, LANGUAGE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.language = (LANGUAGE))
/* For FUNCTION_DECLs and TEMPLATE_DECLs: nonzero means that this function
is a constructor. */
#define DECL_CONSTRUCTOR_P(NODE) \
IDENTIFIER_CTOR_P (DECL_NAME (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a complete
object. */
#define DECL_COMPLETE_CONSTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == complete_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor for a base
object. */
#define DECL_BASE_CONSTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == base_ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a constructor, but not either the
specialized in-charge constructor or the specialized not-in-charge
constructor. */
#define DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == ctor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a copy constructor. */
#define DECL_COPY_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && copy_fn_p (NODE) > 0)
/* Nonzero if NODE (a FUNCTION_DECL) is a move constructor. */
#define DECL_MOVE_CONSTRUCTOR_P(NODE) \
(DECL_CONSTRUCTOR_P (NODE) && move_fn_p (NODE))
/* Nonzero if NODE (a FUNCTION_DECL or TEMPLATE_DECL)
is a destructor. */
#define DECL_DESTRUCTOR_P(NODE) \
IDENTIFIER_DTOR_P (DECL_NAME (NODE))
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor, but not the
specialized in-charge destructor, the in-charge deleting destructor,
or the base destructor. */
#define DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object. */
#define DECL_COMPLETE_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == complete_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a base
object. */
#define DECL_BASE_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == base_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a destructor for a complete
object that deletes the object after it has been destroyed. */
#define DECL_DELETING_DESTRUCTOR_P(NODE) \
(DECL_NAME (NODE) == deleting_dtor_identifier)
/* Nonzero if NODE (a FUNCTION_DECL) is a cloned constructor or
destructor. */
#define DECL_CLONED_FUNCTION_P(NODE) (!!decl_cloned_function_p (NODE, true))
/* If DECL_CLONED_FUNCTION_P holds, this is the function that was
cloned. */
#define DECL_CLONED_FUNCTION(NODE) (*decl_cloned_function_p (NODE, false))
/* Perform an action for each clone of FN, if FN is a function with
clones. This macro should be used like:
FOR_EACH_CLONE (clone, fn)
{ ... }
*/
#define FOR_EACH_CLONE(CLONE, FN) \
if (!(TREE_CODE (FN) == FUNCTION_DECL \
&& (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (FN) \
|| DECL_MAYBE_IN_CHARGE_DESTRUCTOR_P (FN))))\
; \
else \
for (CLONE = DECL_CHAIN (FN); \
CLONE && DECL_CLONED_FUNCTION_P (CLONE); \
CLONE = DECL_CHAIN (CLONE))
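/* Concrete sketch (editor's addition): marking every clone of a
maybe-in-charge constructor or destructor FN as used:

  tree clone;
  FOR_EACH_CLONE (clone, fn)
    TREE_USED (clone) = 1;
*/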
/* Nonzero if NODE has DECL_DISCRIMINATOR and not DECL_ACCESS. */
#define DECL_DISCRIMINATOR_P(NODE) \
(VAR_P (NODE) && DECL_FUNCTION_SCOPE_P (NODE))
/* Discriminator for name mangling. */
#define DECL_DISCRIMINATOR(NODE) (LANG_DECL_U2_CHECK (NODE, 1)->discriminator)
/* True iff DECL_DISCRIMINATOR is set for a DECL_DISCRIMINATOR_P decl. */
#define DECL_DISCRIMINATOR_SET_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && DECL_LANG_SPECIFIC (NODE)->u.base.u2sel == 1)
/* The index of a user-declared parameter in its function, starting at 1.
All artificial parameters will have index 0. */
#define DECL_PARM_INDEX(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->index)
/* The level of a user-declared parameter in its function, starting at 1.
A parameter of the function will have level 1; a parameter of the first
nested function declarator (i.e. t in void f (void (*p)(T t))) will have
level 2. */
#define DECL_PARM_LEVEL(NODE) \
(LANG_DECL_PARM_CHECK (NODE)->level)
/* Nonzero if the VTT parm has been added to NODE. */
#define DECL_HAS_VTT_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_vtt_parm_p)
/* Nonzero if NODE is a FUNCTION_DECL for which a VTT parameter is
required. */
#define DECL_NEEDS_VTT_PARM_P(NODE) \
(CLASSTYPE_VBASECLASSES (DECL_CONTEXT (NODE)) \
&& (DECL_BASE_CONSTRUCTOR_P (NODE) \
|| DECL_BASE_DESTRUCTOR_P (NODE)))
/* Nonzero if NODE is a user-defined conversion operator. */
#define DECL_CONV_FN_P(NODE) IDENTIFIER_CONV_OP_P (DECL_NAME (NODE))
/* The type to which conversion operator FN converts. */
#define DECL_CONV_FN_TYPE(FN) \
TREE_TYPE ((gcc_checking_assert (DECL_CONV_FN_P (FN)), DECL_NAME (FN)))
/* Nonzero if NODE, a static data member, was declared in its class as an
array of unknown bound. */
#define VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.unknown_bound_p \
: false)
#define SET_VAR_HAD_UNKNOWN_BOUND(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.unknown_bound_p = true)
/* True iff decl NODE is for an overloaded operator. */
#define DECL_OVERLOADED_OPERATOR_P(NODE) \
IDENTIFIER_ANY_OP_P (DECL_NAME (NODE))
/* Nonzero if NODE is an assignment operator (including += and such). */
#define DECL_ASSIGNMENT_OPERATOR_P(NODE) \
IDENTIFIER_ASSIGN_OP_P (DECL_NAME (NODE))
/* NODE is a FUNCTION_DECL for an overloaded operator. Return its
compressed (raw) operator code. Note that this is not a TREE_CODE. */
#define DECL_OVERLOADED_OPERATOR_CODE_RAW(NODE) \
(LANG_DECL_FN_CHECK (NODE)->ovl_op_code)
/* DECL is an overloaded operator. Test whether it is for tree code
CODE (which must be spelled as a literal constant, since it is
token-pasted onto OVL_OP_). */
#define DECL_OVERLOADED_OPERATOR_IS(DECL, CODE) \
(DECL_OVERLOADED_OPERATOR_CODE_RAW (DECL) == OVL_OP_##CODE)
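/* Illustrative sketch (editor's addition), assuming OVL_OP_EQ_EXPR is
among the generated ovl_op codes:

  if (DECL_OVERLOADED_OPERATOR_P (fndecl)
      && DECL_OVERLOADED_OPERATOR_IS (fndecl, EQ_EXPR))
    ;  // fndecl declares an operator==
*/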
/* For FUNCTION_DECLs: nonzero means that this function is a
constructor or a destructor with an extra in-charge parameter to
control whether or not virtual bases are constructed. */
#define DECL_HAS_IN_CHARGE_PARM_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->has_in_charge_parm_p)
/* Nonzero if DECL is a declaration of __builtin_constant_p. */
#define DECL_IS_BUILTIN_CONSTANT_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_BUILT_IN_CLASS (NODE) == BUILT_IN_NORMAL \
&& DECL_FUNCTION_CODE (NODE) == BUILT_IN_CONSTANT_P)
/* Nonzero for _DECL means that this decl appears in (or will appear
in) as a member in a RECORD_TYPE or UNION_TYPE node. It is also for
detecting circularity in case members are multiply defined. In the
case of a VAR_DECL, it is also used to determine how program storage
should be allocated. */
#define DECL_IN_AGGR_P(NODE) (DECL_LANG_FLAG_3 (NODE))
/* Nonzero for a VAR_DECL means that the variable's initialization (if
any) has been processed. (In general, DECL_INITIALIZED_P is
!DECL_EXTERNAL, but static data members may be initialized even if
not defined.) */
#define DECL_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_1 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL iff an explicit initializer was provided
or a non-trivial constructor is called. */
#define DECL_NONTRIVIALLY_INITIALIZED_P(NODE) \
(TREE_LANG_FLAG_3 (VAR_DECL_CHECK (NODE)))
/* Nonzero for a VAR_DECL that was initialized with a
constant-expression. */
#define DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P(NODE) \
(TREE_LANG_FLAG_2 (VAR_DECL_CHECK (NODE)))
/* Nonzero if the DECL was initialized in the class definition itself,
rather than outside the class. This is used for both static member
VAR_DECLS, and FUNCTION_DECLS that are defined in the class. */
#define DECL_INITIALIZED_IN_CLASS_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.initialized_in_class)
/* Nonzero if the DECL is used in the sense of 3.2 [basic.def.odr].
Only available for decls with DECL_LANG_SPECIFIC. */
#define DECL_ODR_USED(DECL) \
(DECL_LANG_SPECIFIC (VAR_OR_FUNCTION_DECL_CHECK (DECL)) \
->u.base.odr_used)
/* Nonzero for DECL means that this decl is just a friend declaration,
and should not be added to the list of members for this class. */
#define DECL_FRIEND_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.friend_or_tls)
/* Nonzero if the thread-local variable was declared with __thread as
opposed to thread_local. */
#define DECL_GNU_TLS_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
&& DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls)
#define SET_DECL_GNU_TLS_P(NODE) \
(retrofit_lang_decl (VAR_DECL_CHECK (NODE)), \
DECL_LANG_SPECIFIC (NODE)->u.base.friend_or_tls = true)
/* A TREE_LIST of the types which have befriended this FUNCTION_DECL. */
#define DECL_BEFRIENDING_CLASSES(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* Nonzero for FUNCTION_DECL means that this decl is a static
member function. */
#define DECL_STATIC_FUNCTION_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->static_function)
/* Nonzero for FUNCTION_DECL means that this decl is a non-static
member function. */
#define DECL_NONSTATIC_MEMBER_FUNCTION_P(NODE) \
(TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE)
/* Nonzero for FUNCTION_DECL means that this decl is a member function
(static or non-static). */
#define DECL_FUNCTION_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) || DECL_STATIC_FUNCTION_P (NODE))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as const X *const. */
#define DECL_CONST_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_CONST_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for FUNCTION_DECL means that this member function
has `this' as volatile X *const. */
#define DECL_VOLATILE_MEMFUNC_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
&& CP_TYPE_VOLATILE_P (TREE_TYPE (TREE_VALUE \
(TYPE_ARG_TYPES (TREE_TYPE (NODE))))))
/* Nonzero for a DECL means that this member is a non-static member. */
#define DECL_NONSTATIC_MEMBER_P(NODE) \
(DECL_NONSTATIC_MEMBER_FUNCTION_P (NODE) \
|| TREE_CODE (NODE) == FIELD_DECL)
/* Nonzero for _DECL means that this member object type
is mutable. */
#define DECL_MUTABLE_P(NODE) (DECL_LANG_FLAG_0 (NODE))
/* Nonzero for _DECL means that this constructor or conversion function is
non-converting. */
#define DECL_NONCONVERTING_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->nonconverting)
/* Nonzero for FUNCTION_DECL means that this member function is a pure
virtual function. */
#define DECL_PURE_VIRTUAL_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pure_virtual)
/* True (in a FUNCTION_DECL) if NODE is a virtual function that is an
invalid overrider for a function from a base class. Once we have
complained about an invalid overrider we avoid complaining about it
again. */
#define DECL_INVALID_OVERRIDER_P(NODE) \
(DECL_LANG_FLAG_4 (NODE))
/* True (in a FUNCTION_DECL) if NODE is a function declared with
an override virt-specifier */
#define DECL_OVERRIDE_P(NODE) (TREE_LANG_FLAG_0 (NODE))
/* The thunks associated with NODE, a FUNCTION_DECL. */
#define DECL_THUNKS(NODE) \
(DECL_VIRTUAL_P (NODE) ? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* Set DECL_THUNKS. */
#define SET_DECL_THUNKS(NODE,THUNKS) \
(LANG_DECL_FN_CHECK (NODE)->context = (THUNKS))
/* If NODE, a FUNCTION_DECL, is a C++11 inheriting constructor, then this
is the constructor it inherits from. */
#define DECL_INHERITED_CTOR(NODE) \
(DECL_DECLARES_FUNCTION_P (NODE) && DECL_CONSTRUCTOR_P (NODE) \
? LANG_DECL_FN_CHECK (NODE)->context : NULL_TREE)
/* And this is the base that the constructor comes from. */
#define DECL_INHERITED_CTOR_BASE(NODE) \
(DECL_INHERITED_CTOR (NODE) \
? DECL_CONTEXT (flag_new_inheriting_ctors \
? strip_inheriting_ctors (NODE) \
: DECL_INHERITED_CTOR (NODE)) \
: NULL_TREE)
/* Set the inherited base. */
#define SET_DECL_INHERITED_CTOR(NODE,INH) \
(LANG_DECL_FN_CHECK (NODE)->context = (INH))
/* Nonzero if NODE is a thunk, rather than an ordinary function. */
#define DECL_THUNK_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL \
&& DECL_LANG_SPECIFIC (NODE) \
&& LANG_DECL_FN_CHECK (NODE)->thunk_p)
/* Set DECL_THUNK_P for node. */
#define SET_DECL_THUNK_P(NODE, THIS_ADJUSTING) \
(LANG_DECL_FN_CHECK (NODE)->thunk_p = 1, \
LANG_DECL_FN_CHECK (NODE)->this_thunk_p = (THIS_ADJUSTING))
/* Nonzero if NODE is a this pointer adjusting thunk. */
#define DECL_THIS_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a result pointer adjusting thunk. */
#define DECL_RESULT_THUNK_P(NODE) \
(DECL_THUNK_P (NODE) && !LANG_DECL_FN_CHECK (NODE)->this_thunk_p)
/* Nonzero if NODE is a FUNCTION_DECL, but not a thunk. */
#define DECL_NON_THUNK_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL && !DECL_THUNK_P (NODE))
/* Nonzero if NODE is `extern "C"'. */
#define DECL_EXTERN_C_P(NODE) \
(DECL_LANGUAGE (NODE) == lang_c)
/* Nonzero if NODE is an `extern "C"' function. */
#define DECL_EXTERN_C_FUNCTION_P(NODE) \
(DECL_NON_THUNK_FUNCTION_P (NODE) && DECL_EXTERN_C_P (NODE))
/* True iff DECL is an entity with vague linkage whose definition is
available in this translation unit. */
#define DECL_REPO_AVAILABLE_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.repo_available_p)
/* True if DECL is declared 'constexpr'. */
#define DECL_DECLARED_CONSTEXPR_P(DECL) \
DECL_LANG_FLAG_8 (VAR_OR_FUNCTION_DECL_CHECK (STRIP_TEMPLATE (DECL)))
// True if NODE was declared as 'concept'. The flag implies that the
// declaration is constexpr, that the declaration cannot be specialized or
// refined, and that the result type must be convertible to bool.
#define DECL_DECLARED_CONCEPT_P(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.concept_p)
/* Nonzero if this DECL is the __PRETTY_FUNCTION__ variable in a
template function. */
#define DECL_PRETTY_FUNCTION_P(NODE) \
(DECL_NAME (NODE) \
&& id_equal (DECL_NAME (NODE), "__PRETTY_FUNCTION__"))
/* Nonzero if the variable was declared to be thread-local.
We need a special C++ version of this test because the middle-end
DECL_THREAD_LOCAL_P uses the symtab, so we can't use it for
templates. */
#define CP_DECL_THREAD_LOCAL_P(NODE) \
(TREE_LANG_FLAG_0 (VAR_DECL_CHECK (NODE)))
/* The _TYPE context in which this _DECL appears. This field holds the
class where a virtual function instance is actually defined. */
#define DECL_CLASS_CONTEXT(NODE) \
(DECL_CLASS_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : NULL_TREE)
/* For a non-member friend function, the class (if any) in which this
friend was defined. For example, given:
struct S { friend void f () { ... } };
the DECL_FRIEND_CONTEXT for `f' will be `S'. */
#define DECL_FRIEND_CONTEXT(NODE) \
((DECL_DECLARES_FUNCTION_P (NODE) \
&& DECL_FRIEND_P (NODE) && !DECL_FUNCTION_MEMBER_P (NODE)) \
? LANG_DECL_FN_CHECK (NODE)->context \
: NULL_TREE)
/* Set the DECL_FRIEND_CONTEXT for NODE to CONTEXT. */
#define SET_DECL_FRIEND_CONTEXT(NODE, CONTEXT) \
(LANG_DECL_FN_CHECK (NODE)->context = (CONTEXT))
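/* The scope in which a _DECL or _TYPE appears; file-scope entities
are treated as members of the global namespace. */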
#define CP_DECL_CONTEXT(NODE) \
(!DECL_FILE_SCOPE_P (NODE) ? DECL_CONTEXT (NODE) : global_namespace)
#define CP_TYPE_CONTEXT(NODE) \
(!TYPE_FILE_SCOPE_P (NODE) ? TYPE_CONTEXT (NODE) : global_namespace)
#define FROB_CONTEXT(NODE) \
((NODE) == global_namespace ? DECL_CONTEXT (NODE) : (NODE))
/* 1 iff NODE has namespace scope, including the global namespace. */
#define DECL_NAMESPACE_SCOPE_P(NODE) \
(!DECL_TEMPLATE_PARM_P (NODE) \
&& TREE_CODE (CP_DECL_CONTEXT (NODE)) == NAMESPACE_DECL)
#define TYPE_NAMESPACE_SCOPE_P(NODE) \
(TREE_CODE (CP_TYPE_CONTEXT (NODE)) == NAMESPACE_DECL)
#define NAMESPACE_SCOPE_P(NODE) \
((DECL_P (NODE) && DECL_NAMESPACE_SCOPE_P (NODE)) \
|| (TYPE_P (NODE) && TYPE_NAMESPACE_SCOPE_P (NODE)))
/* 1 iff NODE is a class member. */
#define DECL_CLASS_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) && TYPE_P (DECL_CONTEXT (NODE)))
#define TYPE_CLASS_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TYPE_P (TYPE_CONTEXT (NODE)))
/* 1 iff NODE is function-local. */
#define DECL_FUNCTION_SCOPE_P(NODE) \
(DECL_CONTEXT (NODE) \
&& TREE_CODE (DECL_CONTEXT (NODE)) == FUNCTION_DECL)
#define TYPE_FUNCTION_SCOPE_P(NODE) \
(TYPE_CONTEXT (NODE) && TREE_CODE (TYPE_CONTEXT (NODE)) == FUNCTION_DECL)
/* 1 iff VAR_DECL node NODE is a type-info decl. This flag is set for
both the primary typeinfo object and the associated NTBS name. */
#define DECL_TINFO_P(NODE) TREE_LANG_FLAG_4 (VAR_DECL_CHECK (NODE))
/* 1 iff VAR_DECL node NODE is a virtual table or VTT. */
#define DECL_VTABLE_OR_VTT_P(NODE) TREE_LANG_FLAG_5 (VAR_DECL_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has a ref-qualifier (either & or &&). */
#define FUNCTION_REF_QUALIFIED(NODE) \
TREE_LANG_FLAG_4 (FUNC_OR_METHOD_CHECK (NODE))
/* 1 iff FUNCTION_TYPE or METHOD_TYPE has &&-ref-qualifier. */
#define FUNCTION_RVALUE_QUALIFIED(NODE) \
TREE_LANG_FLAG_5 (FUNC_OR_METHOD_CHECK (NODE))
/* Returns 1 iff VAR_DECL is a construction virtual table.
DECL_VTABLE_OR_VTT_P will be true in this case and must be checked
before using this macro. */
#define DECL_CONSTRUCTION_VTABLE_P(NODE) \
TREE_LANG_FLAG_6 (VAR_DECL_CHECK (NODE))
/* 1 iff NODE is function-local, but for types. */
#define LOCAL_CLASS_P(NODE) \
(decl_function_context (TYPE_MAIN_DECL (NODE)) != NULL_TREE)
/* The nesting depth of namespace, class or function. Makes is_ancestor much
simpler. Only 8 bits available. */
#define SCOPE_DEPTH(NODE) \
(NAMESPACE_DECL_CHECK (NODE)->base.u.bits.address_space)
/* Whether the namespace is an inline namespace. */
#define DECL_NAMESPACE_INLINE_P(NODE) \
TREE_LANG_FLAG_0 (NAMESPACE_DECL_CHECK (NODE))
/* In a NAMESPACE_DECL, a vector of using directives. */
#define DECL_NAMESPACE_USING(NODE) \
(LANG_DECL_NS_CHECK (NODE)->usings)
/* In a NAMESPACE_DECL, a vector of inline namespaces. */
#define DECL_NAMESPACE_INLINEES(NODE) \
(LANG_DECL_NS_CHECK (NODE)->inlinees)
/* Pointer to a hash_map from IDENTIFIERS to DECLS. */
#define DECL_NAMESPACE_BINDINGS(NODE) \
(LANG_DECL_NS_CHECK (NODE)->bindings)
/* In a NAMESPACE_DECL, points to the original namespace if this is
a namespace alias. */
#define DECL_NAMESPACE_ALIAS(NODE) \
DECL_ABSTRACT_ORIGIN (NAMESPACE_DECL_CHECK (NODE))
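/* The namespace ultimately denoted by NODE: the aliased namespace if
NODE is a namespace alias, otherwise NODE itself. */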
#define ORIGINAL_NAMESPACE(NODE) \
(DECL_NAMESPACE_ALIAS (NODE) ? DECL_NAMESPACE_ALIAS (NODE) : (NODE))
/* Nonzero if NODE is the std namespace. */
#define DECL_NAMESPACE_STD_P(NODE) \
(TREE_CODE (NODE) == NAMESPACE_DECL \
&& CP_DECL_CONTEXT (NODE) == global_namespace \
&& DECL_NAME (NODE) == std_identifier)
/* In a TREE_LIST in an attribute list, indicates that the attribute
must be applied at instantiation time. */
#define ATTR_IS_DEPENDENT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
/* In a TREE_LIST in the argument of attribute abi_tag, indicates that the tag
was inherited from a template parameter, not explicitly indicated. */
#define ABI_TAG_IMPLICIT(NODE) TREE_LANG_FLAG_0 (TREE_LIST_CHECK (NODE))
extern tree decl_shadowed_for_var_lookup (tree);
extern void decl_shadowed_for_var_insert (tree, tree);
/* Nonzero if this is a using decl for a dependent scope. */
#define DECL_DEPENDENT_P(NODE) DECL_LANG_FLAG_0 (USING_DECL_CHECK (NODE))
/* The scope named in a using decl. */
#define USING_DECL_SCOPE(NODE) TREE_TYPE (USING_DECL_CHECK (NODE))
/* The decls named by a using decl. */
#define USING_DECL_DECLS(NODE) DECL_INITIAL (USING_DECL_CHECK (NODE))
/* Nonzero if the using decl refers to a dependent type. */
#define USING_DECL_TYPENAME_P(NODE) DECL_LANG_FLAG_1 (USING_DECL_CHECK (NODE))
/* In a VAR_DECL, true if we have a shadowed local variable
in the shadowed var table for this VAR_DECL. */
#define DECL_HAS_SHADOWED_FOR_VAR_P(NODE) \
(VAR_DECL_CHECK (NODE)->decl_with_vis.shadowed_for_var_p)
/* In a VAR_DECL for a variable declared in a for statement,
this is the shadowed (local) variable. */
#define DECL_SHADOWED_FOR_VAR(NODE) \
(DECL_HAS_SHADOWED_FOR_VAR_P(NODE) ? decl_shadowed_for_var_lookup (NODE) : NULL)
#define SET_DECL_SHADOWED_FOR_VAR(NODE, VAL) \
(decl_shadowed_for_var_insert (NODE, VAL))
/* In a FUNCTION_DECL, this is nonzero if this function was defined in
the class definition. We have saved away the text of the function,
but have not yet processed it. */
#define DECL_PENDING_INLINE_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->pending_inline_p)
/* If DECL_PENDING_INLINE_P holds, this is the saved text of the
function. */
#define DECL_PENDING_INLINE_INFO(NODE) \
(LANG_DECL_FN_CHECK (NODE)->u.pending_inline_info)
/* Nonzero for TYPE_DECL means that it was written 'using name = type'. */
#define TYPE_DECL_ALIAS_P(NODE) \
DECL_LANG_FLAG_6 (TYPE_DECL_CHECK (NODE))
/* Nonzero for TEMPLATE_DECL means that it is a 'complex' alias template. */
#define TEMPLATE_DECL_COMPLEX_ALIAS_P(NODE) \
DECL_LANG_FLAG_2 (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a type which is an alias for another type; i.e., a
type whose declaration was written 'using name-of-type =
another-type'. */
#define TYPE_ALIAS_P(NODE) \
(TYPE_P (NODE) \
&& TYPE_NAME (NODE) \
&& TREE_CODE (TYPE_NAME (NODE)) == TYPE_DECL \
&& TYPE_DECL_ALIAS_P (TYPE_NAME (NODE)))
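/* A sketch of the usual case (illustrative): after
using I = int;
the TYPE_DECL for `I' has TYPE_DECL_ALIAS_P set, and TYPE_ALIAS_P
holds for the variant of `int' whose TYPE_NAME is that TYPE_DECL. */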
/* If non-NULL for a VAR_DECL, FUNCTION_DECL, TYPE_DECL or
TEMPLATE_DECL, the entity is either a template specialization (if
DECL_USE_TEMPLATE is nonzero) or the abstract instance of the
template itself.
In either case, DECL_TEMPLATE_INFO is a TREE_LIST, whose
TREE_PURPOSE is the TEMPLATE_DECL of which this entity is a
specialization or abstract instance. The TREE_VALUE is the
template arguments used to specialize the template.
Consider:
template <typename T> struct S { friend void f(T) {} };
In this case, S<int>::f is, from the point of view of the compiler,
an instantiation of a template -- but, from the point of view of
the language, each instantiation of S results in a wholly unrelated
global function f. In this case, DECL_TEMPLATE_INFO for S<int>::f
will be non-NULL, but DECL_USE_TEMPLATE will be zero. */
#define DECL_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (VAR_TEMPL_TYPE_FIELD_OR_FUNCTION_DECL_CHECK (NODE)) \
->u.min.template_info)
/* For a lambda capture proxy, its captured variable. */
#define DECL_CAPTURED_VARIABLE(NODE) \
(LANG_DECL_U2_CHECK (NODE, 0)->access)
/* For a VAR_DECL, indicates that the variable is actually a
non-static data member of an anonymous union that has been promoted to
variable status. */
#define DECL_ANON_UNION_VAR_P(NODE) \
(DECL_LANG_FLAG_4 (VAR_DECL_CHECK (NODE)))
/* Template information for a RECORD_TYPE or UNION_TYPE. */
#define CLASSTYPE_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (RECORD_OR_UNION_CHECK (NODE)))
/* Template information for a template template parameter. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_INFO(NODE) \
(TYPE_LANG_SLOT_1 (BOUND_TEMPLATE_TEMPLATE_PARM_TYPE_CHECK (NODE)))
/* Template information for an ENUMERAL_, RECORD_, UNION_TYPE, or
BOUND_TEMPLATE_TEMPLATE_PARM type. This ignores any alias
templateness of NODE. It'd be nice if this could unconditionally
access the slot, rather than return NULL if given a
non-templatable type. */
#define TYPE_TEMPLATE_INFO(NODE) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
|| TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM \
|| RECORD_OR_UNION_TYPE_P (NODE) \
? TYPE_LANG_SLOT_1 (NODE) : NULL_TREE)
/* Template information (if any) for an alias type. */
#define TYPE_ALIAS_TEMPLATE_INFO(NODE) \
(DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
? DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) \
: NULL_TREE)
/* If NODE is a type alias, this accessor returns the template info
for the alias template (if any). Otherwise behave as
TYPE_TEMPLATE_INFO. */
#define TYPE_TEMPLATE_INFO_MAYBE_ALIAS(NODE) \
(TYPE_ALIAS_P (NODE) \
? TYPE_ALIAS_TEMPLATE_INFO (NODE) \
: TYPE_TEMPLATE_INFO (NODE))
/* Set the template information for an ENUMERAL_, RECORD_, or
UNION_TYPE to VAL. */
#define SET_TYPE_TEMPLATE_INFO(NODE, VAL) \
(TREE_CODE (NODE) == ENUMERAL_TYPE \
|| (CLASS_TYPE_P (NODE) && !TYPE_ALIAS_P (NODE)) \
? (TYPE_LANG_SLOT_1 (NODE) = (VAL)) \
: (DECL_TEMPLATE_INFO (TYPE_NAME (NODE)) = (VAL)))
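/* For a TEMPLATE_INFO node: the TEMPLATE_DECL and the argument vector
it pairs together, as described for DECL_TEMPLATE_INFO above. */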
#define TI_TEMPLATE(NODE) TREE_TYPE (TEMPLATE_INFO_CHECK (NODE))
#define TI_ARGS(NODE) TREE_CHAIN (TEMPLATE_INFO_CHECK (NODE))
#define TI_PENDING_TEMPLATE_FLAG(NODE) TREE_LANG_FLAG_1 (NODE)
/* For a given TREE_VEC containing a template argument list,
this property contains the number of arguments that are not
defaulted. */
#define NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) TREE_CHAIN (TREE_VEC_CHECK (NODE))
/* Below are the setter and getter of the NON_DEFAULT_TEMPLATE_ARGS_COUNT
property. */
#define SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE, INT_VALUE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) = build_int_cst (NULL_TREE, INT_VALUE)
#if CHECKING_P
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE))
#else
#define GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT(NODE) \
NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE) \
? int_cst_value (NON_DEFAULT_TEMPLATE_ARGS_COUNT (NODE)) \
: TREE_VEC_LENGTH (INNERMOST_TEMPLATE_ARGS (NODE))
#endif
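/* Illustration (assuming the usual bookkeeping): for
template <class T, class U = int> struct P {};
P<char> p;
the innermost argument vector of P<char> is {char, int} and its
non-default count is 1, since only `char' was written explicitly. */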
/* The list of typedefs - used in the template - that need
access checking at template instantiation time.
FIXME this should be associated with the TEMPLATE_DECL, not the
TEMPLATE_INFO. */
#define TI_TYPEDEFS_NEEDING_ACCESS_CHECKING(NODE) \
((struct tree_template_info*)TEMPLATE_INFO_CHECK \
(NODE))->typedefs_needing_access_checking
/* We use TREE_VECs to hold template arguments. If there is only one
level of template arguments, then the TREE_VEC contains the
arguments directly. If there is more than one level of template
arguments, then each entry in the TREE_VEC is itself a TREE_VEC,
containing the template arguments for a single level. The first
entry in the outer TREE_VEC is the outermost level of template
parameters; the last is the innermost.
It is incorrect to ever form a template argument vector containing
only one level of arguments, but which is a TREE_VEC containing as
its only entry the TREE_VEC for that level.
For each TREE_VEC containing the template arguments for a single
level, it's possible to get or set the number of non-defaulted
template arguments by using the accessor macros
GET_NON_DEFAULT_TEMPLATE_ARGS_COUNT or
SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT. */
/* Nonzero if the template argument list is actually a vector of
vectors, rather than just a vector. */
#define TMPL_ARGS_HAVE_MULTIPLE_LEVELS(NODE) \
(NODE && TREE_VEC_LENGTH (NODE) && TREE_VEC_ELT (NODE, 0) \
&& TREE_CODE (TREE_VEC_ELT (NODE, 0)) == TREE_VEC)
/* The depth of a template argument vector. When called directly by
the parser, we use a TREE_LIST rather than a TREE_VEC to represent
template arguments. In fact, we may even see NULL_TREE if there
are no template arguments. In both of those cases, there is only
one level of template arguments. */
#define TMPL_ARGS_DEPTH(NODE) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (NODE) ? TREE_VEC_LENGTH (NODE) : 1)
/* The LEVELth level of the template ARGS. The outermost level of
args is level 1, not level 0. */
#define TMPL_ARGS_LEVEL(ARGS, LEVEL) \
(TMPL_ARGS_HAVE_MULTIPLE_LEVELS (ARGS) \
? TREE_VEC_ELT (ARGS, (LEVEL) - 1) : (ARGS))
/* Set the LEVELth level of the template ARGS to VAL. This macro does
not work with single-level argument vectors. */
#define SET_TMPL_ARGS_LEVEL(ARGS, LEVEL, VAL) \
(TREE_VEC_ELT (ARGS, (LEVEL) - 1) = (VAL))
/* Accesses the IDXth parameter in the LEVELth level of the ARGS. */
#define TMPL_ARG(ARGS, LEVEL, IDX) \
(TREE_VEC_ELT (TMPL_ARGS_LEVEL (ARGS, LEVEL), IDX))
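/* A worked example (illustrative): for
template <class T> struct S { template <class U> void f (U); };
the member specialization S<int>::f<double> has the two-level argument
vector {{int}, {double}}: TMPL_ARGS_DEPTH is 2,
TMPL_ARGS_LEVEL (args, 1) is the TREE_VEC {int}, and
TMPL_ARG (args, 2, 0) is `double'. */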
/* Given a single level of template arguments in NODE, return the
number of arguments. */
#define NUM_TMPL_ARGS(NODE) \
(TREE_VEC_LENGTH (NODE))
/* Returns the innermost level of template arguments in ARGS. */
#define INNERMOST_TEMPLATE_ARGS(NODE) \
(get_innermost_template_args ((NODE), 1))
/* The number of levels of template parameters given by NODE. */
#define TMPL_PARMS_DEPTH(NODE) \
((HOST_WIDE_INT) TREE_INT_CST_LOW (TREE_PURPOSE (NODE)))
/* The TEMPLATE_DECL instantiated or specialized by NODE. This
TEMPLATE_DECL will be the immediate parent, not the most general
template. For example, in:
template <class T> struct S { template <class U> void f(U); }
the FUNCTION_DECL for S<int>::f<double> will have, as its
DECL_TI_TEMPLATE, `template <class U> S<int>::f<U>'.
As a special case, for a member friend template of a template
class, this value will not be a TEMPLATE_DECL, but rather an
IDENTIFIER_NODE or OVERLOAD indicating the name of the template and
any explicit template arguments provided. For example, in:
template <class T> struct S { friend void f<int>(int, double); }
the DECL_TI_TEMPLATE will be an IDENTIFIER_NODE for `f' and the
DECL_TI_ARGS will be {int}.
For a FIELD_DECL with a non-static data member initializer, this value
is the FIELD_DECL it was instantiated from. */
#define DECL_TI_TEMPLATE(NODE) TI_TEMPLATE (DECL_TEMPLATE_INFO (NODE))
/* The template arguments used to obtain this decl from the most
general form of DECL_TI_TEMPLATE. For the example given for
DECL_TI_TEMPLATE, the DECL_TI_ARGS will be {int, double}. These
are always the full set of arguments required to instantiate this
declaration from the most general template specialized here. */
#define DECL_TI_ARGS(NODE) TI_ARGS (DECL_TEMPLATE_INFO (NODE))
/* The TEMPLATE_DECL associated with NODE, a class type. Even if NODE
will be generated from a partial specialization, the TEMPLATE_DECL
referred to here will be the original template. For example,
given:
template <typename T> struct S {};
template <typename T> struct S<T*> {};
the CLASSTYPE_TI_TEMPLATE for S<int*> will be S, not S<T*>. */
#define CLASSTYPE_TI_TEMPLATE(NODE) TI_TEMPLATE (CLASSTYPE_TEMPLATE_INFO (NODE))
#define CLASSTYPE_TI_ARGS(NODE) TI_ARGS (CLASSTYPE_TEMPLATE_INFO (NODE))
/* For a template instantiation TYPE, returns the TYPE corresponding
to the primary template. Otherwise returns TYPE itself. */
#define CLASSTYPE_PRIMARY_TEMPLATE_TYPE(TYPE) \
((CLASSTYPE_USE_TEMPLATE ((TYPE)) \
&& !CLASSTYPE_TEMPLATE_SPECIALIZATION ((TYPE))) \
? TREE_TYPE (DECL_TEMPLATE_RESULT (DECL_PRIMARY_TEMPLATE \
(CLASSTYPE_TI_TEMPLATE ((TYPE))))) \
: (TYPE))
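/* Continuing the S/S<T*> example above (illustrative): for the
instantiation S<int*> this yields the pattern type S<T> of the primary
template, while for an explicit specialization such as
template <> struct S<char> {};
it yields S<char> itself. */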
/* Like CLASSTYPE_TI_TEMPLATE, but also works for ENUMERAL_TYPEs. */
#define TYPE_TI_TEMPLATE(NODE) \
(TI_TEMPLATE (TYPE_TEMPLATE_INFO (NODE)))
/* Like DECL_TI_ARGS, but for an ENUMERAL_, RECORD_, or UNION_TYPE. */
#define TYPE_TI_ARGS(NODE) \
(TI_ARGS (TYPE_TEMPLATE_INFO (NODE)))
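/* The innermost level of template parameters in a parameter list NODE;
see DECL_TEMPLATE_PARMS below for the structure of such lists. */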
#define INNERMOST_TEMPLATE_PARMS(NODE) TREE_VALUE (NODE)
/* Nonzero if NODE (a TEMPLATE_DECL) is a member template, in the
sense of [temp.mem]. */
#define DECL_MEMBER_TEMPLATE_P(NODE) \
(DECL_LANG_FLAG_1 (TEMPLATE_DECL_CHECK (NODE)))
/* Nonzero if the NODE corresponds to the template parameters for a
member template, whose inline definition is being processed after
the class definition is complete. */
#define TEMPLATE_PARMS_FOR_INLINE(NODE) TREE_LANG_FLAG_1 (NODE)
/* Determine if a declaration (PARM_DECL or FIELD_DECL) is a pack. */
#define DECL_PACK_P(NODE) \
(DECL_P (NODE) && PACK_EXPANSION_P (TREE_TYPE (NODE)))
/* Determines if NODE is an expansion of one or more parameter packs,
e.g., a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_P(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
|| TREE_CODE (NODE) == EXPR_PACK_EXPANSION)
/* Extracts the type or expression pattern from a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define PACK_EXPANSION_PATTERN(NODE) \
(TREE_CODE (NODE) == TYPE_PACK_EXPANSION ? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
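/* Illustration: in the expansion `f (args)...' the EXPR_PACK_EXPANSION's
pattern is the call `f (args)'; in a type-list expansion `Ts...' the
TYPE_PACK_EXPANSION's pattern is `Ts'. */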
/* Sets the type or expression pattern for a TYPE_PACK_EXPANSION or
EXPR_PACK_EXPANSION. */
#define SET_PACK_EXPANSION_PATTERN(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_PACK_EXPANSION) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* The list of parameter packs used in the PACK_EXPANSION_* node. The
TREE_VALUE of each TREE_LIST contains the parameter packs. */
#define PACK_EXPANSION_PARAMETER_PACKS(NODE) \
*(TREE_CODE (NODE) == EXPR_PACK_EXPANSION \
? &TREE_OPERAND (NODE, 1) \
: &TYPE_MIN_VALUE_RAW (TYPE_PACK_EXPANSION_CHECK (NODE)))
/* Any additional template args to be applied when substituting into
the pattern, set by tsubst_pack_expansion for partial instantiations.
If this is a TREE_LIST, the TREE_VALUE of the first element is the
usual template argument TREE_VEC, and the TREE_PURPOSE of later elements
are enclosing functions that provided function parameter packs we'll need
to map appropriately. */
#define PACK_EXPANSION_EXTRA_ARGS(NODE) \
*(TREE_CODE (NODE) == TYPE_PACK_EXPANSION \
? &TYPE_MAX_VALUE_RAW (NODE) \
: &TREE_OPERAND ((NODE), 2))
/* True iff this pack expansion is within a function context. */
#define PACK_EXPANSION_LOCAL_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* True iff this pack expansion is for sizeof.... */
#define PACK_EXPANSION_SIZEOF_P(NODE) TREE_LANG_FLAG_1 (NODE)
/* True iff the wildcard can match a template parameter pack. */
#define WILDCARD_PACK_P(NODE) TREE_LANG_FLAG_0 (NODE)
/* Determine if this is an argument pack. */
#define ARGUMENT_PACK_P(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK \
|| TREE_CODE (NODE) == NONTYPE_ARGUMENT_PACK)
/* The arguments stored in an argument pack. Arguments are stored in a
TREE_VEC, which may have length zero. */
#define ARGUMENT_PACK_ARGS(NODE) \
(TREE_CODE (NODE) == TYPE_ARGUMENT_PACK ? TREE_TYPE (NODE) \
: TREE_OPERAND (NODE, 0))
/* Set the arguments stored in an argument pack. VALUE must be a
TREE_VEC. */
#define SET_ARGUMENT_PACK_ARGS(NODE,VALUE) \
if (TREE_CODE (NODE) == TYPE_ARGUMENT_PACK) \
TREE_TYPE (NODE) = VALUE; \
else \
TREE_OPERAND (NODE, 0) = VALUE
/* Whether the argument pack is "incomplete", meaning that more
arguments can still be deduced. Incomplete argument packs are only
used when the user has provided an explicit template argument list
for a variadic function template. Some of the explicit template
arguments will be placed into the beginning of the argument pack,
but additional arguments might still be deduced. */
#define ARGUMENT_PACK_INCOMPLETE_P(NODE) \
TREE_ADDRESSABLE (ARGUMENT_PACK_ARGS (NODE))
/* When ARGUMENT_PACK_INCOMPLETE_P, stores the explicit template
arguments used to fill this pack. */
#define ARGUMENT_PACK_EXPLICIT_ARGS(NODE) \
TREE_TYPE (ARGUMENT_PACK_ARGS (NODE))
/* In an ARGUMENT_PACK_SELECT, the argument pack from which an
argument will be selected. */
#define ARGUMENT_PACK_SELECT_FROM_PACK(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->argument_pack)
/* In an ARGUMENT_PACK_SELECT, the index of the argument we want to
select. */
#define ARGUMENT_PACK_SELECT_INDEX(NODE) \
(((struct tree_argument_pack_select *)ARGUMENT_PACK_SELECT_CHECK (NODE))->index)
#define FOLD_EXPR_CHECK(NODE) \
TREE_CHECK4 (NODE, UNARY_LEFT_FOLD_EXPR, UNARY_RIGHT_FOLD_EXPR, \
BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
#define BINARY_FOLD_EXPR_CHECK(NODE) \
TREE_CHECK2 (NODE, BINARY_LEFT_FOLD_EXPR, BINARY_RIGHT_FOLD_EXPR)
/* True if NODE is a UNARY_FOLD_EXPR or a BINARY_FOLD_EXPR. */
#define FOLD_EXPR_P(NODE) \
(TREE_CODE (NODE) == UNARY_LEFT_FOLD_EXPR \
|| TREE_CODE (NODE) == UNARY_RIGHT_FOLD_EXPR \
|| TREE_CODE (NODE) == BINARY_LEFT_FOLD_EXPR \
|| TREE_CODE (NODE) == BINARY_RIGHT_FOLD_EXPR)
/* True when NODE is a fold over a compound assignment operator. */
#define FOLD_EXPR_MODIFY_P(NODE) \
TREE_LANG_FLAG_0 (FOLD_EXPR_CHECK (NODE))
/* An INTEGER_CST containing the tree code of the folded operator. */
#define FOLD_EXPR_OP(NODE) \
TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 0)
/* The expression containing an unexpanded parameter pack. */
#define FOLD_EXPR_PACK(NODE) \
TREE_OPERAND (FOLD_EXPR_CHECK (NODE), 1)
/* In a binary fold expression, the argument with no unexpanded
parameter packs. */
#define FOLD_EXPR_INIT(NODE) \
TREE_OPERAND (BINARY_FOLD_EXPR_CHECK (NODE), 2)
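/* A sketch of how C++17 fold-expression syntax maps onto these codes
(illustrative):
(... op pack) -> UNARY_LEFT_FOLD_EXPR
(pack op ...) -> UNARY_RIGHT_FOLD_EXPR
(init op ... op pack) -> BINARY_LEFT_FOLD_EXPR
(pack op ... op init) -> BINARY_RIGHT_FOLD_EXPR
FOLD_EXPR_OP carries `op', FOLD_EXPR_PACK the operand containing the
unexpanded pack, and FOLD_EXPR_INIT the non-pack operand of a binary
fold. */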
/* In a FUNCTION_DECL, the saved language-specific per-function data. */
#define DECL_SAVED_FUNCTION_DATA(NODE) \
(LANG_DECL_FN_CHECK (FUNCTION_DECL_CHECK (NODE)) \
->u.saved_language_function)
/* True if NODE is an implicit INDIRECT_EXPR from convert_from_reference. */
#define REFERENCE_REF_P(NODE) \
(INDIRECT_REF_P (NODE) \
&& TREE_TYPE (TREE_OPERAND (NODE, 0)) \
&& (TREE_CODE (TREE_TYPE (TREE_OPERAND ((NODE), 0))) \
== REFERENCE_TYPE))
/* True if NODE is a REFERENCE_TYPE which is OK to instantiate to be a
reference to VLA type, because it's used for VLA capture. */
#define REFERENCE_VLA_OK(NODE) \
(TYPE_LANG_FLAG_5 (REFERENCE_TYPE_CHECK (NODE)))
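/* Nonzero if the new-expression was written `::new'. */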
#define NEW_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (NEW_EXPR_CHECK (NODE))
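/* Nonzero if the delete-expression was written `::delete'. */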
#define DELETE_EXPR_USE_GLOBAL(NODE) \
TREE_LANG_FLAG_0 (DELETE_EXPR_CHECK (NODE))
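/* Nonzero if the delete-expression was the array form `delete[]'. */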
#define DELETE_EXPR_USE_VEC(NODE) \
TREE_LANG_FLAG_1 (DELETE_EXPR_CHECK (NODE))
#define CALL_OR_AGGR_INIT_CHECK(NODE) \
TREE_CHECK2 ((NODE), CALL_EXPR, AGGR_INIT_EXPR)
/* Indicates that this is a non-dependent COMPOUND_EXPR which will
resolve to a function call. */
#define COMPOUND_EXPR_OVERLOADED(NODE) \
TREE_LANG_FLAG_0 (COMPOUND_EXPR_CHECK (NODE))
/* In a CALL_EXPR appearing in a template, true if Koenig lookup
should be performed at instantiation time. */
#define KOENIG_LOOKUP_P(NODE) TREE_LANG_FLAG_0 (CALL_EXPR_CHECK (NODE))
/* True if the arguments to NODE should be evaluated in left-to-right
order regardless of PUSH_ARGS_REVERSED. */
#define CALL_EXPR_ORDERED_ARGS(NODE) \
TREE_LANG_FLAG_3 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* True if the arguments to NODE should be evaluated in right-to-left
order regardless of PUSH_ARGS_REVERSED. */
#define CALL_EXPR_REVERSE_ARGS(NODE) \
TREE_LANG_FLAG_5 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* True if CALL_EXPR was written as an operator expression, not a function
call. */
#define CALL_EXPR_OPERATOR_SYNTAX(NODE) \
TREE_LANG_FLAG_6 (CALL_OR_AGGR_INIT_CHECK (NODE))
/* Indicates whether a string literal has been parenthesized. Such
usages are disallowed in certain circumstances. */
#define PAREN_STRING_LITERAL_P(NODE) \
TREE_LANG_FLAG_0 (STRING_CST_CHECK (NODE))
/* Indicates whether a COMPONENT_REF or a SCOPE_REF has been parenthesized, or
an INDIRECT_REF comes from parenthesizing a _DECL. Currently only set some
of the time in C++14 mode. */
#define REF_PARENTHESIZED_P(NODE) \
TREE_LANG_FLAG_2 (TREE_CHECK3 ((NODE), COMPONENT_REF, INDIRECT_REF, SCOPE_REF))
/* Nonzero if this AGGR_INIT_EXPR provides for initialization via a
constructor call, rather than an ordinary function call. */
#define AGGR_INIT_VIA_CTOR_P(NODE) \
TREE_LANG_FLAG_0 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero if expanding this AGGR_INIT_EXPR should first zero-initialize
the object. */
#define AGGR_INIT_ZERO_FIRST(NODE) \
TREE_LANG_FLAG_2 (AGGR_INIT_EXPR_CHECK (NODE))
/* Nonzero means that the call is the jump from a thunk to the
thunked-to function. */
#define AGGR_INIT_FROM_THUNK_P(NODE) \
(AGGR_INIT_EXPR_CHECK (NODE)->base.protected_flag)
/* AGGR_INIT_EXPR accessors. These are equivalent to the CALL_EXPR
accessors, except for AGGR_INIT_EXPR_SLOT (which takes the place of
CALL_EXPR_STATIC_CHAIN). */
#define AGGR_INIT_EXPR_FN(NODE) TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 1)
#define AGGR_INIT_EXPR_SLOT(NODE) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 2)
#define AGGR_INIT_EXPR_ARG(NODE, I) \
TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), (I) + 3)
#define aggr_init_expr_nargs(NODE) (VL_EXP_OPERAND_LENGTH(NODE) - 3)
/* AGGR_INIT_EXPR_ARGP returns a pointer to the argument vector for NODE.
We can't use &AGGR_INIT_EXPR_ARG (NODE, 0) because that will complain if
the argument count is zero when checking is enabled. Instead, do
the pointer arithmetic to advance past the 3 fixed operands in an
AGGR_INIT_EXPR. That produces a valid pointer to just past the end of
the operand array, even if it's not valid to dereference it. */
#define AGGR_INIT_EXPR_ARGP(NODE) \
(&(TREE_OPERAND (AGGR_INIT_EXPR_CHECK (NODE), 0)) + 3)
/* Abstract iterators for AGGR_INIT_EXPRs. */
/* Structure containing iterator state. */
struct aggr_init_expr_arg_iterator {
tree t; /* the aggr_init_expr */
int n; /* argument count */
int i; /* next argument index */
};
/* Initialize the abstract argument list iterator object ITER with the
arguments from AGGR_INIT_EXPR node EXP. */
inline void
init_aggr_init_expr_arg_iterator (tree exp,
aggr_init_expr_arg_iterator *iter)
{
iter->t = exp;
iter->n = aggr_init_expr_nargs (exp);
iter->i = 0;
}
/* Return the next argument from abstract argument list iterator object ITER,
and advance its state. Return NULL_TREE if there are no more arguments. */
inline tree
next_aggr_init_expr_arg (aggr_init_expr_arg_iterator *iter)
{
tree result;
if (iter->i >= iter->n)
return NULL_TREE;
result = AGGR_INIT_EXPR_ARG (iter->t, iter->i);
iter->i++;
return result;
}
/* Initialize the abstract argument list iterator object ITER, then advance
past and return the first argument. Useful in for loops, e.g.
for (arg = first_aggr_init_expr_arg (exp, &iter); arg;
arg = next_aggr_init_expr_arg (&iter)) */
inline tree
first_aggr_init_expr_arg (tree exp, aggr_init_expr_arg_iterator *iter)
{
init_aggr_init_expr_arg_iterator (exp, iter);
return next_aggr_init_expr_arg (iter);
}
/* Test whether there are more arguments in abstract argument list iterator
ITER, without changing its state. */
inline bool
more_aggr_init_expr_args_p (const aggr_init_expr_arg_iterator *iter)
{
return (iter->i < iter->n);
}
/* Iterate through each argument ARG of AGGR_INIT_EXPR CALL, using variable
ITER (of type aggr_init_expr_arg_iterator) to hold the iteration state. */
#define FOR_EACH_AGGR_INIT_EXPR_ARG(arg, iter, call) \
for ((arg) = first_aggr_init_expr_arg ((call), &(iter)); (arg); \
(arg) = next_aggr_init_expr_arg (&(iter)))
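/* An illustrative (hypothetical) helper showing the iterator in use:
count the arguments of AGGR_INIT_EXPR CALL. Not part of the original
interface. */
inline int
count_aggr_init_expr_args (tree call)
{
aggr_init_expr_arg_iterator iter;
tree arg;
int count = 0;
FOR_EACH_AGGR_INIT_EXPR_ARG (arg, iter, call)
count++;
return count;
}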
/* VEC_INIT_EXPR accessors. */
#define VEC_INIT_EXPR_SLOT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 0)
#define VEC_INIT_EXPR_INIT(NODE) TREE_OPERAND (VEC_INIT_EXPR_CHECK (NODE), 1)
/* Indicates that a VEC_INIT_EXPR is a potential constant expression.
Only set when the current function is constexpr. */
#define VEC_INIT_EXPR_IS_CONSTEXPR(NODE) \
TREE_LANG_FLAG_0 (VEC_INIT_EXPR_CHECK (NODE))
/* Indicates that a VEC_INIT_EXPR is expressing value-initialization. */
#define VEC_INIT_EXPR_VALUE_INIT(NODE) \
TREE_LANG_FLAG_1 (VEC_INIT_EXPR_CHECK (NODE))
/* The condition under which this MUST_NOT_THROW_EXPR actually blocks
exceptions. NULL_TREE means 'true'. */
#define MUST_NOT_THROW_COND(NODE) \
TREE_OPERAND (MUST_NOT_THROW_EXPR_CHECK (NODE), 1)
/* The TYPE_MAIN_DECL for a class template type is a TYPE_DECL, not a
TEMPLATE_DECL. This macro determines whether or not a given class
type is really a template type, as opposed to an instantiation or
specialization of one. */
#define CLASSTYPE_IS_TEMPLATE(NODE) \
(CLASSTYPE_TEMPLATE_INFO (NODE) \
&& !CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
/* The name used by the user to name the typename type. Typically,
this is an IDENTIFIER_NODE, and the same as the DECL_NAME on the
corresponding TYPE_DECL. However, this may also be a
TEMPLATE_ID_EXPR if we had something like `typename X::Y<T>'. */
#define TYPENAME_TYPE_FULLNAME(NODE) \
(TYPE_VALUES_RAW (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as an "enum". */
#define TYPENAME_IS_ENUM_P(NODE) \
(TREE_LANG_FLAG_0 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE was declared as a "class", "struct", or
"union". */
#define TYPENAME_IS_CLASS_P(NODE) \
(TREE_LANG_FLAG_1 (TYPENAME_TYPE_CHECK (NODE)))
/* True if a TYPENAME_TYPE is in the process of being resolved. */
#define TYPENAME_IS_RESOLVING_P(NODE) \
(TREE_LANG_FLAG_2 (TYPENAME_TYPE_CHECK (NODE)))
/* [class.virtual]
A class that declares or inherits a virtual function is called a
polymorphic class. */
#define TYPE_POLYMORPHIC_P(NODE) (TREE_LANG_FLAG_2 (NODE))
/* Nonzero if this class has a virtual function table pointer. */
#define TYPE_CONTAINS_VPTR_P(NODE) \
(TYPE_POLYMORPHIC_P (NODE) || CLASSTYPE_VBASECLASSES (NODE))
/* This flag is true of a local VAR_DECL if it was declared in a for
statement, but we are no longer in the scope of the for. */
#define DECL_DEAD_FOR_LOCAL(NODE) DECL_LANG_FLAG_7 (VAR_DECL_CHECK (NODE))
/* This flag is set on a VAR_DECL that is a DECL_DEAD_FOR_LOCAL
if we already emitted a warning about using it. */
#define DECL_ERROR_REPORTED(NODE) DECL_LANG_FLAG_0 (VAR_DECL_CHECK (NODE))
/* Nonzero if NODE is a FUNCTION_DECL (for a function with global
scope) declared in a local scope. */
#define DECL_LOCAL_FUNCTION_P(NODE) \
DECL_LANG_FLAG_0 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'break' stmts. */
#define LABEL_DECL_BREAK(NODE) \
DECL_LANG_FLAG_0 (LABEL_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'continue' stmts. */
#define LABEL_DECL_CONTINUE(NODE) \
DECL_LANG_FLAG_1 (LABEL_DECL_CHECK (NODE))
/* Nonzero if NODE is the target for genericization of 'return' stmts
in constructors/destructors of targetm.cxx.cdtor_returns_this targets. */
#define LABEL_DECL_CDTOR(NODE) \
DECL_LANG_FLAG_2 (LABEL_DECL_CHECK (NODE))
/* True if NODE was declared with auto in its return type, but it has
started compilation and so the return type might have been changed by
return type deduction; its declared return type should be found in
DECL_STRUCT_FUNCTION(NODE)->language->x_auto_return_pattern. */
#define FNDECL_USED_AUTO(NODE) \
TREE_LANG_FLAG_2 (FUNCTION_DECL_CHECK (NODE))
/* Nonzero if NODE is a DECL which we know about but which has not
been explicitly declared, such as a built-in function or a friend
declared inside a class. In the latter case DECL_HIDDEN_FRIEND_P
will be set. */
#define DECL_ANTICIPATED(NODE) \
(DECL_LANG_SPECIFIC (TYPE_FUNCTION_OR_TEMPLATE_DECL_CHECK (NODE)) \
->u.base.anticipated_p)
/* Is DECL NODE a hidden name? */
#define DECL_HIDDEN_P(NODE) \
(DECL_LANG_SPECIFIC (NODE) && TYPE_FUNCTION_OR_TEMPLATE_DECL_P (NODE) \
&& DECL_ANTICIPATED (NODE))
/* True if this is a hidden class type. */
#define TYPE_HIDDEN_P(NODE) \
(DECL_LANG_SPECIFIC (TYPE_NAME (NODE)) \
&& DECL_ANTICIPATED (TYPE_NAME (NODE)))
/* True for artificial decls added for OpenMP privatized non-static
data members. */
#define DECL_OMP_PRIVATIZED_MEMBER(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.anticipated_p)
/* Nonzero if NODE is a FUNCTION_DECL which was declared as a friend
within a class but has not been declared in the surrounding scope.
The function is invisible except via argument dependent lookup. */
#define DECL_HIDDEN_FRIEND_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->hidden_friend_p)
/* Nonzero if NODE is an artificial FUNCTION_DECL for
#pragma omp declare reduction. */
#define DECL_OMP_DECLARE_REDUCTION_P(NODE) \
(LANG_DECL_FN_CHECK (DECL_COMMON_CHECK (NODE))->omp_declare_reduction_p)
/* Nonzero if DECL has been declared threadprivate by
#pragma omp threadprivate. */
#define CP_DECL_THREADPRIVATE_P(DECL) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (DECL))->u.base.threadprivate_or_deleted_p)
/* Nonzero if NODE is a VAR_DECL which has been declared inline. */
#define DECL_VAR_DECLARED_INLINE_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
? DECL_LANG_SPECIFIC (NODE)->u.base.var_declared_inline_p \
: false)
#define SET_DECL_VAR_DECLARED_INLINE_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.var_declared_inline_p \
= true)
/* True if NODE is a constant variable with a value-dependent initializer. */
#define DECL_DEPENDENT_INIT_P(NODE) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE)) \
&& DECL_LANG_SPECIFIC (NODE)->u.base.dependent_init_p)
#define SET_DECL_DEPENDENT_INIT_P(NODE, X) \
(DECL_LANG_SPECIFIC (VAR_DECL_CHECK (NODE))->u.base.dependent_init_p = (X))
/* Nonzero if NODE is an artificial VAR_DECL for a C++17 structured binding
declaration or one of VAR_DECLs for the user identifiers in it. */
#define DECL_DECOMPOSITION_P(NODE) \
(VAR_P (NODE) && DECL_LANG_SPECIFIC (NODE) \
? DECL_LANG_SPECIFIC (NODE)->u.base.selector == lds_decomp \
: false)
/* The underlying artificial VAR_DECL for structured binding. */
#define DECL_DECOMP_BASE(NODE) \
(LANG_DECL_DECOMP_CHECK (NODE)->base)
/* Nonzero if NODE is an inline VAR_DECL. In C++17, static data members
declared with constexpr specifier are implicitly inline variables. */
#define DECL_INLINE_VAR_P(NODE) \
(DECL_VAR_DECLARED_INLINE_P (NODE) \
|| (cxx_dialect >= cxx17 \
&& DECL_DECLARED_CONSTEXPR_P (NODE) \
&& DECL_CLASS_SCOPE_P (NODE)))
/* Nonzero if DECL was declared with '= delete'. */
#define DECL_DELETED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->min.base.threadprivate_or_deleted_p)
/* Nonzero if DECL was declared with '= default' (maybe implicitly). */
#define DECL_DEFAULTED_FN(DECL) \
(LANG_DECL_FN_CHECK (DECL)->defaulted_p)
/* Nonzero if DECL is explicitly defaulted in the class body. */
#define DECL_DEFAULTED_IN_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) && DECL_INITIALIZED_IN_CLASS_P (DECL))
/* Nonzero if DECL was defaulted outside the class body. */
#define DECL_DEFAULTED_OUTSIDE_CLASS_P(DECL) \
(DECL_DEFAULTED_FN (DECL) \
&& !(DECL_ARTIFICIAL (DECL) || DECL_INITIALIZED_IN_CLASS_P (DECL)))
/* Record whether a typedef for type `int' was actually `signed int'. */
#define C_TYPEDEF_EXPLICITLY_SIGNED(EXP) DECL_LANG_FLAG_1 (EXP)
/* Returns nonzero if DECL has external linkage, as specified by the
language standard. (This predicate may hold even when the
corresponding entity is not actually given external linkage in the
object file; see decl_linkage for details.) */
#define DECL_EXTERNAL_LINKAGE_P(DECL) \
(decl_linkage (DECL) == lk_external)
/* Keep these codes in ascending code order. */
#define INTEGRAL_CODE_P(CODE) \
((CODE) == ENUMERAL_TYPE \
|| (CODE) == BOOLEAN_TYPE \
|| (CODE) == INTEGER_TYPE)
/* [basic.fundamental]
Types bool, char, wchar_t, and the signed and unsigned integer types
are collectively called integral types.
Note that INTEGRAL_TYPE_P, as defined in tree.h, allows enumeration
types as well, which is incorrect in C++. Keep these checks in
ascending code order. */
#define CP_INTEGRAL_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
|| TREE_CODE (TYPE) == INTEGER_TYPE)
/* Returns true if TYPE is an integral or enumeration type. Keep
these checks in ascending code order. */
#define INTEGRAL_OR_ENUMERATION_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE || CP_INTEGRAL_TYPE_P (TYPE))
/* Returns true if TYPE is an integral or unscoped enumeration type. */
#define INTEGRAL_OR_UNSCOPED_ENUMERATION_TYPE_P(TYPE) \
(UNSCOPED_ENUM_P (TYPE) || CP_INTEGRAL_TYPE_P (TYPE))
/* True if the class type TYPE is a literal type. */
#define CLASSTYPE_LITERAL_P(TYPE) \
(LANG_TYPE_CLASS_CHECK (TYPE)->is_literal)
/* [basic.fundamental]
Integral and floating types are collectively called arithmetic
types.
As a GNU extension, we also accept complex types.
Keep these checks in ascending code order. */
#define ARITHMETIC_TYPE_P(TYPE) \
(CP_INTEGRAL_TYPE_P (TYPE) \
|| TREE_CODE (TYPE) == REAL_TYPE \
|| TREE_CODE (TYPE) == COMPLEX_TYPE)
/* True iff TYPE is cv decltype(nullptr). */
#define NULLPTR_TYPE_P(TYPE) (TREE_CODE (TYPE) == NULLPTR_TYPE)
/* [basic.types]
Arithmetic types, enumeration types, pointer types,
pointer-to-member types, and std::nullptr_t are collectively called
scalar types.
Keep these checks in ascending code order. */
#define SCALAR_TYPE_P(TYPE) \
(TYPE_PTRDATAMEM_P (TYPE) \
|| TREE_CODE (TYPE) == ENUMERAL_TYPE \
|| ARITHMETIC_TYPE_P (TYPE) \
|| TYPE_PTR_P (TYPE) \
|| TYPE_PTRMEMFUNC_P (TYPE) \
|| NULLPTR_TYPE_P (TYPE))
/* Determines whether this type is a C++0x scoped enumeration
type. Scoped enumerations types are introduced via "enum class" or
"enum struct", e.g.,
enum class Color {
Red, Green, Blue
};
Scoped enumeration types are different from normal (unscoped)
enumeration types in several ways:
- The enumerators of a scoped enumeration type are only available
within the scope of the enumeration type and not in the
enclosing scope. For example, the Red color can be referred to
with "Color::Red" but not "Red".
- Scoped enumerators and enumerations do not implicitly convert
to integers or 'bool'.
- The underlying type of the enum is well-defined. */
#define SCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_SCOPED (TYPE))
/* Determine whether this is an unscoped enumeration type. */
#define UNSCOPED_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && !ENUM_IS_SCOPED (TYPE))
/* Set the flag indicating whether an ENUMERAL_TYPE is a C++0x scoped
enumeration type (1) or a normal (unscoped) enumeration type
(0). */
#define SET_SCOPED_ENUM_P(TYPE, VAL) \
(ENUM_IS_SCOPED (TYPE) = (VAL))
#define SET_OPAQUE_ENUM_P(TYPE, VAL) \
(ENUM_IS_OPAQUE (TYPE) = (VAL))
#define OPAQUE_ENUM_P(TYPE) \
(TREE_CODE (TYPE) == ENUMERAL_TYPE && ENUM_IS_OPAQUE (TYPE))
/* Determines whether an ENUMERAL_TYPE has an explicit
underlying type. */
#define ENUM_FIXED_UNDERLYING_TYPE_P(NODE) (TYPE_LANG_FLAG_5 (NODE))
/* Returns the underlying type of the given enumeration type. The
underlying type is determined in different ways, depending on the
properties of the enum:
- In C++0x, the underlying type can be explicitly specified, e.g.,
enum E1 : char { ... } // underlying type is char
- In a C++0x scoped enumeration, the underlying type is int
unless otherwise specified:
enum class E2 { ... } // underlying type is int
- Otherwise, the underlying type is determined based on the
values of the enumerators. In this case, the
ENUM_UNDERLYING_TYPE will not be set until after the definition
of the enumeration is completed by finish_enum. */
#define ENUM_UNDERLYING_TYPE(TYPE) \
TREE_TYPE (ENUMERAL_TYPE_CHECK (TYPE))
/* [dcl.init.aggr]
An aggregate is an array or a class with no user-provided
constructors, no brace-or-equal-initializers for non-static data
members, no private or protected non-static data members, no
base classes, and no virtual functions.
As an extension, we also treat vectors as aggregates. Keep these
checks in ascending code order. */
#define CP_AGGREGATE_TYPE_P(TYPE) \
(TREE_CODE (TYPE) == VECTOR_TYPE \
|| TREE_CODE (TYPE) == ARRAY_TYPE \
|| (CLASS_TYPE_P (TYPE) && !CLASSTYPE_NON_AGGREGATE (TYPE)))
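/* Illustration: for `struct A { int i, j; };' CP_AGGREGATE_TYPE_P holds;
giving A a user-provided constructor, a base class, or a virtual
function makes CLASSTYPE_NON_AGGREGATE true and this predicate false. */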
/* Nonzero for a class type means that the class type has a
user-declared constructor. */
#define TYPE_HAS_USER_CONSTRUCTOR(NODE) (TYPE_LANG_FLAG_1 (NODE))
/* Nonzero means that the FUNCTION_TYPE or METHOD_TYPE has a
late-specified return type. */
#define TYPE_HAS_LATE_RETURN_TYPE(NODE) \
(TYPE_LANG_FLAG_2 (FUNC_OR_METHOD_CHECK (NODE)))
/* When appearing in an INDIRECT_REF, it means that the tree structure
underneath is actually a call to a constructor. This is needed
when the constructor must initialize local storage (which can
be automatically destroyed), rather than allowing it to allocate
space from the heap.
When appearing in a SAVE_EXPR, it means that underneath
is a call to a constructor.
When appearing in a CONSTRUCTOR, the expression is a
compound literal.
When appearing in a FIELD_DECL, it means that this field
has been duly initialized in its constructor. */
#define TREE_HAS_CONSTRUCTOR(NODE) (TREE_LANG_FLAG_4 (NODE))
/* True if NODE is a brace-enclosed initializer. */
#define BRACE_ENCLOSED_INITIALIZER_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_TYPE (NODE) == init_list_type_node)
/* True if NODE is a compound-literal, i.e., a brace-enclosed
initializer cast to a particular type. */
#define COMPOUND_LITERAL_P(NODE) \
(TREE_CODE (NODE) == CONSTRUCTOR && TREE_HAS_CONSTRUCTOR (NODE))
#define EMPTY_CONSTRUCTOR_P(NODE) (TREE_CODE (NODE) == CONSTRUCTOR \
&& vec_safe_is_empty (CONSTRUCTOR_ELTS (NODE)) \
&& !TREE_HAS_CONSTRUCTOR (NODE))
/* True if NODE is an init-list used as a direct-initializer, i.e.
B b{1,2}, not B b({1,2}) or B b = {1,2}. */
#define CONSTRUCTOR_IS_DIRECT_INIT(NODE) (TREE_LANG_FLAG_0 (CONSTRUCTOR_CHECK (NODE)))
/* True if an uninitialized element in NODE should not be treated as
implicitly value-initialized. Only used in constexpr evaluation. */
#define CONSTRUCTOR_NO_IMPLICIT_ZERO(NODE) \
(TREE_LANG_FLAG_1 (CONSTRUCTOR_CHECK (NODE)))
/* True if this CONSTRUCTOR should not be used as a variable initializer
because it was loaded from a constexpr variable with mutable fields. */
#define CONSTRUCTOR_MUTABLE_POISON(NODE) \
(TREE_LANG_FLAG_2 (CONSTRUCTOR_CHECK (NODE)))
/* True if this typed CONSTRUCTOR represents C99 compound-literal syntax rather
than C++11 functional cast syntax. */
#define CONSTRUCTOR_C99_COMPOUND_LITERAL(NODE) \
(TREE_LANG_FLAG_3 (CONSTRUCTOR_CHECK (NODE)))
/* True if this CONSTRUCTOR contains PLACEHOLDER_EXPRs referencing the
CONSTRUCTOR's type not nested inside another CONSTRUCTOR marked with
CONSTRUCTOR_PLACEHOLDER_BOUNDARY. */
#define CONSTRUCTOR_PLACEHOLDER_BOUNDARY(NODE) \
(TREE_LANG_FLAG_5 (CONSTRUCTOR_CHECK (NODE)))
#define DIRECT_LIST_INIT_P(NODE) \
(BRACE_ENCLOSED_INITIALIZER_P (NODE) && CONSTRUCTOR_IS_DIRECT_INIT (NODE))
/* True if NODE represents a conversion for direct-initialization in a
template. Set by perform_implicit_conversion_flags. */
#define IMPLICIT_CONV_EXPR_DIRECT_INIT(NODE) \
(TREE_LANG_FLAG_0 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* True if NODE represents a dependent conversion of a non-type template
argument. Set by maybe_convert_nontype_argument. */
#define IMPLICIT_CONV_EXPR_NONTYPE_ARG(NODE) \
(TREE_LANG_FLAG_1 (IMPLICIT_CONV_EXPR_CHECK (NODE)))
/* Nonzero means that an object of this type cannot be initialized using
an initializer list. */
#define CLASSTYPE_NON_AGGREGATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->non_aggregate)
#define TYPE_NON_AGGREGATE_CLASS(NODE) \
(CLASS_TYPE_P (NODE) && CLASSTYPE_NON_AGGREGATE (NODE))
/* Nonzero if there is a non-trivial X::op=(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_assign)
/* Nonzero if there is a non-trivial X::X(cv X&) for this class. */
#define TYPE_HAS_COMPLEX_COPY_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_copy_ctor)
/* Nonzero if there is a non-trivial X::op=(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_ASSIGN(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_assign)
/* Nonzero if there is a non-trivial X::X(X&&) for this class. */
#define TYPE_HAS_COMPLEX_MOVE_CTOR(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_move_ctor)
/* Nonzero if there is no trivial default constructor for this class. */
#define TYPE_HAS_COMPLEX_DFLT(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->has_complex_dflt)
/* Nonzero if TYPE has a trivial destructor. From [class.dtor]:
A destructor is trivial if it is an implicitly declared
destructor and if:
- all of the direct base classes of its class have trivial
destructors,
- for all of the non-static data members of its class that are
of class type (or array thereof), each such class has a
trivial destructor. */
#define TYPE_HAS_TRIVIAL_DESTRUCTOR(NODE) \
(!TYPE_HAS_NONTRIVIAL_DESTRUCTOR (NODE))
/* Nonzero for _TYPE node means that this type does not have a trivial
destructor. Therefore, destroying an object of this type will
involve a call to a destructor. This can apply to objects of
ARRAY_TYPE if the type of the elements needs a destructor. */
#define TYPE_HAS_NONTRIVIAL_DESTRUCTOR(NODE) \
(TYPE_LANG_FLAG_4 (NODE))
/* Nonzero for class type means that the default constructor is trivial. */
#define TYPE_HAS_TRIVIAL_DFLT(NODE) \
(TYPE_HAS_DEFAULT_CONSTRUCTOR (NODE) && ! TYPE_HAS_COMPLEX_DFLT (NODE))
/* Nonzero for class type means that copy initialization of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_CTOR(NODE) \
(TYPE_HAS_COPY_CTOR (NODE) && ! TYPE_HAS_COMPLEX_COPY_CTOR (NODE))
/* Nonzero for class type means that assignment of this type can use
a bitwise copy. */
#define TYPE_HAS_TRIVIAL_COPY_ASSIGN(NODE) \
(TYPE_HAS_COPY_ASSIGN (NODE) && ! TYPE_HAS_COMPLEX_COPY_ASSIGN (NODE))
/* Returns true if NODE is a pointer-to-data-member. */
#define TYPE_PTRDATAMEM_P(NODE) \
(TREE_CODE (NODE) == OFFSET_TYPE)
/* Returns true if NODE is a pointer. */
#define TYPE_PTR_P(NODE) \
(TREE_CODE (NODE) == POINTER_TYPE)
/* Returns true if NODE is an object type:
[basic.types]
An object type is a (possibly cv-qualified) type that is not a
function type, not a reference type, and not a void type.
Keep these checks in ascending order, for speed. */
#define TYPE_OBJ_P(NODE) \
(TREE_CODE (NODE) != REFERENCE_TYPE \
&& !VOID_TYPE_P (NODE) \
&& TREE_CODE (NODE) != FUNCTION_TYPE \
&& TREE_CODE (NODE) != METHOD_TYPE)
/* Returns true if NODE is a pointer to an object. Keep these checks
in ascending tree code order. */
#define TYPE_PTROB_P(NODE) \
(TYPE_PTR_P (NODE) && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a reference to an object. Keep these checks
in ascending tree code order. */
#define TYPE_REF_OBJ_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE && TYPE_OBJ_P (TREE_TYPE (NODE)))
/* Returns true if NODE is a pointer to an object, or a pointer to
void. Keep these checks in ascending tree code order. */
#define TYPE_PTROBV_P(NODE) \
(TYPE_PTR_P (NODE) \
&& !(TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE \
|| TREE_CODE (TREE_TYPE (NODE)) == METHOD_TYPE))
/* Returns true if NODE is a pointer to function type. */
#define TYPE_PTRFN_P(NODE) \
(TYPE_PTR_P (NODE) \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a reference to function type. */
#define TYPE_REFFN_P(NODE) \
(TREE_CODE (NODE) == REFERENCE_TYPE \
&& TREE_CODE (TREE_TYPE (NODE)) == FUNCTION_TYPE)
/* Returns true if NODE is a pointer to member function type. */
#define TYPE_PTRMEMFUNC_P(NODE) \
(TREE_CODE (NODE) == RECORD_TYPE \
&& TYPE_PTRMEMFUNC_FLAG (NODE))
#define TYPE_PTRMEMFUNC_FLAG(NODE) \
(TYPE_LANG_FLAG_2 (RECORD_TYPE_CHECK (NODE)))
/* Returns true if NODE is a pointer-to-member. */
#define TYPE_PTRMEM_P(NODE) \
(TYPE_PTRDATAMEM_P (NODE) || TYPE_PTRMEMFUNC_P (NODE))
/* Returns true if NODE is a pointer or a pointer-to-member. */
#define TYPE_PTR_OR_PTRMEM_P(NODE) \
(TYPE_PTR_P (NODE) || TYPE_PTRMEM_P (NODE))
/* Indicates when overload resolution may resolve to a pointer to
member function. [expr.unary.op]/3 */
#define PTRMEM_OK_P(NODE) \
TREE_LANG_FLAG_0 (TREE_CHECK3 ((NODE), ADDR_EXPR, OFFSET_REF, SCOPE_REF))
/* Get the POINTER_TYPE to the METHOD_TYPE associated with this
pointer to member function. TYPE_PTRMEMFUNC_P _must_ be true
before using this macro. */
#define TYPE_PTRMEMFUNC_FN_TYPE(NODE) \
(cp_build_qualified_type (TREE_TYPE (TYPE_FIELDS (NODE)),\
cp_type_quals (NODE)))
/* As above, but can be used in places that want an lvalue at the expense
of not necessarily having the correct cv-qualifiers. */
#define TYPE_PTRMEMFUNC_FN_TYPE_RAW(NODE) \
(TREE_TYPE (TYPE_FIELDS (NODE)))
/* Returns `A' for a type like `int (A::*)(double)'. */
#define TYPE_PTRMEMFUNC_OBJECT_TYPE(NODE) \
TYPE_METHOD_BASETYPE (TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* The canonical internal RECORD_TYPE from the POINTER_TYPE to
METHOD_TYPE. */
#define TYPE_PTRMEMFUNC_TYPE(NODE) \
TYPE_LANG_SLOT_1 (NODE)
/* For a pointer-to-member type of the form `T X::*', this is `X'.
For a type like `void (X::*)() const', this type is `X', not `const
X'. To get at the `const X' you have to look at the
TYPE_PTRMEM_POINTED_TO_TYPE; there, the first parameter will have
type `const X*'. */
#define TYPE_PTRMEM_CLASS_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TYPE_OFFSET_BASETYPE (NODE) \
: TYPE_PTRMEMFUNC_OBJECT_TYPE (NODE))
/* For a pointer-to-member type of the form `T X::*', this is `T'. */
#define TYPE_PTRMEM_POINTED_TO_TYPE(NODE) \
(TYPE_PTRDATAMEM_P (NODE) \
? TREE_TYPE (NODE) \
: TREE_TYPE (TYPE_PTRMEMFUNC_FN_TYPE (NODE)))
/* For a pointer-to-member constant `X::Y' this is the RECORD_TYPE for
`X'. */
#define PTRMEM_CST_CLASS(NODE) \
TYPE_PTRMEM_CLASS_TYPE (TREE_TYPE (PTRMEM_CST_CHECK (NODE)))
/* For a pointer-to-member constant `X::Y' this is the _DECL for
`Y'. */
#define PTRMEM_CST_MEMBER(NODE) \
(((ptrmem_cst_t)PTRMEM_CST_CHECK (NODE))->member)
/* The expression in question for a TYPEOF_TYPE. */
#define TYPEOF_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (TYPEOF_TYPE_CHECK (NODE)))
/* The type in question for an UNDERLYING_TYPE. */
#define UNDERLYING_TYPE_TYPE(NODE) \
(TYPE_VALUES_RAW (UNDERLYING_TYPE_CHECK (NODE)))
/* The type in question for BASES. */
#define BASES_TYPE(NODE) \
(TYPE_VALUES_RAW (BASES_CHECK (NODE)))
#define BASES_DIRECT(NODE) \
TREE_LANG_FLAG_0 (BASES_CHECK (NODE))
/* The expression in question for a DECLTYPE_TYPE. */
#define DECLTYPE_TYPE_EXPR(NODE) (TYPE_VALUES_RAW (DECLTYPE_TYPE_CHECK (NODE)))
/* Whether the DECLTYPE_TYPE_EXPR of NODE was originally parsed as an
id-expression or a member-access expression. When false, it was
parsed as a full expression. */
#define DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P(NODE) \
(DECLTYPE_TYPE_CHECK (NODE))->type_common.string_flag
/* These flags indicate that we want different semantics from normal
decltype: lambda capture just drops references, init capture
uses auto semantics, lambda proxies look through implicit dereference. */
#define DECLTYPE_FOR_LAMBDA_CAPTURE(NODE) \
TREE_LANG_FLAG_0 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_INIT_CAPTURE(NODE) \
TREE_LANG_FLAG_1 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_LAMBDA_PROXY(NODE) \
TREE_LANG_FLAG_2 (DECLTYPE_TYPE_CHECK (NODE))
#define DECLTYPE_FOR_REF_CAPTURE(NODE) \
TREE_LANG_FLAG_3 (DECLTYPE_TYPE_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `extern' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_EXTERN(NODE) \
DECL_LANG_FLAG_2 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for VAR_DECL and FUNCTION_DECL node means that `static' was
specified in its declaration. This can also be set for an
erroneously declared PARM_DECL. */
#define DECL_THIS_STATIC(NODE) \
DECL_LANG_FLAG_6 (VAR_FUNCTION_OR_PARM_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a lambda capture
field for an array of runtime bound. */
#define DECL_VLA_CAPTURE_P(NODE) \
DECL_LANG_FLAG_1 (FIELD_DECL_CHECK (NODE))
/* Nonzero for PARM_DECL node means that this is an array function
parameter, i.e., a[] rather than *a. */
#define DECL_ARRAY_PARAMETER_P(NODE) \
DECL_LANG_FLAG_1 (PARM_DECL_CHECK (NODE))
/* Nonzero for a FIELD_DECL whose NSDMI is currently being
instantiated. */
#define DECL_INSTANTIATING_NSDMI_P(NODE) \
DECL_LANG_FLAG_2 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a base class
of the parent object, as opposed to a member field. */
#define DECL_FIELD_IS_BASE(NODE) \
DECL_LANG_FLAG_6 (FIELD_DECL_CHECK (NODE))
/* Nonzero for FIELD_DECL node means that this field is a simple (no
explicit initializer) lambda capture field, making it invisible to
name lookup in unevaluated contexts. */
#define DECL_NORMAL_CAPTURE_P(NODE) \
DECL_LANG_FLAG_7 (FIELD_DECL_CHECK (NODE))
/* Nonzero if TYPE is an anonymous union or struct type. We have to use a
flag for this because "A union for which objects or pointers are
declared is not an anonymous union" [class.union]. */
#define ANON_AGGR_TYPE_P(NODE) \
(CLASS_TYPE_P (NODE) && LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr)
#define SET_ANON_AGGR_TYPE_P(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->anon_aggr = 1)
/* Nonzero if TYPE is an anonymous union type. */
#define ANON_UNION_TYPE_P(NODE) \
(TREE_CODE (NODE) == UNION_TYPE && ANON_AGGR_TYPE_P (NODE))
/* Define fields and accessors for nodes representing declared names. */
/* Nonzero if TYPE is an unnamed class with a typedef for linkage purposes. */
#define TYPE_WAS_UNNAMED(NODE) (LANG_TYPE_CLASS_CHECK (NODE)->was_anonymous)
/* C++: all of these are overloaded! These apply only to TYPE_DECLs. */
/* The format of each node in the DECL_FRIENDLIST is as follows:
The TREE_PURPOSE will be the name of a function, i.e., an
IDENTIFIER_NODE. The TREE_VALUE will be itself a TREE_LIST, whose
TREE_VALUEs are friends with the given name. */
#define DECL_FRIENDLIST(NODE) (DECL_INITIAL (NODE))
#define FRIEND_NAME(LIST) (TREE_PURPOSE (LIST))
#define FRIEND_DECLS(LIST) (TREE_VALUE (LIST))
/* The DECL_ACCESS, if non-NULL, is a TREE_LIST. The TREE_PURPOSE of
each node is a type; the TREE_VALUE is the access granted for this
DECL in that type. The DECL_ACCESS is set by access declarations.
For example, if a member that would normally be public in a
derived class is made protected, then the derived class and the
protected_access_node will appear in the DECL_ACCESS for the node. */
#define DECL_ACCESS(NODE) (LANG_DECL_U2_CHECK (NODE, 0)->access)
/* Nonzero if the FUNCTION_DECL is a global constructor. */
#define DECL_GLOBAL_CTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_ctor_p)
/* Nonzero if the FUNCTION_DECL is a global destructor. */
#define DECL_GLOBAL_DTOR_P(NODE) \
(LANG_DECL_FN_CHECK (NODE)->global_dtor_p)
/* Accessor macros for C++ template decl nodes. */
/* The DECL_TEMPLATE_PARMS are a list. The TREE_PURPOSE of each node
is an INT_CST whose TREE_INT_CST_LOW indicates the level of the
template parameters, with 1 being the outermost set of template
parameters. The TREE_VALUE is a vector, whose elements are the
template parameters at each level. Each element in the vector is a
TREE_LIST, whose TREE_VALUE is a PARM_DECL (if the parameter is a
non-type parameter), or a TYPE_DECL (if the parameter is a type
parameter). The TREE_PURPOSE is the default value, if any. The
TEMPLATE_PARM_INDEX for the parameter is available as the
DECL_INITIAL (for a PARM_DECL) or as the TREE_TYPE (for a
TYPE_DECL).
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to C++ class hierarchy. */
#define DECL_TEMPLATE_PARMS(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE (TEMPLATE_DECL_CHECK (NODE)))->arguments
#define DECL_INNERMOST_TEMPLATE_PARMS(NODE) \
INNERMOST_TEMPLATE_PARMS (DECL_TEMPLATE_PARMS (NODE))
#define DECL_NTPARMS(NODE) \
TREE_VEC_LENGTH (DECL_INNERMOST_TEMPLATE_PARMS (NODE))
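/* A sketch of the structure above (illustrative): for
template <class T, int N> struct A;
DECL_TEMPLATE_PARMS is a one-node list whose TREE_PURPOSE is the
INT_CST 1 (the level) and whose TREE_VALUE is a TREE_VEC of two
TREE_LISTs, one holding the TYPE_DECL for `T' and one holding the
PARM_DECL for `N'; DECL_NTPARMS is then 2. */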
/* For function, method, class-data templates.
FIXME: CONST_CAST_TREE is a hack that hopefully will go away after
tree is converted to C++ class hierarchy. */
#define DECL_TEMPLATE_RESULT(NODE) \
((struct tree_template_decl *)CONST_CAST_TREE(TEMPLATE_DECL_CHECK (NODE)))->result
/* For a function template at namespace scope, DECL_TEMPLATE_INSTANTIATIONS
lists all instantiations and specializations of the function so that
tsubst_friend_function can reassign them to another template if we find
that the namespace-scope template is really a partial instantiation of a
friend template.
For a class template the DECL_TEMPLATE_INSTANTIATIONS lists holds
all instantiations and specializations of the class type, including
partial instantiations and partial specializations, so that if we
explicitly specialize a partial instantiation we can walk the list
in maybe_process_partial_specialization and reassign them or complain
as appropriate.
In both cases, the TREE_PURPOSE of each node contains the arguments
used; the TREE_VALUE contains the generated variable. The template
arguments are always complete. For example, given:
template <class T> struct S1 {
template <class U> struct S2 {};
template <class U> struct S2<U*> {};
};
the record for the partial specialization will contain, as its
argument list, { {T}, {U*} }, and will be on the
DECL_TEMPLATE_INSTANTIATIONS list for `template <class T> template
<class U> struct S1<T>::S2'.
This list is not used for other templates. */
#define DECL_TEMPLATE_INSTANTIATIONS(NODE) \
DECL_SIZE_UNIT (TEMPLATE_DECL_CHECK (NODE))
/* For a class template, this list contains the partial
specializations of this template. (Full specializations are not
recorded on this list.) The TREE_PURPOSE holds the arguments used
in the partial specialization (e.g., for `template <class T> struct
S<T*, int>' this will be `T*, int'.) The arguments will also include
any outer template arguments. The TREE_VALUE holds the TEMPLATE_DECL
for the partial specialization. The TREE_TYPE is the _TYPE node for
the partial specialization.
This list is not used for other templates. */
#define DECL_TEMPLATE_SPECIALIZATIONS(NODE) \
DECL_SIZE (TEMPLATE_DECL_CHECK (NODE))
/* Nonzero for a DECL which is actually a template parameter. Keep
these checks in ascending tree code order. */
#define DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) \
&& (TREE_CODE (NODE) == CONST_DECL \
|| TREE_CODE (NODE) == PARM_DECL \
|| TREE_CODE (NODE) == TYPE_DECL \
|| TREE_CODE (NODE) == TEMPLATE_DECL))
/* Nonzero for a raw template parameter node. */
#define TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_TYPE_PARM \
|| TREE_CODE (NODE) == TEMPLATE_TEMPLATE_PARM \
|| TREE_CODE (NODE) == TEMPLATE_PARM_INDEX)
/* Mark NODE as a template parameter. */
#define SET_DECL_TEMPLATE_PARM_P(NODE) \
(DECL_LANG_FLAG_0 (NODE) = 1)
/* Nonzero if NODE is a template template parameter. */
#define DECL_TEMPLATE_TEMPLATE_PARM_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL && DECL_TEMPLATE_PARM_P (NODE))
/* Nonzero for a DECL that represents a function template. */
#define DECL_FUNCTION_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == FUNCTION_DECL)
/* Nonzero for a DECL that represents a class template or alias
template. */
#define DECL_TYPE_TEMPLATE_P(NODE) \
(TREE_CODE (NODE) == TEMPLATE_DECL \
&& DECL_TEMPLATE_RESULT (NODE) != NULL_TREE \
&& TREE_CODE (DECL_TEMPLATE_RESULT (NODE)) == TYPE_DECL)
/* Nonzero for a DECL that represents a class template. */
#define DECL_CLASS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& DECL_IMPLICIT_TYPEDEF_P (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a TEMPLATE_DECL that represents an alias template. */
#define DECL_ALIAS_TEMPLATE_P(NODE) \
(DECL_TYPE_TEMPLATE_P (NODE) \
&& !DECL_ARTIFICIAL (DECL_TEMPLATE_RESULT (NODE)))
/* Nonzero for a NODE which declares a type. */
#define DECL_DECLARES_TYPE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL || DECL_TYPE_TEMPLATE_P (NODE))
/* Nonzero if NODE declares a function. */
#define DECL_DECLARES_FUNCTION_P(NODE) \
(TREE_CODE (NODE) == FUNCTION_DECL || DECL_FUNCTION_TEMPLATE_P (NODE))
/* Nonzero if NODE is the typedef implicitly generated for a type when
the type is declared. In C++, `struct S {};' is roughly
equivalent to `struct S {}; typedef struct S S;' in C.
DECL_IMPLICIT_TYPEDEF_P will hold for the typedef indicated in this
example. In C++, there is a second implicit typedef for each
class, called the injected-class-name, in the scope of `S' itself, so that
you can say `S::S'. DECL_SELF_REFERENCE_P will hold for that typedef. */
#define DECL_IMPLICIT_TYPEDEF_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_2 (NODE))
#define SET_DECL_IMPLICIT_TYPEDEF_P(NODE) \
(DECL_LANG_FLAG_2 (NODE) = 1)
#define DECL_SELF_REFERENCE_P(NODE) \
(TREE_CODE (NODE) == TYPE_DECL && DECL_LANG_FLAG_4 (NODE))
#define SET_DECL_SELF_REFERENCE_P(NODE) \
(DECL_LANG_FLAG_4 (NODE) = 1)
/* A `primary' template is one that has its own template header and is not
a partial specialization. A member function of a class template is a
template, but not primary. A member template is primary. Friend
templates are primary, too. */
/* Returns the primary template corresponding to these parameters. */
#define DECL_PRIMARY_TEMPLATE(NODE) \
(TREE_TYPE (DECL_INNERMOST_TEMPLATE_PARMS (NODE)))
/* Returns nonzero if NODE is a primary template. */
#define PRIMARY_TEMPLATE_P(NODE) (DECL_PRIMARY_TEMPLATE (NODE) == (NODE))
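/* Illustrative examples (not part of the original header):
     template <class T> struct X          // primary
     {
       void f ();                         // not a template
       template <class U> void g ();      // primary (member template)
     };
     template <class T> struct X<T*> { }; // not primary (partial spec.)  */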
/* Nonzero iff NODE is a specialization of a template. The value
indicates the type of specializations:
1=implicit instantiation
2=partial or explicit specialization, e.g.:
template <> int min<int> (int, int),
3=explicit instantiation, e.g.:
template int min<int> (int, int);
Note that NODE will be marked as a specialization even if the
template it is instantiating is not a primary template. For
example, given:
template <typename T> struct O {
void f();
struct I {};
};
both O<int>::f and O<int>::I will be marked as instantiations.
If DECL_USE_TEMPLATE is nonzero, then DECL_TEMPLATE_INFO will also
be non-NULL. */
#define DECL_USE_TEMPLATE(NODE) (DECL_LANG_SPECIFIC (NODE)->u.base.use_template)
/* Like DECL_USE_TEMPLATE, but for class types. */
#define CLASSTYPE_USE_TEMPLATE(NODE) \
(LANG_TYPE_CLASS_CHECK (NODE)->use_template)
/* True if NODE is a specialization of a primary template. */
#define CLASSTYPE_SPECIALIZATION_OF_PRIMARY_TEMPLATE_P(NODE) \
(CLASS_TYPE_P (NODE) \
&& CLASSTYPE_USE_TEMPLATE (NODE) \
&& PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (NODE)))
#define DECL_TEMPLATE_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) & 1)
#define CLASSTYPE_TEMPLATE_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) & 1)
#define DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) == 2)
#define SET_DECL_TEMPLATE_SPECIALIZATION(NODE) (DECL_USE_TEMPLATE (NODE) = 2)
/* Returns true for an explicit or partial specialization of a class
template. */
#define CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 2)
#define SET_CLASSTYPE_TEMPLATE_SPECIALIZATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 2)
#define DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 1)
#define SET_DECL_IMPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 1)
#define CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 1)
#define SET_CLASSTYPE_IMPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 1)
#define DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) == 3)
#define SET_DECL_EXPLICIT_INSTANTIATION(NODE) (DECL_USE_TEMPLATE (NODE) = 3)
#define CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) == 3)
#define SET_CLASSTYPE_EXPLICIT_INSTANTIATION(NODE) \
(CLASSTYPE_USE_TEMPLATE (NODE) = 3)
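/* Illustrative examples (not part of the original header) of the
   DECL_USE_TEMPLATE values:
     template <class T> T min (T, T);
     int i = min (1, 2);                              // min<int>: implicit instantiation (1)
     template <> double min<double> (double, double); // explicit specialization (2)
     template long min<long> (long, long);            // explicit instantiation (3)  */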
/* Nonzero if DECL is a friend function which is an instantiation
from the point of view of the compiler, but not from the point of
view of the language. For example given:
template <class T> struct S { friend void f(T) {}; };
the declaration of `void f(int)' generated when S<int> is
instantiated will not be a DECL_TEMPLATE_INSTANTIATION, but will be
a DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION. */
#define DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION(DECL) \
(DECL_LANG_SPECIFIC (DECL) && DECL_TEMPLATE_INFO (DECL) \
&& !DECL_USE_TEMPLATE (DECL))
/* Nonzero if DECL is a function generated from a function 'temploid',
i.e. template, member of class template, or dependent friend. */
#define DECL_TEMPLOID_INSTANTIATION(DECL) \
(DECL_TEMPLATE_INSTANTIATION (DECL) \
|| DECL_FRIEND_PSEUDO_TEMPLATE_INSTANTIATION (DECL))
/* Nonzero if DECL is either defined implicitly by the compiler or
generated from a temploid. */
#define DECL_GENERATED_P(DECL) \
(DECL_TEMPLOID_INSTANTIATION (DECL) || DECL_DEFAULTED_FN (DECL))
/* Nonzero iff we are currently processing a declaration for an
entity with its own template parameter list, and which is not a
full specialization. */
#define PROCESSING_REAL_TEMPLATE_DECL_P() \
(!processing_template_parmlist \
&& processing_template_decl > template_class_depth (current_scope ()))
/* Nonzero if this VAR_DECL or FUNCTION_DECL has already been
instantiated, i.e. its definition has been generated from the
pattern given in the template. */
#define DECL_TEMPLATE_INSTANTIATED(NODE) \
DECL_LANG_FLAG_1 (VAR_OR_FUNCTION_DECL_CHECK (NODE))
/* We know what we're doing with this decl now. */
#define DECL_INTERFACE_KNOWN(NODE) DECL_LANG_FLAG_5 (NODE)
/* DECL_EXTERNAL must be set on a decl until the decl is actually emitted,
so that assemble_external will work properly. So we have this flag to
tell us whether the decl is really not external.
This flag does not indicate whether or not the decl is defined in the
current translation unit; it indicates whether or not we should emit the
decl at the end of compilation if it is defined and needed. */
#define DECL_NOT_REALLY_EXTERN(NODE) \
(DECL_LANG_SPECIFIC (NODE)->u.base.not_really_extern)
#define DECL_REALLY_EXTERN(NODE) \
(DECL_EXTERNAL (NODE) \
&& (!DECL_LANG_SPECIFIC (NODE) || !DECL_NOT_REALLY_EXTERN (NODE)))
/* A thunk is a stub function.
A thunk is an alternate entry point for an ordinary FUNCTION_DECL.
The address of the ordinary FUNCTION_DECL is given by the
DECL_INITIAL, which is always an ADDR_EXPR whose operand is a
FUNCTION_DECL. The job of the thunk is to either adjust the this
pointer before transferring control to the FUNCTION_DECL, or call
FUNCTION_DECL and then adjust the result value. Note, the result
pointer adjusting thunk must perform a call to the thunked
function (or be implemented via passing some invisible parameter
to the thunked function, which is modified to perform the
adjustment just before returning).
A thunk may perform either, or both, of the following operations:
o Adjust the this or result pointer by a constant offset.
o Adjust the this or result pointer by looking up a vcall or vbase offset
in the vtable.
A this pointer adjusting thunk converts from a base to a derived
class, and hence adds the offsets. A result pointer adjusting thunk
converts from a derived class to a base, and hence subtracts the
offsets. If both operations are performed, then the constant
adjustment is performed first for this pointer adjustment and last
for the result pointer adjustment.
The constant adjustment is given by THUNK_FIXED_OFFSET. If the
vcall or vbase offset is required, THUNK_VIRTUAL_OFFSET is
used. For this pointer adjusting thunks, it is the vcall offset
into the vtable. For result pointer adjusting thunks it is the
binfo of the virtual base to convert to. Use that binfo's vbase
offset.
It is possible to have equivalent covariant thunks. These are
distinct virtual covariant thunks whose vbase offsets happen to
have the same value. THUNK_ALIAS is used to pick one as the
canonical thunk, which will get all the this pointer adjusting
thunks attached to it. */
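/* Conceptual sketch (not part of the original header) of what a
   this-adjusting thunk does; `real_fn', `Base' and `Derived' are
   hypothetical names:
     void thunk (Base *this_)
     {
       char *p = (char *) this_ + THUNK_FIXED_OFFSET;  // constant part
       // if THUNK_VIRTUAL_OFFSET is set: p += vtable entry at that offset
       real_fn ((Derived *) p);
     }  */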
/* An integer indicating how many bytes should be subtracted from the
this or result pointer when this function is called. */
#define THUNK_FIXED_OFFSET(DECL) \
(DECL_LANG_SPECIFIC (THUNK_FUNCTION_CHECK (DECL))->u.fn.u5.fixed_offset)
/* A tree indicating how to perform the virtual adjustment. For a this
adjusting thunk it is the number of bytes to be added to the vtable
to find the vcall offset. For a result adjusting thunk, it is the
binfo of the relevant virtual base. If NULL, then there is no
virtual adjust. (The vptr is always located at offset zero from
the this or result pointer.) (If the covariant type is within the
class hierarchy being laid out, the vbase index is not yet known
at the point we need to create the thunks, hence the need to use
binfos.) */
#define THUNK_VIRTUAL_OFFSET(DECL) \
(LANG_DECL_U2_CHECK (FUNCTION_DECL_CHECK (DECL), 0)->access)
/* A thunk which is equivalent to another thunk. */
#define THUNK_ALIAS(DECL) \
(DECL_LANG_SPECIFIC (FUNCTION_DECL_CHECK (DECL))->u.min.template_info)
/* For thunk NODE, this is the FUNCTION_DECL thunked to. It is
possible for the target to be a thunk too. */
#define THUNK_TARGET(NODE) \
(LANG_DECL_FN_CHECK (NODE)->befriending_classes)
/* True for a SCOPE_REF iff the "template" keyword was used to
indicate that the qualified name denotes a template. */
#define QUALIFIED_NAME_IS_TEMPLATE(NODE) \
(TREE_LANG_FLAG_1 (SCOPE_REF_CHECK (NODE)))
/* True for an OMP_ATOMIC that has dependent parameters. These are stored
as an expr in operand 1, and integer_zero_node in operand 0. */
#define OMP_ATOMIC_DEPENDENT_P(NODE) \
(TREE_CODE (TREE_OPERAND (OMP_ATOMIC_CHECK (NODE), 0)) == INTEGER_CST)
/* Used while gimplifying continue statements bound to OMP_FOR nodes. */
#define OMP_FOR_GIMPLIFYING_P(NODE) \
(TREE_LANG_FLAG_0 (OMP_LOOP_CHECK (NODE)))
/* A language-specific token attached to the OpenMP data clauses to
hold code (or code fragments) related to ctors, dtors, and op=.
See semantics.c for details. */
#define CP_OMP_CLAUSE_INFO(NODE) \
TREE_TYPE (OMP_CLAUSE_RANGE_CHECK (NODE, OMP_CLAUSE_PRIVATE, \
OMP_CLAUSE_LINEAR))
/* Nonzero if this transaction expression's body contains statements. */
#define TRANSACTION_EXPR_IS_STMT(NODE) \
TREE_LANG_FLAG_0 (TRANSACTION_EXPR_CHECK (NODE))
/* These macros provide convenient access to the various _STMT nodes
created when parsing template declarations. */
#define TRY_STMTS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 0)
#define TRY_HANDLERS(NODE) TREE_OPERAND (TRY_BLOCK_CHECK (NODE), 1)
#define EH_SPEC_STMTS(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 0)
#define EH_SPEC_RAISES(NODE) TREE_OPERAND (EH_SPEC_BLOCK_CHECK (NODE), 1)
#define USING_STMT_NAMESPACE(NODE) TREE_OPERAND (USING_STMT_CHECK (NODE), 0)
/* Nonzero if this try block is a function try block. */
#define FN_TRY_BLOCK_P(NODE) TREE_LANG_FLAG_3 (TRY_BLOCK_CHECK (NODE))
#define HANDLER_PARMS(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 0)
#define HANDLER_BODY(NODE) TREE_OPERAND (HANDLER_CHECK (NODE), 1)
#define HANDLER_TYPE(NODE) TREE_TYPE (HANDLER_CHECK (NODE))
/* CLEANUP_STMT accessors. The statement(s) covered, the cleanup to run
and the VAR_DECL for which this cleanup exists. */
#define CLEANUP_BODY(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 0)
#define CLEANUP_EXPR(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 1)
#define CLEANUP_DECL(NODE) TREE_OPERAND (CLEANUP_STMT_CHECK (NODE), 2)
/* IF_STMT accessors. These give access to the condition of the if
statement, the then block of the if statement, and the else block
of the if statement if it exists. */
#define IF_COND(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 0)
#define THEN_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 1)
#define ELSE_CLAUSE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 2)
#define IF_SCOPE(NODE) TREE_OPERAND (IF_STMT_CHECK (NODE), 3)
#define IF_STMT_CONSTEXPR_P(NODE) TREE_LANG_FLAG_0 (IF_STMT_CHECK (NODE))
/* Like PACK_EXPANSION_EXTRA_ARGS, for constexpr if. IF_SCOPE is used while
building an IF_STMT; IF_STMT_EXTRA_ARGS is used after it is complete. */
#define IF_STMT_EXTRA_ARGS(NODE) IF_SCOPE (NODE)
/* WHILE_STMT accessors. These give access to the condition of the
while statement and the body of the while statement, respectively. */
#define WHILE_COND(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 0)
#define WHILE_BODY(NODE) TREE_OPERAND (WHILE_STMT_CHECK (NODE), 1)
/* DO_STMT accessors. These give access to the condition of the do
statement and the body of the do statement, respectively. */
#define DO_COND(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 0)
#define DO_BODY(NODE) TREE_OPERAND (DO_STMT_CHECK (NODE), 1)
/* FOR_STMT accessors. These give access to the init statement,
condition, update expression, and body of the for statement,
respectively. */
#define FOR_INIT_STMT(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 0)
#define FOR_COND(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 1)
#define FOR_EXPR(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 2)
#define FOR_BODY(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 3)
#define FOR_SCOPE(NODE) TREE_OPERAND (FOR_STMT_CHECK (NODE), 4)
/* RANGE_FOR_STMT accessors. These give access to the declarator,
expression, body, and scope of the statement, respectively. */
#define RANGE_FOR_DECL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 0)
#define RANGE_FOR_EXPR(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 1)
#define RANGE_FOR_BODY(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 2)
#define RANGE_FOR_SCOPE(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 3)
#define RANGE_FOR_UNROLL(NODE) TREE_OPERAND (RANGE_FOR_STMT_CHECK (NODE), 4)
#define RANGE_FOR_IVDEP(NODE) TREE_LANG_FLAG_6 (RANGE_FOR_STMT_CHECK (NODE))
#define SWITCH_STMT_COND(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 0)
#define SWITCH_STMT_BODY(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 1)
#define SWITCH_STMT_TYPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 2)
#define SWITCH_STMT_SCOPE(NODE) TREE_OPERAND (SWITCH_STMT_CHECK (NODE), 3)
/* True if there are case labels for all possible values of switch cond, either
because there is a default: case label or because the case label ranges cover
all values. */
#define SWITCH_STMT_ALL_CASES_P(NODE) \
TREE_LANG_FLAG_0 (SWITCH_STMT_CHECK (NODE))
/* True if the body of a switch stmt contains no BREAK_STMTs. */
#define SWITCH_STMT_NO_BREAK_P(NODE) \
TREE_LANG_FLAG_2 (SWITCH_STMT_CHECK (NODE))
/* STMT_EXPR accessor. */
#define STMT_EXPR_STMT(NODE) TREE_OPERAND (STMT_EXPR_CHECK (NODE), 0)
/* EXPR_STMT accessor. This gives the expression associated with an
expression statement. */
#define EXPR_STMT_EXPR(NODE) TREE_OPERAND (EXPR_STMT_CHECK (NODE), 0)
/* True if this TARGET_EXPR was created by build_cplus_new, and so we can
discard it if it isn't useful. */
#define TARGET_EXPR_IMPLICIT_P(NODE) \
TREE_LANG_FLAG_0 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR is the result of list-initialization of a
temporary. */
#define TARGET_EXPR_LIST_INIT_P(NODE) \
TREE_LANG_FLAG_1 (TARGET_EXPR_CHECK (NODE))
/* True if this TARGET_EXPR expresses direct-initialization of an object
to be named later. */
#define TARGET_EXPR_DIRECT_INIT_P(NODE) \
TREE_LANG_FLAG_2 (TARGET_EXPR_CHECK (NODE))
/* True if NODE is a TARGET_EXPR that just expresses a copy of its INITIAL; if
the initializer has void type, it's doing something more complicated. */
#define SIMPLE_TARGET_EXPR_P(NODE) \
(TREE_CODE (NODE) == TARGET_EXPR \
&& !VOID_TYPE_P (TREE_TYPE (TARGET_EXPR_INITIAL (NODE))))
/* True if EXPR expresses direct-initialization of a TYPE. */
#define DIRECT_INIT_EXPR_P(TYPE,EXPR) \
(TREE_CODE (EXPR) == TARGET_EXPR && TREE_LANG_FLAG_2 (EXPR) \
&& same_type_ignoring_top_level_qualifiers_p (TYPE, TREE_TYPE (EXPR)))
/* True if this CONVERT_EXPR is for a conversion to virtual base in
an NSDMI, and should be re-evaluated when used in a constructor. */
#define CONVERT_EXPR_VBASE_PATH(NODE) \
TREE_LANG_FLAG_0 (CONVERT_EXPR_CHECK (NODE))
/* True if SIZEOF_EXPR argument is type. */
#define SIZEOF_EXPR_TYPE_P(NODE) \
TREE_LANG_FLAG_0 (SIZEOF_EXPR_CHECK (NODE))
/* True if the ALIGNOF_EXPR was spelled "alignof". */
#define ALIGNOF_EXPR_STD_P(NODE) \
TREE_LANG_FLAG_0 (ALIGNOF_EXPR_CHECK (NODE))
/* An enumeration of the kind of tags that C++ accepts. */
enum tag_types {
none_type = 0, /* Not a tag type. */
record_type, /* "struct" types. */
class_type, /* "class" types. */
union_type, /* "union" types. */
enum_type, /* "enum" types. */
typename_type, /* "typename" types. */
scope_type /* namespace or tagged type name followed by :: */
};
/* The various kinds of lvalues we distinguish. */
enum cp_lvalue_kind_flags {
clk_none = 0, /* Things that are not an lvalue. */
clk_ordinary = 1, /* An ordinary lvalue. */
clk_rvalueref = 2,/* An xvalue (rvalue formed using an rvalue reference) */
clk_class = 4, /* A prvalue of class or array type. */
clk_bitfield = 8, /* An lvalue for a bit-field. */
clk_packed = 16 /* An lvalue for a packed field. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum cp_lvalue_kind_flags. */
typedef int cp_lvalue_kind;
/* Various kinds of template specialization, instantiation, etc. */
enum tmpl_spec_kind {
tsk_none, /* Not a template at all. */
tsk_invalid_member_spec, /* An explicit member template
specialization, but the enclosing
classes have not all been explicitly
specialized. */
tsk_invalid_expl_inst, /* An explicit instantiation containing
template parameter lists. */
tsk_excessive_parms, /* A template declaration with too many
template parameter lists. */
tsk_insufficient_parms, /* A template declaration with too few
parameter lists. */
tsk_template, /* A template declaration. */
tsk_expl_spec, /* An explicit specialization. */
tsk_expl_inst /* An explicit instantiation. */
};
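/* Illustrative example (not part of the original header): given
     template <class T> struct S { template <class U> void f (U); };
   the declaration
     template <class T> template <> void S<T>::f (int);
   is tsk_invalid_member_spec, because the enclosing class template
   was not explicitly specialized.  */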
/* The various kinds of access. BINFO_ACCESS depends on these being
two bit quantities. The numerical values are important; they are
used to initialize RTTI data structures, so changing them changes
the ABI. */
enum access_kind {
ak_none = 0, /* Inaccessible. */
ak_public = 1, /* Accessible, as a `public' thing. */
ak_protected = 2, /* Accessible, as a `protected' thing. */
ak_private = 3 /* Accessible, as a `private' thing. */
};
/* The various kinds of special functions. If you add to this list,
you should update special_function_p as well. */
enum special_function_kind {
sfk_none = 0, /* Not a special function. This enumeral
must have value zero; see
special_function_p. */
sfk_constructor, /* A constructor. */
sfk_copy_constructor, /* A copy constructor. */
sfk_move_constructor, /* A move constructor. */
sfk_copy_assignment, /* A copy assignment operator. */
sfk_move_assignment, /* A move assignment operator. */
sfk_destructor, /* A destructor. */
sfk_complete_destructor, /* A destructor for complete objects. */
sfk_base_destructor, /* A destructor for base subobjects. */
sfk_deleting_destructor, /* A destructor for complete objects that
deletes the object after it has been
destroyed. */
sfk_conversion, /* A conversion operator. */
sfk_deduction_guide, /* A class template deduction guide. */
sfk_inheriting_constructor /* An inheriting constructor. */
};
/* The various kinds of linkage. From [basic.link],
A name is said to have linkage when it might denote the same
object, reference, function, type, template, namespace or value
as a name introduced in another scope:
-- When a name has external linkage, the entity it denotes can
be referred to from scopes of other translation units or from
other scopes of the same translation unit.
-- When a name has internal linkage, the entity it denotes can
be referred to by names from other scopes in the same
translation unit.
-- When a name has no linkage, the entity it denotes cannot be
referred to by names from other scopes. */
enum linkage_kind {
lk_none, /* No linkage. */
lk_internal, /* Internal linkage. */
lk_external /* External linkage. */
};
enum duration_kind {
dk_static,
dk_thread,
dk_auto,
dk_dynamic
};
/* Bitmask flags to control type substitution. */
enum tsubst_flags {
tf_none = 0, /* nothing special */
tf_error = 1 << 0, /* give error messages */
tf_warning = 1 << 1, /* give warnings too */
tf_ignore_bad_quals = 1 << 2, /* ignore bad cvr qualifiers */
tf_keep_type_decl = 1 << 3, /* retain typedef type decls
(make_typename_type use) */
tf_ptrmem_ok = 1 << 4, /* pointers to member ok (internal
instantiate_type use) */
tf_user = 1 << 5, /* found template must be a user template
(lookup_template_class use) */
tf_conv = 1 << 6, /* We are determining what kind of
conversion might be permissible,
not actually performing the
conversion. */
tf_decltype = 1 << 7, /* We are the operand of decltype.
Used to implement the special rules
for calls in decltype (5.2.2/11). */
tf_partial = 1 << 8, /* Doing initial explicit argument
substitution in fn_type_unification. */
tf_fndecl_type = 1 << 9, /* Substituting the type of a function
declaration. */
tf_no_cleanup = 1 << 10, /* Do not build a cleanup
(build_target_expr and friends) */
/* Convenient substitution flags combinations. */
tf_warning_or_error = tf_warning | tf_error
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum tsubst_flags. */
typedef int tsubst_flags_t;
/* The kind of checking we can do looking in a class hierarchy. */
enum base_access_flags {
ba_any = 0, /* Do not check access, allow an ambiguous base,
prefer a non-virtual base */
ba_unique = 1 << 0, /* Must be a unique base. */
ba_check_bit = 1 << 1, /* Check access. */
ba_check = ba_unique | ba_check_bit,
ba_ignore_scope = 1 << 2 /* Ignore access allowed by local scope. */
};
/* This type is used for parameters and variables which hold
combinations of the flags in enum base_access_flags. */
typedef int base_access;
/* The various kinds of access check during parsing. */
enum deferring_kind {
dk_no_deferred = 0, /* Check access immediately */
dk_deferred = 1, /* Deferred check */
dk_no_check = 2 /* No access check */
};
/* The kind of base we can find, looking in a class hierarchy.
Values <0 indicate we failed. */
enum base_kind {
bk_inaccessible = -3, /* The base is inaccessible */
bk_ambig = -2, /* The base is ambiguous */
bk_not_base = -1, /* It is not a base */
bk_same_type = 0, /* It is the same type */
bk_proper_base = 1, /* It is a proper base */
bk_via_virtual = 2 /* It is a proper base, but via a virtual
path. This might not be the canonical
binfo. */
};
/* Node for "pointer to (virtual) function".
This may be distinct from ptr_type_node so gdb can distinguish them. */
#define vfunc_ptr_type_node vtable_entry_type
/* For building calls to `delete'. */
extern GTY(()) tree integer_two_node;
/* The number of function bodies which we are currently processing.
(Zero if we are at namespace scope, one inside the body of a
function, two inside the body of a function in a local class, etc.) */
extern int function_depth;
/* Nonzero if we are inside eq_specializations, which affects comparison of
PARM_DECLs in cp_tree_equal. */
extern int comparing_specializations;
/* In parser.c. */
/* Nonzero if we are parsing an unevaluated operand: an operand to
sizeof, typeof, or alignof. This is a count since operands to
sizeof can be nested. */
extern int cp_unevaluated_operand;
/* RAII class used to inhibit the evaluation of operands during parsing
and template instantiation. Evaluation warnings are also inhibited. */
struct cp_unevaluated
{
cp_unevaluated ();
~cp_unevaluated ();
};
/* in pt.c */
/* These values are used for the `STRICT' parameter to type_unification and
fn_type_unification. Their meanings are described with the
documentation for fn_type_unification. */
enum unification_kind_t {
DEDUCE_CALL,
DEDUCE_CONV,
DEDUCE_EXACT
};
// An RAII class used to create a new pointer map for local
// specializations. When the stack goes out of scope, the
// previous pointer map is restored.
enum lss_policy { lss_blank, lss_copy };
struct local_specialization_stack
{
local_specialization_stack (lss_policy = lss_blank);
~local_specialization_stack ();
hash_map<tree, tree> *saved;
};
/* in class.c */
extern int current_class_depth;
/* An array of all local classes present in this translation unit, in
declaration order. */
extern GTY(()) vec<tree, va_gc> *local_classes;
/* in decl.c */
/* An array of static vars & fns. */
extern GTY(()) vec<tree, va_gc> *static_decls;
/* An array of vtable-needing types that have no key function, or have
an emitted key function. */
extern GTY(()) vec<tree, va_gc> *keyed_classes;
/* Here's where we control how name mangling takes place. */
/* Cannot use '$' up front, because this confuses gdb
(names beginning with '$' are gdb-local identifiers).
Note that all forms in which the '$' is significant are long enough
for direct indexing (meaning that if we know there is a '$'
at a particular location, we can index into the string at
any other location that provides distinguishing characters). */
/* Define NO_DOT_IN_LABEL in your favorite tm file if your assembler
doesn't allow '.' in symbol names. */
#ifndef NO_DOT_IN_LABEL
#define JOINER '.'
#define AUTO_TEMP_NAME "_.tmp_"
#define VFIELD_BASE ".vf"
#define VFIELD_NAME "_vptr."
#define VFIELD_NAME_FORMAT "_vptr.%s"
#else /* NO_DOT_IN_LABEL */
#ifndef NO_DOLLAR_IN_LABEL
#define JOINER '$'
#define AUTO_TEMP_NAME "_$tmp_"
#define VFIELD_BASE "$vf"
#define VFIELD_NAME "_vptr$"
#define VFIELD_NAME_FORMAT "_vptr$%s"
#else /* NO_DOLLAR_IN_LABEL */
#define AUTO_TEMP_NAME "__tmp_"
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, \
sizeof (AUTO_TEMP_NAME) - 1))
#define VTABLE_NAME "__vt_"
#define VTABLE_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VTABLE_NAME, \
sizeof (VTABLE_NAME) - 1))
#define VFIELD_BASE "__vfb"
#define VFIELD_NAME "__vptr_"
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, \
sizeof (VFIELD_NAME) - 1))
#define VFIELD_NAME_FORMAT "__vptr_%s"
#endif /* NO_DOLLAR_IN_LABEL */
#endif /* NO_DOT_IN_LABEL */
#define LAMBDANAME_PREFIX "__lambda"
#define LAMBDANAME_FORMAT LAMBDANAME_PREFIX "%d"
#define UDLIT_OP_ANSI_PREFIX "operator\"\""
#define UDLIT_OP_ANSI_FORMAT UDLIT_OP_ANSI_PREFIX "%s"
#define UDLIT_OP_MANGLED_PREFIX "li"
#define UDLIT_OP_MANGLED_FORMAT UDLIT_OP_MANGLED_PREFIX "%s"
#define UDLIT_OPER_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), \
UDLIT_OP_ANSI_PREFIX, \
sizeof (UDLIT_OP_ANSI_PREFIX) - 1))
#define UDLIT_OP_SUFFIX(ID_NODE) \
(IDENTIFIER_POINTER (ID_NODE) + sizeof (UDLIT_OP_ANSI_PREFIX) - 1)
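/* Illustrative example (not part of the original header): for the
   identifier spelled `operator""_km', UDLIT_OPER_P is true,
   UDLIT_OP_SUFFIX yields `_km', and the mangled form is `li_km'.  */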
#if !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL)
#define VTABLE_NAME_P(ID_NODE) (IDENTIFIER_POINTER (ID_NODE)[1] == 'v' \
&& IDENTIFIER_POINTER (ID_NODE)[2] == 't' \
&& IDENTIFIER_POINTER (ID_NODE)[3] == JOINER)
#define TEMP_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), AUTO_TEMP_NAME, sizeof (AUTO_TEMP_NAME)-1))
#define VFIELD_NAME_P(ID_NODE) \
(!strncmp (IDENTIFIER_POINTER (ID_NODE), VFIELD_NAME, sizeof(VFIELD_NAME)-1))
#endif /* !defined(NO_DOLLAR_IN_LABEL) || !defined(NO_DOT_IN_LABEL) */
/* Nonzero if we're done parsing and into end-of-file activities.
Two if we're done with front-end processing. */
extern int at_eof;
/* True if note_mangling_alias should enqueue mangling aliases for
later generation, rather than emitting them right away. */
extern bool defer_mangling_aliases;
/* True if noexcept is part of the type (i.e. in C++17). */
extern bool flag_noexcept_type;
/* A list of namespace-scope objects which have constructors or
destructors which reside in the global scope. The decl is stored
in the TREE_VALUE slot and the initializer is stored in the
TREE_PURPOSE slot. */
extern GTY(()) tree static_aggregates;
/* Likewise, for thread local storage. */
extern GTY(()) tree tls_aggregates;
enum overload_flags { NO_SPECIAL = 0, DTOR_FLAG, TYPENAME_FLAG };
These are used as bits in flags passed to various functions to
control their behavior. Despite the LOOKUP_ prefix, many of these
do not control name lookup. ??? Functions using these flags should
probably be modified to accept explicit boolean flags for the
behaviors relevant to them. */
/* Check for access violations. */
#define LOOKUP_PROTECT (1 << 0)
#define LOOKUP_NORMAL (LOOKUP_PROTECT)
/* Even if the function found by lookup is a virtual function, it
should be called directly. */
#define LOOKUP_NONVIRTUAL (1 << 1)
/* Non-converting (i.e., "explicit") constructors are not tried. This flag
indicates that we are not performing direct-initialization. */
#define LOOKUP_ONLYCONVERTING (1 << 2)
#define LOOKUP_IMPLICIT (LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING)
/* If a temporary is created, it should be created so that it lives
as long as the current variable bindings; otherwise it only lives
until the end of the complete-expression. It also forces
direct-initialization in cases where other parts of the compiler
have already generated a temporary, such as reference
initialization and the catch parameter. */
#define DIRECT_BIND (1 << 3)
/* We're performing a user-defined conversion, so more user-defined
conversions are not permitted (only built-in conversions). */
#define LOOKUP_NO_CONVERSION (1 << 4)
/* The user has explicitly called a destructor. (Therefore, we do
not need to check that the object is non-NULL before calling the
destructor.) */
#define LOOKUP_DESTRUCTOR (1 << 5)
/* Do not permit references to bind to temporaries. */
#define LOOKUP_NO_TEMP_BIND (1 << 6)
/* Do not accept objects, and possibly namespaces. */
#define LOOKUP_PREFER_TYPES (1 << 7)
/* Do not accept objects, and possibly types. */
#define LOOKUP_PREFER_NAMESPACES (1 << 8)
/* Accept types or namespaces. */
#define LOOKUP_PREFER_BOTH (LOOKUP_PREFER_TYPES | LOOKUP_PREFER_NAMESPACES)
/* Return friend declarations and un-declared builtin functions.
(Normally, these entities are registered in the symbol table, but
not found by lookup.) */
#define LOOKUP_HIDDEN (LOOKUP_PREFER_NAMESPACES << 1)
/* We're trying to treat an lvalue as an rvalue. */
#define LOOKUP_PREFER_RVALUE (LOOKUP_HIDDEN << 1)
/* We're inside an init-list, so narrowing conversions are ill-formed. */
#define LOOKUP_NO_NARROWING (LOOKUP_PREFER_RVALUE << 1)
/* We're looking up a constructor for list-initialization. */
#define LOOKUP_LIST_INIT_CTOR (LOOKUP_NO_NARROWING << 1)
/* This is the first parameter of a copy constructor. */
#define LOOKUP_COPY_PARM (LOOKUP_LIST_INIT_CTOR << 1)
/* We only want to consider list constructors. */
#define LOOKUP_LIST_ONLY (LOOKUP_COPY_PARM << 1)
/* Return after determining which function to call and checking access.
Used by synthesized_method_walk to determine which functions will
be called to initialize subobjects, in order to determine exception
specification and possible implicit delete.
This is kind of a hack, but exiting early avoids problems with trying
to perform argument conversions when the class isn't complete yet. */
#define LOOKUP_SPECULATIVE (LOOKUP_LIST_ONLY << 1)
/* Used by calls from defaulted functions to limit the overload set to avoid
cycles trying to declare them (core issue 1092). */
#define LOOKUP_DEFAULTED (LOOKUP_SPECULATIVE << 1)
/* Used in calls to store_init_value to suppress its usual call to
digest_init. */
#define LOOKUP_ALREADY_DIGESTED (LOOKUP_DEFAULTED << 1)
/* An instantiation with explicit template arguments. */
#define LOOKUP_EXPLICIT_TMPL_ARGS (LOOKUP_ALREADY_DIGESTED << 1)
/* Like LOOKUP_NO_TEMP_BIND, but also prevent binding to xvalues. */
#define LOOKUP_NO_RVAL_BIND (LOOKUP_EXPLICIT_TMPL_ARGS << 1)
/* Used by case_conversion to disregard non-integral conversions. */
#define LOOKUP_NO_NON_INTEGRAL (LOOKUP_NO_RVAL_BIND << 1)
/* Used for delegating constructors in order to diagnose self-delegation. */
#define LOOKUP_DELEGATING_CONS (LOOKUP_NO_NON_INTEGRAL << 1)
#define LOOKUP_NAMESPACES_ONLY(F) \
(((F) & LOOKUP_PREFER_NAMESPACES) && !((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_TYPES_ONLY(F) \
(!((F) & LOOKUP_PREFER_NAMESPACES) && ((F) & LOOKUP_PREFER_TYPES))
#define LOOKUP_QUALIFIERS_ONLY(F) ((F) & LOOKUP_PREFER_BOTH)
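/* Illustrative sketch (not part of the original header): a caller
   performing copy-initialization might pass
     int flags = LOOKUP_IMPLICIT;  // i.e. LOOKUP_NORMAL | LOOKUP_ONLYCONVERTING
   while direct-initialization would use plain LOOKUP_NORMAL, leaving
   explicit constructors in the candidate set.  */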
/* These flags are used by the conversion code.
CONV_IMPLICIT : Perform implicit conversions (standard and user-defined).
CONV_STATIC : Perform the explicit conversions for static_cast.
CONV_CONST : Perform the explicit conversions for const_cast.
CONV_REINTERPRET: Perform the explicit conversions for reinterpret_cast.
CONV_PRIVATE : Perform upcasts to private bases.
CONV_FORCE_TEMP : Require a new temporary when converting to the same
aggregate type. */
#define CONV_IMPLICIT 1
#define CONV_STATIC 2
#define CONV_CONST 4
#define CONV_REINTERPRET 8
#define CONV_PRIVATE 16
/* #define CONV_NONCONVERTING 32 */
#define CONV_FORCE_TEMP 64
#define CONV_FOLD 128
#define CONV_OLD_CONVERT (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET)
#define CONV_C_CAST (CONV_IMPLICIT | CONV_STATIC | CONV_CONST \
| CONV_REINTERPRET | CONV_PRIVATE | CONV_FORCE_TEMP)
#define CONV_BACKEND_CONVERT (CONV_OLD_CONVERT | CONV_FOLD)
/* Used by build_expr_type_conversion to indicate which types are
acceptable as arguments to the expression under consideration. */
#define WANT_INT 1 /* integer types, including bool */
#define WANT_FLOAT 2 /* floating point types */
#define WANT_ENUM 4 /* enumerated types */
#define WANT_POINTER 8 /* pointer types */
#define WANT_NULL 16 /* null pointer constant */
#define WANT_VECTOR_OR_COMPLEX 32 /* vector or complex types */
#define WANT_ARITH (WANT_INT | WANT_FLOAT | WANT_VECTOR_OR_COMPLEX)
/* Used with comptypes, and related functions, to guide type
comparison. */
#define COMPARE_STRICT 0 /* Just check if the types are the
same. */
#define COMPARE_BASE 1 /* Check to see if the second type is
derived from the first. */
#define COMPARE_DERIVED 2 /* Like COMPARE_BASE, but in
reverse. */
#define COMPARE_REDECLARATION 4 /* The comparison is being done when
another declaration of an existing
entity is seen. */
#define COMPARE_STRUCTURAL 8 /* The comparison is intended to be
structural. The actual comparison
will be identical to
COMPARE_STRICT. */
/* Used with start function. */
#define SF_DEFAULT 0 /* No flags. */
#define SF_PRE_PARSED 1 /* The function declaration has
already been parsed. */
#define SF_INCLASS_INLINE 2 /* The function is an inline, defined
in the class body. */
/* Used with start_decl's initialized parameter. */
#define SD_UNINITIALIZED 0
#define SD_INITIALIZED 1
#define SD_DEFAULTED 2
#define SD_DELETED 3
/* Returns nonzero iff TYPE1 and TYPE2 are the same type, or if TYPE2
is derived from TYPE1, or if TYPE2 is a pointer (reference) to a
class derived from the type pointed to (referred to) by TYPE1. */
#define same_or_base_type_p(TYPE1, TYPE2) \
comptypes ((TYPE1), (TYPE2), COMPARE_BASE)
/* These macros are used to access a TEMPLATE_PARM_INDEX. */
#define TEMPLATE_PARM_INDEX_CAST(NODE) \
((template_parm_index*)TEMPLATE_PARM_INDEX_CHECK (NODE))
#define TEMPLATE_PARM_IDX(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->index)
#define TEMPLATE_PARM_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->level)
#define TEMPLATE_PARM_DESCENDANTS(NODE) (TREE_CHAIN (NODE))
#define TEMPLATE_PARM_ORIG_LEVEL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->orig_level)
#define TEMPLATE_PARM_DECL(NODE) (TEMPLATE_PARM_INDEX_CAST (NODE)->decl)
#define TEMPLATE_PARM_PARAMETER_PACK(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_PARM_INDEX_CHECK (NODE)))
/* These macros are for accessing the fields of TEMPLATE_TYPE_PARM,
TEMPLATE_TEMPLATE_PARM and BOUND_TEMPLATE_TEMPLATE_PARM nodes. */
#define TEMPLATE_TYPE_PARM_INDEX(NODE) \
(TYPE_VALUES_RAW (TREE_CHECK3 ((NODE), TEMPLATE_TYPE_PARM, \
TEMPLATE_TEMPLATE_PARM, \
BOUND_TEMPLATE_TEMPLATE_PARM)))
#define TEMPLATE_TYPE_IDX(NODE) \
(TEMPLATE_PARM_IDX (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_LEVEL(NODE) \
(TEMPLATE_PARM_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_ORIG_LEVEL(NODE) \
(TEMPLATE_PARM_ORIG_LEVEL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_DECL(NODE) \
(TEMPLATE_PARM_DECL (TEMPLATE_TYPE_PARM_INDEX (NODE)))
#define TEMPLATE_TYPE_PARAMETER_PACK(NODE) \
(TEMPLATE_PARM_PARAMETER_PACK (TEMPLATE_TYPE_PARM_INDEX (NODE)))
/* For a C++17 class deduction placeholder, the template it represents. */
#define CLASS_PLACEHOLDER_TEMPLATE(NODE) \
(DECL_INITIAL (TYPE_NAME (TEMPLATE_TYPE_PARM_CHECK (NODE))))
/* Contexts in which auto deduction occurs. These flags are
used to control diagnostics in do_auto_deduction. */
enum auto_deduction_context
{
adc_unspecified, /* Not given */
adc_variable_type, /* Variable initializer deduction */
adc_return_type, /* Return type deduction */
adc_unify, /* Template argument deduction */
adc_requirement, /* Argument deduction constraint */
adc_decomp_type /* Decomposition declaration initializer deduction */
};
/* True if this type-parameter belongs to a class template, used by C++17
class template argument deduction. */
#define TEMPLATE_TYPE_PARM_FOR_CLASS(NODE) \
(TREE_LANG_FLAG_0 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
/* True iff this TEMPLATE_TYPE_PARM represents decltype(auto). */
#define AUTO_IS_DECLTYPE(NODE) \
(TYPE_LANG_FLAG_5 (TEMPLATE_TYPE_PARM_CHECK (NODE)))
These constants can be used as bit flags in the process of tree formatting.
TFF_PLAIN_IDENTIFIER: unqualified part of a name.
TFF_SCOPE: include the class and namespace scope of the name.
TFF_CHASE_TYPEDEF: print the original type-id instead of the typedef-name.
TFF_DECL_SPECIFIERS: print decl-specifiers.
TFF_CLASS_KEY_OR_ENUM: precede a class-type name (resp. enum name) with
a class-key (resp. `enum').
TFF_RETURN_TYPE: include function return type.
TFF_FUNCTION_DEFAULT_ARGUMENTS: include function default parameter values.
TFF_EXCEPTION_SPECIFICATION: show function exception specification.
TFF_TEMPLATE_HEADER: show the template<...> header in a
template-declaration.
TFF_TEMPLATE_NAME: show only template-name.
TFF_EXPR_IN_PARENS: parenthesize expressions.
TFF_NO_FUNCTION_ARGUMENTS: don't show function arguments.
TFF_UNQUALIFIED_NAME: do not print the qualifying scope of the
top-level entity.
TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS: do not omit template arguments
identical to their defaults.
TFF_NO_TEMPLATE_BINDINGS: do not print information about the template
arguments for a function template specialization.
TFF_POINTER: we are printing a pointer type. */
#define TFF_PLAIN_IDENTIFIER (0)
#define TFF_SCOPE (1)
#define TFF_CHASE_TYPEDEF (1 << 1)
#define TFF_DECL_SPECIFIERS (1 << 2)
#define TFF_CLASS_KEY_OR_ENUM (1 << 3)
#define TFF_RETURN_TYPE (1 << 4)
#define TFF_FUNCTION_DEFAULT_ARGUMENTS (1 << 5)
#define TFF_EXCEPTION_SPECIFICATION (1 << 6)
#define TFF_TEMPLATE_HEADER (1 << 7)
#define TFF_TEMPLATE_NAME (1 << 8)
#define TFF_EXPR_IN_PARENS (1 << 9)
#define TFF_NO_FUNCTION_ARGUMENTS (1 << 10)
#define TFF_UNQUALIFIED_NAME (1 << 11)
#define TFF_NO_OMIT_DEFAULT_TEMPLATE_ARGUMENTS (1 << 12)
#define TFF_NO_TEMPLATE_BINDINGS (1 << 13)
#define TFF_POINTER (1 << 14)
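/* Illustrative sketch (not part of the original header): diagnostics
   typically combine these flags, e.g.
     type_as_string (type, TFF_SCOPE | TFF_CHASE_TYPEDEF);
   (treat the exact `type_as_string' call as an assumption of this
   sketch, not a documented interface).  */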
/* Returns the TEMPLATE_DECL associated with a TEMPLATE_TEMPLATE_PARM
node. */
#define TEMPLATE_TEMPLATE_PARM_TEMPLATE_DECL(NODE) \
((TREE_CODE (NODE) == BOUND_TEMPLATE_TEMPLATE_PARM) \
? TYPE_TI_TEMPLATE (NODE) \
: TYPE_NAME (NODE))
/* in lex.c */
extern void init_reswords (void);
/* Various flags for the overloaded operator information. */
enum ovl_op_flags
{
OVL_OP_FLAG_NONE = 0, /* Don't care. */
OVL_OP_FLAG_UNARY = 1, /* Is unary. */
OVL_OP_FLAG_BINARY = 2, /* Is binary. */
OVL_OP_FLAG_AMBIARY = 3, /* May be unary or binary. */
OVL_OP_FLAG_ALLOC = 4, /* operator new or delete. */
OVL_OP_FLAG_DELETE = 1, /* operator delete. */
OVL_OP_FLAG_VEC = 2 /* vector new or delete. */
};
/* Compressed operator codes. Order is determined by operators.def
and does not match that of tree_codes. */
enum ovl_op_code
{
OVL_OP_ERROR_MARK,
OVL_OP_NOP_EXPR,
#define DEF_OPERATOR(NAME, CODE, MANGLING, FLAGS) OVL_OP_##CODE,
#define DEF_ASSN_OPERATOR(NAME, CODE, MANGLING) /* NOTHING */
#include "operators.def"
OVL_OP_MAX
};
struct GTY(()) ovl_op_info_t {
/* The IDENTIFIER_NODE for the operator. */
tree identifier;
/* The name of the operator. */
const char *name;
/* The mangled name of the operator. */
const char *mangled_name;
/* The (regular) tree code. */
enum tree_code tree_code : 16;
/* The (compressed) operator code. */
enum ovl_op_code ovl_op_code : 8;
/* The ovl_op_flags of the operator */
unsigned flags : 8;
};
/* Overloaded operator info indexed by ass_op_p & ovl_op_code. */
extern GTY(()) ovl_op_info_t ovl_op_info[2][OVL_OP_MAX];
/* Mapping from tree_codes to ovl_op_codes. */
extern GTY(()) unsigned char ovl_op_mapping[MAX_TREE_CODES];
/* Mapping for ambi-ary operators from the binary to the unary. */
extern GTY(()) unsigned char ovl_op_alternate[OVL_OP_MAX];
/* Given an ass_op_p boolean and a tree code, return a pointer to its
overloaded operator info. Tree codes for non-overloaded operators
map to the error-operator. */
#define OVL_OP_INFO(IS_ASS_P, TREE_CODE) \
(&ovl_op_info[(IS_ASS_P) != 0][ovl_op_mapping[(TREE_CODE)]])
/* Overloaded operator info for an identifier for which
IDENTIFIER_OVL_OP_P is true. */
#define IDENTIFIER_OVL_OP_INFO(NODE) \
(&ovl_op_info[IDENTIFIER_KIND_BIT_0 (NODE)][IDENTIFIER_CP_INDEX (NODE)])
#define IDENTIFIER_OVL_OP_FLAGS(NODE) \
(IDENTIFIER_OVL_OP_INFO (NODE)->flags)
/* A type-qualifier, or bitmask thereof, using the TYPE_QUAL
constants. */
typedef int cp_cv_quals;
/* Non-static member functions have an optional virt-specifier-seq.
There is a VIRT_SPEC value for each virt-specifier.
They can be combined by bitwise-or to form the complete set of
virt-specifiers for a member function. */
enum virt_specifier
{
VIRT_SPEC_UNSPECIFIED = 0x0,
VIRT_SPEC_FINAL = 0x1,
VIRT_SPEC_OVERRIDE = 0x2
};
/* A virt-specifier, or bitmask thereof, using the VIRT_SPEC
constants. */
typedef int cp_virt_specifiers;
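/* For example (illustrative, not part of the original header),
   `void f () override final;' yields
   VIRT_SPEC_OVERRIDE | VIRT_SPEC_FINAL.  */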
/* Wherever there is a function-cv-qual, there could also be a ref-qualifier:
[dcl.fct]
The return type, the parameter-type-list, the ref-qualifier, and
the cv-qualifier-seq, but not the default arguments or the exception
specification, are part of the function type.
REF_QUAL_NONE Ordinary member function with no ref-qualifier
REF_QUAL_LVALUE Member function with the &-ref-qualifier
REF_QUAL_RVALUE Member function with the &&-ref-qualifier */
enum cp_ref_qualifier {
REF_QUAL_NONE = 0,
REF_QUAL_LVALUE = 1,
REF_QUAL_RVALUE = 2
};
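/* For example (illustrative, not part of the original header):
     struct S {
       void f ();     // REF_QUAL_NONE
       void g () &;   // REF_QUAL_LVALUE
       void h () &&;  // REF_QUAL_RVALUE
     };  */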
/* A storage class. */
enum cp_storage_class {
/* sc_none must be zero so that zeroing a cp_decl_specifier_seq
sets the storage_class field to sc_none. */
sc_none = 0,
sc_auto,
sc_register,
sc_static,
sc_extern,
sc_mutable
};
/* An individual decl-specifier. This is used to index the array of
locations for the declspecs in struct cp_decl_specifier_seq
below. */
enum cp_decl_spec {
ds_first,
ds_signed = ds_first,
ds_unsigned,
ds_short,
ds_long,
ds_const,
ds_volatile,
ds_restrict,
ds_inline,
ds_virtual,
ds_explicit,
ds_friend,
ds_typedef,
ds_alias,
ds_constexpr,
ds_complex,
ds_thread,
ds_type_spec,
ds_redefined_builtin_type_spec,
ds_attribute,
ds_std_attribute,
ds_storage_class,
ds_long_long,
ds_concept,
ds_last /* This enumerator must always be the last one. */
};
/* A decl-specifier-seq. */
struct cp_decl_specifier_seq {
/* An array of locations for the declaration specifiers, indexed by
enum cp_decl_spec. */
source_location locations[ds_last];
/* The primary type, if any, given by the decl-specifier-seq.
Modifiers, like "short", "const", and "unsigned" are not
reflected here. This field will be a TYPE, unless a typedef-name
was used, in which case it will be a TYPE_DECL. */
tree type;
/* The attributes, if any, provided with the specifier sequence. */
tree attributes;
/* The C++11 attributes that follow the type specifier. */
tree std_attributes;
/* If non-NULL, a built-in type that the user attempted to redefine
to some other type. */
tree redefined_builtin_type;
/* The storage class specified -- or sc_none if no storage class was
explicitly specified. */
cp_storage_class storage_class;
/* For the __intN declspec, this stores the index into the int_n_* arrays. */
int int_n_idx;
/* True iff TYPE_SPEC defines a class or enum. */
BOOL_BITFIELD type_definition_p : 1;
/* True iff multiple types were (erroneously) specified for this
decl-specifier-seq. */
BOOL_BITFIELD multiple_types_p : 1;
/* True iff multiple storage classes were (erroneously) specified
for this decl-specifier-seq or a combination of a storage class
with a typedef specifier. */
BOOL_BITFIELD conflicting_specifiers_p : 1;
/* True iff at least one decl-specifier was found. */
BOOL_BITFIELD any_specifiers_p : 1;
/* True iff at least one type-specifier was found. */
BOOL_BITFIELD any_type_specifiers_p : 1;
/* True iff "int" was explicitly provided. */
BOOL_BITFIELD explicit_int_p : 1;
/* True iff "__intN" was explicitly provided. */
BOOL_BITFIELD explicit_intN_p : 1;
/* True iff "char" was explicitly provided. */
BOOL_BITFIELD explicit_char_p : 1;
/* True iff ds_thread is set for __thread, not thread_local. */
BOOL_BITFIELD gnu_thread_keyword_p : 1;
/* True iff the type is a decltype. */
BOOL_BITFIELD decltype_p : 1;
};
/* The various kinds of declarators. */
enum cp_declarator_kind {
cdk_id,
cdk_function,
cdk_array,
cdk_pointer,
cdk_reference,
cdk_ptrmem,
cdk_decomp,
cdk_error
};
/* A declarator. */
typedef struct cp_declarator cp_declarator;
typedef struct cp_parameter_declarator cp_parameter_declarator;
/* A parameter, before it has been semantically analyzed. */
struct cp_parameter_declarator {
/* The next parameter, or NULL_TREE if none. */
cp_parameter_declarator *next;
/* The decl-specifiers-seq for the parameter. */
cp_decl_specifier_seq decl_specifiers;
/* The declarator for the parameter. */
cp_declarator *declarator;
/* The default-argument expression, or NULL_TREE, if none. */
tree default_argument;
/* True iff this is a template parameter pack. */
bool template_parameter_pack_p;
/* Location within source. */
location_t loc;
};
/* A declarator. */
struct cp_declarator {
/* The kind of declarator. */
ENUM_BITFIELD (cp_declarator_kind) kind : 4;
/* Whether we parsed an ellipsis (`...') just before the declarator,
to indicate this is a parameter pack. */
BOOL_BITFIELD parameter_pack_p : 1;
/* If this declarator is parenthesized, this is the location of the open-paren. It is
UNKNOWN_LOCATION when not parenthesized. */
location_t parenthesized;
location_t id_loc; /* Currently only set for cdk_id, cdk_decomp and
cdk_function. */
/* GNU Attributes that apply to this declarator. If the declarator
is a pointer or a reference, these attributes apply to the type
pointed to. */
tree attributes;
/* Standard C++11 attributes that apply to this declarator. If the
declarator is a pointer or a reference, these attributes apply
to the pointer, rather than to the type pointed to. */
tree std_attributes;
/* For all but cdk_id, cdk_decomp and cdk_error, the contained declarator.
For cdk_id, cdk_decomp and cdk_error, guaranteed to be NULL. */
cp_declarator *declarator;
union {
/* For identifiers. */
struct {
/* If non-NULL, the qualifying scope (a NAMESPACE_DECL or
*_TYPE) for this identifier. */
tree qualifying_scope;
/* The unqualified name of the entity -- an IDENTIFIER_NODE,
BIT_NOT_EXPR, or TEMPLATE_ID_EXPR. */
tree unqualified_name;
/* If this is the name of a function, what kind of special
function (if any). */
special_function_kind sfk;
} id;
/* For functions. */
struct {
/* The parameters to the function as a TREE_LIST of decl/default. */
tree parameters;
/* The cv-qualifiers for the function. */
cp_cv_quals qualifiers;
/* The virt-specifiers for the function. */
cp_virt_specifiers virt_specifiers;
/* The ref-qualifier for the function. */
cp_ref_qualifier ref_qualifier;
/* The transaction-safety qualifier for the function. */
tree tx_qualifier;
/* The exception-specification for the function. */
tree exception_specification;
/* The late-specified return type, if any. */
tree late_return_type;
/* The trailing requires-clause, if any. */
tree requires_clause;
} function;
/* For arrays. */
struct {
/* The bounds to the array. */
tree bounds;
} array;
/* For cdk_pointer and cdk_ptrmem. */
struct {
/* The cv-qualifiers for the pointer. */
cp_cv_quals qualifiers;
/* For cdk_ptrmem, the class type containing the member. */
tree class_type;
} pointer;
/* For cdk_reference */
struct {
/* The cv-qualifiers for the reference. These qualifiers are
only used to diagnose ill-formed code. */
cp_cv_quals qualifiers;
/* Whether this is an rvalue reference */
bool rvalue_ref;
} reference;
} u;
};
/* A level of template instantiation. */
struct GTY((chain_next ("%h.next"))) tinst_level {
/* The immediately deeper level in the chain. */
struct tinst_level *next;
/* The original node. TLDCL can be a DECL (for a function or static
data member), a TYPE (for a class), depending on what we were
asked to instantiate, or a TREE_LIST with the template as PURPOSE
and the template args as VALUE, if we are substituting for
overload resolution. In all these cases, TARGS is NULL.
However, to avoid creating TREE_LIST objects for substitutions if
we can help it, we store PURPOSE and VALUE in TLDCL and TARGS,
respectively. So TLDCL stands for TREE_LIST or DECL (the
template is a DECL too), whereas TARGS stands for the template
arguments. */
tree tldcl, targs;
private:
/* Return TRUE iff the original node is a split list. */
bool split_list_p () const { return targs; }
/* Return TRUE iff the original node is a TREE_LIST object. */
bool tree_list_p () const
{
return !split_list_p () && TREE_CODE (tldcl) == TREE_LIST;
}
/* Return TRUE iff the original node is not a list, split or not. */
bool not_list_p () const
{
return !split_list_p () && !tree_list_p ();
}
/* Convert (in place) the original node from a split list to a
TREE_LIST. */
tree to_list ();
public:
/* Release storage for OBJ and node, if it's a TREE_LIST. */
static void free (tinst_level *obj);
/* Return TRUE iff the original node is a list, split or not. */
bool list_p () const { return !not_list_p (); }
/* Return the original node; if it's a split list, make it a
TREE_LIST first, so that it can be returned as a single tree
object. */
tree get_node () {
if (!split_list_p ()) return tldcl;
else return to_list ();
}
/* Return the original node if it's a DECL or a TREE_LIST, but do
NOT convert a split list to a TREE_LIST: return NULL instead. */
tree maybe_get_node () const {
if (!split_list_p ()) return tldcl;
else return NULL_TREE;
}
/* The location where the template is instantiated. */
location_t locus;
/* errorcount + sorrycount when we pushed this level. */
unsigned short errors;
/* Count references to this object. If refcount reaches
refcount_infinity value, we don't increment or decrement the
refcount anymore, as the refcount isn't accurate anymore.
The object can still be garbage collected if unreferenced from
anywhere, which might keep referenced objects referenced longer than
otherwise necessary. Hitting the infinity is rare though. */
unsigned short refcount;
/* Infinity value for the above refcount. */
static const unsigned short refcount_infinity = (unsigned short) ~0;
};
bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq *, cp_decl_spec);
/* Return the type of the `this' parameter of FNTYPE. */
inline tree
type_of_this_parm (const_tree fntype)
{
function_args_iterator iter;
gcc_assert (TREE_CODE (fntype) == METHOD_TYPE);
function_args_iter_init (&iter, fntype);
return function_args_iter_cond (&iter);
}
/* Return the class of the `this' parameter of FNTYPE. */
inline tree
class_of_this_parm (const_tree fntype)
{
return TREE_TYPE (type_of_this_parm (fntype));
}
/* True iff T is a variable template declaration. */
inline bool
variable_template_p (tree t)
{
if (TREE_CODE (t) != TEMPLATE_DECL)
return false;
if (!PRIMARY_TEMPLATE_P (t))
return false;
if (tree r = DECL_TEMPLATE_RESULT (t))
return VAR_P (r);
return false;
}
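/* Illustrative example (not part of the original header):
     template <class T> constexpr T pi = T (3.1415926535897932385L);
   declares a variable template; variable_template_p is true for its
   TEMPLATE_DECL.  */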
/* True iff T is a variable concept definition. That is, T is
a variable template declared with the concept specifier. */
inline bool
variable_concept_p (tree t)
{
if (TREE_CODE (t) != TEMPLATE_DECL)
return false;
if (tree r = DECL_TEMPLATE_RESULT (t))
return VAR_P (r) && DECL_DECLARED_CONCEPT_P (r);
return false;
}
/* True iff T is a concept definition. That is, T is a variable or function
template declared with the concept specifier. */
inline bool
concept_template_p (tree t)
{
if (TREE_CODE (t) != TEMPLATE_DECL)
return false;
if (tree r = DECL_TEMPLATE_RESULT (t))
return VAR_OR_FUNCTION_DECL_P (r) && DECL_DECLARED_CONCEPT_P (r);
return false;
}
/* A parameter list for a function with no parameters,
   e.g. "int f(void)". */
extern cp_parameter_declarator *no_parameters;
/* Various dump ids. */
extern int class_dump_id;
extern int raw_dump_id;
/* in call.c */
extern bool check_dtor_name (tree, tree);
int magic_varargs_p (tree);
extern tree build_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_addr_func (tree, tsubst_flags_t);
extern void set_flags_from_callee (tree);
extern tree build_call_a (tree, int, tree*);
extern tree build_call_n (tree, int, ...);
extern bool null_ptr_cst_p (tree);
extern bool null_member_pointer_value_p (tree);
extern bool sufficient_parms_p (const_tree);
extern tree type_decays_to (tree);
extern tree extract_call_expr (tree);
extern tree build_user_type_conversion (tree, tree, int,
tsubst_flags_t);
extern tree build_new_function_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_operator_new_call (tree, vec<tree, va_gc> **,
tree *, tree *, tree, tree,
tree *, tsubst_flags_t);
extern tree build_new_method_call (tree, tree,
vec<tree, va_gc> **, tree,
int, tree *, tsubst_flags_t);
extern tree build_special_member_call (tree, tree,
vec<tree, va_gc> **,
tree, int, tsubst_flags_t);
extern tree build_new_op (location_t, enum tree_code,
int, tree, tree, tree, tree *,
tsubst_flags_t);
extern tree build_op_call (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool aligned_allocation_fn_p (tree);
extern bool usual_deallocation_fn_p (tree);
extern tree build_op_delete_call (enum tree_code, tree, tree,
bool, tree, tree,
tsubst_flags_t);
extern bool can_convert (tree, tree, tsubst_flags_t);
extern bool can_convert_standard (tree, tree, tsubst_flags_t);
extern bool can_convert_arg (tree, tree, tree, int,
tsubst_flags_t);
extern bool can_convert_arg_bad (tree, tree, tree, int,
tsubst_flags_t);
extern location_t get_fndecl_argument_location (tree, int);
/* A class for recording information about access failures (e.g. private
fields), so that we can potentially supply a fix-it hint about
an accessor (from a context in which the constness of the object
is known). */
class access_failure_info
{
public:
access_failure_info () : m_was_inaccessible (false), m_basetype_path (NULL_TREE),
m_field_decl (NULL_TREE) {}
void record_access_failure (tree basetype_path, tree field_decl);
void maybe_suggest_accessor (bool const_p) const;
private:
bool m_was_inaccessible;
tree m_basetype_path;
tree m_field_decl;
};
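/* Typical use (a sketch): a caller hands an access_failure_info to
   enforce_access (declared just below) or to lookup_member (in
   search.c); on an access failure the callee records the inaccessible
   FIELD_DECL, after which the caller may emit a fix-it hint naming a
   suitable getter:

     access_failure_info afi;
     ... perform a lookup that may fail, passing &afi ...
     afi.maybe_suggest_accessor (/*const_p=*/true);
*/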
extern bool enforce_access (tree, tree, tree,
tsubst_flags_t,
access_failure_info *afi = NULL);
extern void push_defarg_context (tree);
extern void pop_defarg_context (void);
extern tree convert_default_arg (tree, tree, tree, int,
tsubst_flags_t);
extern tree convert_arg_to_ellipsis (tree, tsubst_flags_t);
extern tree build_x_va_arg (source_location, tree, tree);
extern tree cxx_type_promotes_to (tree);
extern tree type_passed_as (tree);
extern tree convert_for_arg_passing (tree, tree, tsubst_flags_t);
extern bool is_properly_derived_from (tree, tree);
extern tree initialize_reference (tree, tree, int,
tsubst_flags_t);
extern tree extend_ref_init_temps (tree, tree, vec<tree, va_gc>**);
extern tree make_temporary_var_for_ref_to_temp (tree, tree);
extern bool type_has_extended_temps (tree);
extern tree strip_top_quals (tree);
extern bool reference_related_p (tree, tree);
extern int remaining_arguments (tree);
extern tree perform_implicit_conversion (tree, tree, tsubst_flags_t);
extern tree perform_implicit_conversion_flags (tree, tree, tsubst_flags_t, int);
extern tree build_converted_constant_expr (tree, tree, tsubst_flags_t);
extern tree perform_direct_initialization_if_possible (tree, tree, bool,
tsubst_flags_t);
extern tree in_charge_arg_for_name (tree);
extern tree build_cxx_call (tree, int, tree *,
tsubst_flags_t);
extern bool is_std_init_list (tree);
extern bool is_list_ctor (tree);
extern void validate_conversion_obstack (void);
extern void mark_versions_used (tree);
extern tree get_function_version_dispatcher (tree);
/* in class.c */
extern tree build_vfield_ref (tree, tree);
extern tree build_if_in_charge (tree true_stmt, tree false_stmt = void_node);
extern tree build_base_path (enum tree_code, tree,
tree, int, tsubst_flags_t);
extern tree convert_to_base (tree, tree, bool, bool,
tsubst_flags_t);
extern tree convert_to_base_statically (tree, tree);
extern tree build_vtbl_ref (tree, tree);
extern tree build_vfn_ref (tree, tree);
extern tree get_vtable_decl (tree, int);
extern bool add_method (tree, tree, bool);
extern tree declared_access (tree);
extern tree currently_open_class (tree);
extern tree currently_open_derived_class (tree);
extern tree outermost_open_class (void);
extern tree current_nonlambda_class_type (void);
extern tree finish_struct (tree, tree);
extern void finish_struct_1 (tree);
extern int resolves_to_fixed_type_p (tree, int *);
extern void init_class_processing (void);
extern int is_empty_class (tree);
extern bool is_really_empty_class (tree);
extern void pushclass (tree);
extern void popclass (void);
extern void push_nested_class (tree);
extern void pop_nested_class (void);
extern int current_lang_depth (void);
extern void push_lang_context (tree);
extern void pop_lang_context (void);
extern tree instantiate_type (tree, tree, tsubst_flags_t);
extern void build_self_reference (void);
extern int same_signature_p (const_tree, const_tree);
extern void maybe_add_class_template_decl_list (tree, tree, int);
extern void unreverse_member_declarations (tree);
extern void invalidate_class_lookup_cache (void);
extern void maybe_note_name_used_in_class (tree, tree);
extern void note_name_declared_in_class (tree, tree);
extern tree get_vtbl_decl_for_binfo (tree);
extern bool vptr_via_virtual_p (tree);
extern void debug_class (tree);
extern void debug_thunks (tree);
extern void set_linkage_according_to_type (tree, tree);
extern void determine_key_method (tree);
extern void check_for_override (tree, tree);
extern void push_class_stack (void);
extern void pop_class_stack (void);
extern bool default_ctor_p (tree);
extern bool type_has_user_nondefault_constructor (tree);
extern tree in_class_defaulted_default_constructor (tree);
extern bool user_provided_p (tree);
extern bool type_has_user_provided_constructor (tree);
extern bool type_has_non_user_provided_default_constructor (tree);
extern bool vbase_has_user_provided_move_assign (tree);
extern tree default_init_uninitialized_part (tree);
extern bool trivial_default_constructor_is_constexpr (tree);
extern bool type_has_constexpr_default_constructor (tree);
extern bool type_has_virtual_destructor (tree);
extern bool classtype_has_move_assign_or_move_ctor_p (tree, bool user_declared);
extern bool classtype_has_non_deleted_move_ctor (tree);
extern bool type_build_ctor_call (tree);
extern bool type_build_dtor_call (tree);
extern void explain_non_literal_class (tree);
extern void inherit_targ_abi_tags (tree);
extern void defaulted_late_check (tree);
extern bool defaultable_fn_check (tree);
extern void check_abi_tags (tree);
extern tree missing_abi_tags (tree);
extern void fixup_type_variants (tree);
extern void fixup_attribute_variants (tree);
extern tree* decl_cloned_function_p (const_tree, bool);
extern void clone_function_decl (tree, bool);
extern void adjust_clone_args (tree);
extern void deduce_noexcept_on_destructor (tree);
extern bool uniquely_derived_from_p (tree, tree);
extern bool publicly_uniquely_derived_p (tree, tree);
extern tree common_enclosing_class (tree, tree);
/* in cvt.c */
extern tree convert_to_reference (tree, tree, int, int, tree,
tsubst_flags_t);
extern tree convert_from_reference (tree);
extern tree force_rvalue (tree, tsubst_flags_t);
extern tree ocp_convert (tree, tree, int, int,
tsubst_flags_t);
extern tree cp_convert (tree, tree, tsubst_flags_t);
extern tree cp_convert_and_check (tree, tree, tsubst_flags_t);
extern tree cp_fold_convert (tree, tree);
extern tree cp_get_callee (tree);
extern tree cp_get_callee_fndecl (tree);
extern tree cp_get_callee_fndecl_nofold (tree);
extern tree cp_get_fndecl_from_callee (tree, bool fold = true);
extern tree convert_to_void (tree, impl_conv_void,
tsubst_flags_t);
extern tree convert_force (tree, tree, int,
tsubst_flags_t);
extern tree build_expr_type_conversion (int, tree, bool);
extern tree type_promotes_to (tree);
extern bool can_convert_qual (tree, tree);
extern tree perform_qualification_conversions (tree, tree);
extern bool tx_safe_fn_type_p (tree);
extern tree tx_unsafe_fn_variant (tree);
extern bool fnptr_conv_p (tree, tree);
extern tree strip_fnptr_conv (tree);
/* in name-lookup.c */
extern void maybe_push_cleanup_level (tree);
extern tree make_anon_name (void);
extern tree check_for_out_of_scope_variable (tree);
extern void dump (cp_binding_level &ref);
extern void dump (cp_binding_level *ptr);
extern void print_other_binding_stack (cp_binding_level *);
extern tree maybe_push_decl (tree);
extern tree current_decl_namespace (void);
/* decl.c */
extern tree poplevel (int, int, int);
extern void cxx_init_decl_processing (void);
enum cp_tree_node_structure_enum cp_tree_node_structure
(union lang_tree_node *);
extern void finish_scope (void);
extern void push_switch (tree);
extern void pop_switch (void);
extern void note_break_stmt (void);
extern bool note_iteration_stmt_body_start (void);
extern void note_iteration_stmt_body_end (bool);
extern tree make_lambda_name (void);
extern int decls_match (tree, tree, bool = true);
extern bool maybe_version_functions (tree, tree, bool);
extern tree duplicate_decls (tree, tree, bool);
extern tree declare_local_label (tree);
extern tree define_label (location_t, tree);
extern void check_goto (tree);
extern bool check_omp_return (void);
extern tree make_typename_type (tree, tree, enum tag_types, tsubst_flags_t);
extern tree build_typename_type (tree, tree, tree, tag_types);
extern tree make_unbound_class_template (tree, tree, tree, tsubst_flags_t);
extern tree build_library_fn_ptr (const char *, tree, int);
extern tree build_cp_library_fn_ptr (const char *, tree, int);
extern tree push_library_fn (tree, tree, tree, int);
extern tree push_void_library_fn (tree, tree, int);
extern tree push_throw_library_fn (tree, tree);
extern void warn_misplaced_attr_for_class_type (source_location location,
tree class_type);
extern tree check_tag_decl (cp_decl_specifier_seq *, bool);
extern tree shadow_tag (cp_decl_specifier_seq *);
extern tree groktypename (cp_decl_specifier_seq *, const cp_declarator *, bool);
extern tree start_decl (const cp_declarator *, cp_decl_specifier_seq *, int, tree, tree, tree *);
extern void start_decl_1 (tree, bool);
extern bool check_array_initializer (tree, tree, tree);
extern void cp_finish_decl (tree, tree, bool, tree, int);
extern tree lookup_decomp_type (tree);
extern void cp_maybe_mangle_decomp (tree, tree, unsigned int);
extern void cp_finish_decomp (tree, tree, unsigned int);
extern int cp_complete_array_type (tree *, tree, bool);
extern int cp_complete_array_type_or_error (tree *, tree, bool, tsubst_flags_t);
extern tree build_ptrmemfunc_type (tree);
extern tree build_ptrmem_type (tree, tree);
/* the grokdeclarator prototype is in decl.h */
extern tree build_this_parm (tree, tree, cp_cv_quals);
extern tree grokparms (tree, tree *);
extern int copy_fn_p (const_tree);
extern bool move_fn_p (const_tree);
extern bool move_signature_fn_p (const_tree);
extern tree get_scope_of_declarator (const cp_declarator *);
extern void grok_special_member_properties (tree);
extern bool grok_ctor_properties (const_tree, const_tree);
extern bool grok_op_properties (tree, bool);
extern tree xref_tag (enum tag_types, tree, tag_scope, bool);
extern tree xref_tag_from_type (tree, tree, tag_scope);
extern void xref_basetypes (tree, tree);
extern tree start_enum (tree, tree, tree, tree, bool, bool *);
extern void finish_enum_value_list (tree);
extern void finish_enum (tree);
extern void build_enumerator (tree, tree, tree, tree, location_t);
extern tree lookup_enumerator (tree, tree);
extern bool start_preparsed_function (tree, tree, int);
extern bool start_function (cp_decl_specifier_seq *,
const cp_declarator *, tree);
extern tree begin_function_body (void);
extern void finish_function_body (tree);
extern tree outer_curly_brace_block (tree);
extern tree finish_function (bool);
extern tree grokmethod (cp_decl_specifier_seq *, const cp_declarator *, tree);
extern void maybe_register_incomplete_var (tree);
extern void maybe_commonize_var (tree);
extern void complete_vars (tree);
extern tree static_fn_type (tree);
extern void revert_static_member_fn (tree);
extern void fixup_anonymous_aggr (tree);
extern tree compute_array_index_type (tree, tree, tsubst_flags_t);
extern tree check_default_argument (tree, tree, tsubst_flags_t);
extern int wrapup_namespace_globals ();
extern tree create_implicit_typedef (tree, tree);
extern int local_variable_p (const_tree);
extern tree register_dtor_fn (tree);
extern tmpl_spec_kind current_tmpl_spec_kind (int);
extern tree cp_fname_init (const char *, tree *);
extern tree cxx_builtin_function (tree decl);
extern tree cxx_builtin_function_ext_scope (tree decl);
extern tree check_elaborated_type_specifier (enum tag_types, tree, bool);
extern void warn_extern_redeclared_static (tree, tree);
extern tree cxx_comdat_group (tree);
extern bool cp_missing_noreturn_ok_p (tree);
extern bool is_direct_enum_init (tree, tree);
extern void initialize_artificial_var (tree, vec<constructor_elt, va_gc> *);
extern tree check_var_type (tree, tree);
extern tree reshape_init (tree, tree, tsubst_flags_t);
extern tree next_initializable_field (tree);
extern tree fndecl_declared_return_type (tree);
extern bool undeduced_auto_decl (tree);
extern bool require_deduced_type (tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_case_label (location_t, tree, tree);
extern tree cxx_maybe_build_cleanup (tree, tsubst_flags_t);
extern bool check_array_designated_initializer (constructor_elt *,
unsigned HOST_WIDE_INT);
extern bool check_for_uninitialized_const_var (tree, bool, tsubst_flags_t);
/* in decl2.c */
extern void record_mangling (tree, bool);
extern void overwrite_mangling (tree, tree);
extern void note_mangling_alias (tree, tree);
extern void generate_mangling_aliases (void);
extern tree build_memfn_type (tree, tree, cp_cv_quals, cp_ref_qualifier);
extern tree build_pointer_ptrmemfn_type (tree);
extern tree change_return_type (tree, tree);
extern void maybe_retrofit_in_chrg (tree);
extern void maybe_make_one_only (tree);
extern bool vague_linkage_p (tree);
extern void grokclassfn (tree, tree,
enum overload_flags);
extern tree grok_array_decl (location_t, tree, tree, bool);
extern tree delete_sanity (tree, tree, bool, int, tsubst_flags_t);
extern tree check_classfn (tree, tree, tree);
extern void check_member_template (tree);
extern tree grokfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, bool, tree, tree);
extern tree grokbitfield (const cp_declarator *, cp_decl_specifier_seq *,
tree, tree, tree);
extern bool any_dependent_type_attributes_p (tree);
extern tree cp_reconstruct_complex_type (tree, tree);
extern bool attributes_naming_typedef_ok (tree);
extern void cplus_decl_attributes (tree *, tree, int);
extern void finish_anon_union (tree);
extern void cxx_post_compilation_parsing_cleanups (void);
extern tree coerce_new_type (tree);
extern tree coerce_delete_type (tree);
extern void comdat_linkage (tree);
extern void determine_visibility (tree);
extern void constrain_class_visibility (tree);
extern void reset_type_linkage (tree);
extern void tentative_decl_linkage (tree);
extern void import_export_decl (tree);
extern tree build_cleanup (tree);
extern tree build_offset_ref_call_from_tree (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern bool decl_defined_p (tree);
extern bool decl_constant_var_p (tree);
extern bool decl_maybe_constant_var_p (tree);
extern void no_linkage_error (tree);
extern void check_default_args (tree);
extern bool mark_used (tree);
extern bool mark_used (tree, tsubst_flags_t);
extern void finish_static_data_member_decl (tree, tree, bool, tree, int);
extern tree cp_build_parm_decl (tree, tree, tree);
extern tree get_guard (tree);
extern tree get_guard_cond (tree, bool);
extern tree set_guard (tree);
extern tree maybe_get_tls_wrapper_call (tree);
extern void mark_needed (tree);
extern bool decl_needed_p (tree);
extern void note_vague_linkage_fn (tree);
extern void note_variable_template_instantiation (tree);
extern tree build_artificial_parm (tree, tree, tree);
extern bool possibly_inlined_p (tree);
extern int parm_index (tree);
extern tree vtv_start_verification_constructor_init_function (void);
extern tree vtv_finish_verification_constructor_init_function (tree);
extern bool cp_omp_mappable_type (tree);
/* in error.c */
extern const char *type_as_string (tree, int);
extern const char *type_as_string_translate (tree, int);
extern const char *decl_as_string (tree, int);
extern const char *decl_as_string_translate (tree, int);
extern const char *decl_as_dwarf_string (tree, int);
extern const char *expr_as_string (tree, int);
extern const char *lang_decl_name (tree, int, bool);
extern const char *lang_decl_dwarf_name (tree, int, bool);
extern const char *language_to_string (enum languages);
extern const char *class_key_or_enum_as_string (tree);
extern void maybe_warn_variadic_templates (void);
extern void maybe_warn_cpp0x (cpp0x_warn_str str);
extern bool pedwarn_cxx98 (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4);
extern location_t location_of (tree);
extern void qualified_name_lookup_error (tree, tree, tree,
location_t);
/* in except.c */
extern void init_exception_processing (void);
extern tree expand_start_catch_block (tree);
extern void expand_end_catch_block (void);
extern tree build_exc_ptr (void);
extern tree build_throw (tree);
extern int nothrow_libfn_p (const_tree);
extern void check_handlers (tree);
extern tree finish_noexcept_expr (tree, tsubst_flags_t);
extern bool expr_noexcept_p (tree, tsubst_flags_t);
extern void perform_deferred_noexcept_checks (void);
extern bool nothrow_spec_p (const_tree);
extern bool type_noexcept_p (const_tree);
extern bool type_throw_all_p (const_tree);
extern tree build_noexcept_spec (tree, int);
extern void choose_personality_routine (enum languages);
extern tree build_must_not_throw_expr (tree, tree);
extern tree eh_type_info (tree);
extern tree begin_eh_spec_block (void);
extern void finish_eh_spec_block (tree, tree);
extern tree build_eh_type_type (tree);
extern tree cp_protect_cleanup_actions (void);
extern tree create_try_catch_expr (tree, tree);
/* in expr.c */
extern tree cplus_expand_constant (tree);
extern tree mark_use (tree expr, bool rvalue_p, bool read_p,
location_t = UNKNOWN_LOCATION,
bool reject_builtin = true);
extern tree mark_rvalue_use (tree,
location_t = UNKNOWN_LOCATION,
bool reject_builtin = true);
extern tree mark_lvalue_use (tree);
extern tree mark_lvalue_use_nonread (tree);
extern tree mark_type_use (tree);
extern tree mark_discarded_use (tree);
extern void mark_exp_read (tree);
/* friend.c */
extern int is_friend (tree, tree);
extern void make_friend_class (tree, tree, bool);
extern void add_friend (tree, tree, bool);
extern tree do_friend (tree, tree, tree, tree,
enum overload_flags, bool);
extern void set_global_friend (tree);
extern bool is_global_friend (tree);
/* in init.c */
extern tree expand_member_init (tree);
extern void emit_mem_initializers (tree);
extern tree build_aggr_init (tree, tree, int,
tsubst_flags_t);
extern int is_class_type (tree, int);
extern tree get_type_value (tree);
extern tree build_zero_init (tree, tree, bool);
extern tree build_value_init (tree, tsubst_flags_t);
extern tree build_value_init_noctor (tree, tsubst_flags_t);
extern tree get_nsdmi (tree, bool, tsubst_flags_t);
extern tree build_offset_ref (tree, tree, bool,
tsubst_flags_t);
extern tree throw_bad_array_new_length (void);
extern bool type_has_new_extended_alignment (tree);
extern unsigned malloc_alignment (void);
extern tree build_new (vec<tree, va_gc> **, tree, tree,
vec<tree, va_gc> **, int,
tsubst_flags_t);
extern tree get_temp_regvar (tree, tree);
extern tree build_vec_init (tree, tree, tree, bool, int,
tsubst_flags_t);
extern tree build_delete (tree, tree,
special_function_kind,
int, int, tsubst_flags_t);
extern void push_base_cleanups (void);
extern tree build_vec_delete (tree, tree,
special_function_kind, int,
tsubst_flags_t);
extern tree create_temporary_var (tree);
extern void initialize_vtbl_ptrs (tree);
extern tree scalar_constant_value (tree);
extern tree decl_really_constant_value (tree);
extern int diagnose_uninitialized_cst_or_ref_member (tree, bool, bool);
extern tree build_vtbl_address (tree);
extern bool maybe_reject_flexarray_init (tree, tree);
/* in lex.c */
extern void cxx_dup_lang_specific_decl (tree);
extern void yyungetc (int, int);
extern tree unqualified_name_lookup_error (tree,
location_t = UNKNOWN_LOCATION);
extern tree unqualified_fn_lookup_error (cp_expr);
extern tree make_conv_op_name (tree);
extern tree build_lang_decl (enum tree_code, tree, tree);
extern tree build_lang_decl_loc (location_t, enum tree_code, tree, tree);
extern void retrofit_lang_decl (tree);
extern void fit_decomposition_lang_decl (tree, tree);
extern tree copy_decl (tree CXX_MEM_STAT_INFO);
extern tree copy_type (tree CXX_MEM_STAT_INFO);
extern tree cxx_make_type (enum tree_code);
extern tree make_class_type (enum tree_code);
extern const char *get_identifier_kind_name (tree);
extern void set_identifier_kind (tree, cp_identifier_kind);
extern bool cxx_init (void);
extern void cxx_finish (void);
extern bool in_main_input_context (void);
/* in method.c */
extern void init_method (void);
extern tree make_thunk (tree, bool, tree, tree);
extern void finish_thunk (tree);
extern void use_thunk (tree, bool);
extern bool trivial_fn_p (tree);
extern tree forward_parm (tree);
extern bool is_trivially_xible (enum tree_code, tree, tree);
extern bool is_xible (enum tree_code, tree, tree);
extern tree get_defaulted_eh_spec (tree, tsubst_flags_t = tf_warning_or_error);
extern void after_nsdmi_defaulted_late_checks (tree);
extern bool maybe_explain_implicit_delete (tree);
extern void explain_implicit_non_constexpr (tree);
extern void deduce_inheriting_ctor (tree);
extern void synthesize_method (tree);
extern tree lazily_declare_fn (special_function_kind,
tree);
extern tree skip_artificial_parms_for (const_tree, tree);
extern int num_artificial_parms_for (const_tree);
extern tree make_alias_for (tree, tree);
extern tree get_copy_ctor (tree, tsubst_flags_t);
extern tree get_copy_assign (tree);
extern tree get_default_ctor (tree);
extern tree get_dtor (tree, tsubst_flags_t);
extern tree strip_inheriting_ctors (tree);
extern tree inherited_ctor_binfo (tree);
extern bool ctor_omit_inherited_parms (tree);
extern tree locate_ctor (tree);
extern tree implicitly_declare_fn (special_function_kind, tree,
bool, tree, tree);
/* in optimize.c */
extern bool maybe_clone_body (tree);
/* in parser.c */
extern tree cp_convert_range_for (tree, tree, tree, tree, unsigned int, bool,
unsigned short);
extern bool parsing_nsdmi (void);
extern bool parsing_default_capturing_generic_lambda_in_template (void);
extern void inject_this_parameter (tree, cp_cv_quals);
extern location_t defarg_location (tree);
extern void maybe_show_extern_c_location (void);
/* in pt.c */
extern bool check_template_shadow (tree);
extern bool check_auto_in_tmpl_args (tree, tree);
extern tree get_innermost_template_args (tree, int);
extern void maybe_begin_member_template_processing (tree);
extern void maybe_end_member_template_processing (void);
extern tree finish_member_template_decl (tree);
extern void begin_template_parm_list (void);
extern bool begin_specialization (void);
extern void reset_specialization (void);
extern void end_specialization (void);
extern void begin_explicit_instantiation (void);
extern void end_explicit_instantiation (void);
extern void check_unqualified_spec_or_inst (tree, location_t);
extern tree check_explicit_specialization (tree, tree, int, int,
tree = NULL_TREE);
extern int num_template_headers_for_class (tree);
extern void check_template_variable (tree);
extern tree make_auto (void);
extern tree make_decltype_auto (void);
extern tree make_template_placeholder (tree);
extern bool template_placeholder_p (tree);
extern tree do_auto_deduction (tree, tree, tree,
tsubst_flags_t
= tf_warning_or_error,
auto_deduction_context
= adc_unspecified,
tree = NULL_TREE,
int = LOOKUP_NORMAL);
extern tree type_uses_auto (tree);
extern tree type_uses_auto_or_concept (tree);
extern void append_type_to_template_for_access_check (tree, tree, tree,
location_t);
extern tree convert_generic_types_to_packs (tree, int, int);
extern tree splice_late_return_type (tree, tree);
extern bool is_auto (const_tree);
extern tree process_template_parm (tree, location_t, tree,
bool, bool);
extern tree end_template_parm_list (tree);
extern void end_template_parm_list (void);
extern void end_template_decl (void);
extern tree maybe_update_decl_type (tree, tree);
extern bool check_default_tmpl_args (tree, tree, bool, bool, int);
extern tree push_template_decl (tree);
extern tree push_template_decl_real (tree, bool);
extern tree add_inherited_template_parms (tree, tree);
extern bool redeclare_class_template (tree, tree, tree);
extern tree lookup_template_class (tree, tree, tree, tree,
int, tsubst_flags_t);
extern tree lookup_template_function (tree, tree);
extern tree lookup_template_variable (tree, tree);
extern int uses_template_parms (tree);
extern bool uses_template_parms_level (tree, int);
extern bool in_template_function (void);
extern bool need_generic_capture (void);
extern tree instantiate_class_template (tree);
extern tree instantiate_template (tree, tree, tsubst_flags_t);
extern tree fn_type_unification (tree, tree, tree,
const tree *, unsigned int,
tree, unification_kind_t, int,
bool, bool);
extern void mark_decl_instantiated (tree, int);
extern int more_specialized_fn (tree, tree, int);
extern void do_decl_instantiation (tree, tree);
extern void do_type_instantiation (tree, tree, tsubst_flags_t);
extern bool always_instantiate_p (tree);
extern bool maybe_instantiate_noexcept (tree, tsubst_flags_t = tf_warning_or_error);
extern tree instantiate_decl (tree, bool, bool);
extern int comp_template_parms (const_tree, const_tree);
extern bool builtin_pack_fn_p (tree);
extern bool uses_parameter_packs (tree);
extern bool template_parameter_pack_p (const_tree);
extern bool function_parameter_pack_p (const_tree);
extern bool function_parameter_expanded_from_pack_p (tree, tree);
extern tree make_pack_expansion (tree, tsubst_flags_t = tf_warning_or_error);
extern bool check_for_bare_parameter_packs (tree, location_t = UNKNOWN_LOCATION);
extern tree build_template_info (tree, tree);
extern tree get_template_info (const_tree);
extern vec<qualified_typedef_usage_t, va_gc> *get_types_needing_access_check (tree);
extern int template_class_depth (tree);
extern int is_specialization_of (tree, tree);
extern bool is_specialization_of_friend (tree, tree);
extern tree get_pattern_parm (tree, tree);
extern int comp_template_args (tree, tree, tree * = NULL,
tree * = NULL, bool = false);
extern int template_args_equal (tree, tree, bool = false);
extern tree maybe_process_partial_specialization (tree);
extern tree most_specialized_instantiation (tree);
extern void print_candidates (tree);
extern void instantiate_pending_templates (int);
extern tree tsubst_default_argument (tree, int, tree, tree,
tsubst_flags_t);
extern tree tsubst (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_copy_and_build (tree, tree, tsubst_flags_t,
tree, bool, bool);
extern tree tsubst_expr (tree, tree, tsubst_flags_t,
tree, bool);
extern tree tsubst_pack_expansion (tree, tree, tsubst_flags_t, tree);
extern tree most_general_template (tree);
extern tree get_mostly_instantiated_function_type (tree);
extern bool problematic_instantiation_changed (void);
extern void record_last_problematic_instantiation (void);
extern struct tinst_level *current_instantiation (void);
extern bool instantiating_current_function_p (void);
extern tree maybe_get_template_decl_from_type_decl (tree);
extern int processing_template_parmlist;
extern bool dependent_type_p (tree);
extern bool dependent_scope_p (tree);
extern bool any_dependent_template_arguments_p (const_tree);
extern bool any_erroneous_template_args_p (const_tree);
extern bool dependent_template_p (tree);
extern bool dependent_template_id_p (tree, tree);
extern bool type_dependent_expression_p (tree);
extern bool type_dependent_object_expression_p (tree);
extern bool any_type_dependent_arguments_p (const vec<tree, va_gc> *);
extern bool any_type_dependent_elements_p (const_tree);
extern bool type_dependent_expression_p_push (tree);
extern bool value_dependent_expression_p (tree);
extern bool instantiation_dependent_expression_p (tree);
extern bool instantiation_dependent_uneval_expression_p (tree);
extern bool any_value_dependent_elements_p (const_tree);
extern bool dependent_omp_for_p (tree, tree, tree, tree);
extern tree resolve_typename_type (tree, bool);
extern tree template_for_substitution (tree);
extern tree build_non_dependent_expr (tree);
extern void make_args_non_dependent (vec<tree, va_gc> *);
extern bool reregister_specialization (tree, tree, tree);
extern tree instantiate_non_dependent_expr (tree);
extern tree instantiate_non_dependent_expr_sfinae (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_expr_internal (tree, tsubst_flags_t);
extern tree instantiate_non_dependent_or_null (tree);
extern bool variable_template_specialization_p (tree);
extern bool alias_type_or_template_p (tree);
extern bool alias_template_specialization_p (const_tree);
extern bool dependent_alias_template_spec_p (const_tree);
extern bool explicit_class_specialization_p (tree);
extern bool push_tinst_level (tree);
extern bool push_tinst_level_loc (tree, location_t);
extern void pop_tinst_level (void);
extern struct tinst_level *outermost_tinst_level (void);
extern void init_template_processing (void);
extern void print_template_statistics (void);
extern bool template_template_parameter_p (const_tree);
extern bool template_type_parameter_p (const_tree);
extern bool primary_template_specialization_p (const_tree);
extern tree get_primary_template_innermost_parameters (const_tree);
extern tree get_template_parms_at_level (tree, int);
extern tree get_template_innermost_arguments (const_tree);
extern tree get_template_argument_pack_elems (const_tree);
extern tree get_function_template_decl (const_tree);
extern tree resolve_nondeduced_context (tree, tsubst_flags_t);
extern hashval_t iterative_hash_template_arg (tree arg, hashval_t val);
extern tree coerce_template_parms (tree, tree, tree);
extern tree coerce_template_parms (tree, tree, tree, tsubst_flags_t);
extern void register_local_specialization (tree, tree);
extern tree retrieve_local_specialization (tree);
extern tree extract_fnparm_pack (tree, tree *);
extern tree template_parm_to_arg (tree);
extern tree dguide_name (tree);
extern bool dguide_name_p (tree);
extern bool deduction_guide_p (const_tree);
extern bool copy_guide_p (const_tree);
extern bool template_guide_p (const_tree);
/* in repo.c */
extern void init_repo (void);
extern int repo_emit_p (tree);
extern bool repo_export_class_p (const_tree);
extern void finish_repo (void);
/* in rtti.c */
/* A vector of all tinfo decls that haven't been emitted yet. */
extern GTY(()) vec<tree, va_gc> *unemitted_tinfo_decls;
extern void init_rtti_processing (void);
extern tree build_typeid (tree, tsubst_flags_t);
extern tree get_tinfo_decl (tree);
extern tree get_typeid (tree, tsubst_flags_t);
extern tree build_headof (tree);
extern tree build_dynamic_cast (tree, tree, tsubst_flags_t);
extern void emit_support_tinfos (void);
extern bool emit_tinfo_decl (tree);
/* in search.c */
extern bool accessible_base_p (tree, tree, bool);
extern tree lookup_base (tree, tree, base_access,
base_kind *, tsubst_flags_t);
extern tree dcast_base_hint (tree, tree);
extern int accessible_p (tree, tree, bool);
extern int accessible_in_template_p (tree, tree);
extern tree lookup_field (tree, tree, int, bool);
extern tree lookup_fnfields (tree, tree, int);
extern tree lookup_member (tree, tree, int, bool,
tsubst_flags_t,
access_failure_info *afi = NULL);
extern tree lookup_member_fuzzy (tree, tree, bool);
extern tree locate_field_accessor (tree, tree, bool);
extern int look_for_overrides (tree, tree);
extern void get_pure_virtuals (tree);
extern void maybe_suppress_debug_info (tree);
extern void note_debug_info_needed (tree);
extern tree current_scope (void);
extern int at_function_scope_p (void);
extern bool at_class_scope_p (void);
extern bool at_namespace_scope_p (void);
extern tree context_for_name_lookup (tree);
extern tree lookup_conversions (tree);
extern tree binfo_from_vbase (tree);
extern tree binfo_for_vbase (tree, tree);
extern tree look_for_overrides_here (tree, tree);
#define dfs_skip_bases ((tree)1)
extern tree dfs_walk_all (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
extern tree dfs_walk_once (tree, tree (*) (tree, void *),
tree (*) (tree, void *), void *);
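/* Example walker (a sketch): the callbacks are invoked on each BINFO
   along with the user's DATA pointer.  Returning dfs_skip_bases from
   the pre-order function prunes that binfo's bases; any other non-NULL
   return stops the walk and becomes the result:

     static tree
     find_virtual_base_r (tree binfo, void *data ATTRIBUTE_UNUSED)
     {
       return BINFO_VIRTUAL_P (binfo) ? binfo : NULL_TREE;
     }

     tree vbase = dfs_walk_all (TYPE_BINFO (type), find_virtual_base_r,
                                NULL, NULL);
*/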
extern tree binfo_via_virtual (tree, tree);
extern bool binfo_direct_p (tree);
extern tree build_baselink (tree, tree, tree, tree);
extern tree adjust_result_of_qualified_name_lookup
(tree, tree, tree);
extern tree copied_binfo (tree, tree);
extern tree original_binfo (tree, tree);
extern int shared_member_p (tree);
extern bool any_dependent_bases_p (tree = current_nonlambda_class_type ());
/* The representation of a deferred access check. */
struct GTY(()) deferred_access_check {
/* The base class in which the declaration is referenced. */
tree binfo;
/* The declaration whose access must be checked. */
tree decl;
/* The declaration that should be used in the error message. */
tree diag_decl;
/* The location of this access. */
location_t loc;
};
/* in semantics.c */
extern void push_deferring_access_checks (deferring_kind);
extern void resume_deferring_access_checks (void);
extern void stop_deferring_access_checks (void);
extern void pop_deferring_access_checks (void);
extern vec<deferred_access_check, va_gc> *get_deferred_access_checks (void);
extern void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> *);
extern void pop_to_parent_deferring_access_checks (void);
extern bool perform_access_checks (vec<deferred_access_check, va_gc> *,
tsubst_flags_t);
extern bool perform_deferred_access_checks (tsubst_flags_t);
extern bool perform_or_defer_access_check (tree, tree, tree,
tsubst_flags_t,
access_failure_info *afi = NULL);
/* RAII sentinel that ensures deferred access checks are popped before
   a function returns.  */
struct deferring_access_check_sentinel
{
deferring_access_check_sentinel (enum deferring_kind kind = dk_deferred)
{
push_deferring_access_checks (kind);
}
~deferring_access_check_sentinel ()
{
pop_deferring_access_checks ();
}
};
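/* Usage (a sketch): the sentinel replaces a manually paired
   push_deferring_access_checks/pop_deferring_access_checks, so an
   early return cannot leak a deferral level:

     {
       deferring_access_check_sentinel acs;   // pushes dk_deferred
       ...                                    // access checks queue up here
     }                                        // popped on every exit path
*/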
extern int stmts_are_full_exprs_p (void);
extern void init_cp_semantics (void);
extern tree do_poplevel (tree);
extern void break_maybe_infinite_loop (void);
extern void add_decl_expr (tree);
extern tree maybe_cleanup_point_expr_void (tree);
extern tree finish_expr_stmt (tree);
extern tree begin_if_stmt (void);
extern tree finish_if_stmt_cond (tree, tree);
extern tree finish_then_clause (tree);
extern void begin_else_clause (tree);
extern void finish_else_clause (tree);
extern void finish_if_stmt (tree);
extern tree begin_while_stmt (void);
extern void finish_while_stmt_cond (tree, tree, bool, unsigned short);
extern void finish_while_stmt (tree);
extern tree begin_do_stmt (void);
extern void finish_do_body (tree);
extern void finish_do_stmt (tree, tree, bool, unsigned short);
extern tree finish_return_stmt (tree);
extern tree begin_for_scope (tree *);
extern tree begin_for_stmt (tree, tree);
extern void finish_init_stmt (tree);
extern void finish_for_cond (tree, tree, bool, unsigned short);
extern void finish_for_expr (tree, tree);
extern void finish_for_stmt (tree);
extern tree begin_range_for_stmt (tree, tree);
extern void finish_range_for_decl (tree, tree, tree);
extern void finish_range_for_stmt (tree);
extern tree finish_break_stmt (void);
extern tree finish_continue_stmt (void);
extern tree begin_switch_stmt (void);
extern void finish_switch_cond (tree, tree);
extern void finish_switch_stmt (tree);
extern tree finish_goto_stmt (tree);
extern tree begin_try_block (void);
extern void finish_try_block (tree);
extern void finish_handler_sequence (tree);
extern tree begin_function_try_block (tree *);
extern void finish_function_try_block (tree);
extern void finish_function_handler_sequence (tree, tree);
extern void finish_cleanup_try_block (tree);
extern tree begin_handler (void);
extern void finish_handler_parms (tree, tree);
extern void finish_handler (tree);
extern void finish_cleanup (tree, tree);
extern bool is_this_parameter (tree);
enum {
BCS_NORMAL = 0,
BCS_NO_SCOPE = 1,
BCS_TRY_BLOCK = 2,
BCS_FN_BODY = 4,
BCS_TRANSACTION = 8
};
extern tree begin_compound_stmt (unsigned int);
extern void finish_compound_stmt (tree);
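/* The BCS_* values above are bit flags and may be OR'd together when a
   block plays several roles at once, e.g. (a sketch) a function body
   that is also a function-try-block:

     tree block = begin_compound_stmt (BCS_FN_BODY | BCS_TRY_BLOCK);
     ...
     finish_compound_stmt (block);
*/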
extern tree finish_asm_stmt (int, tree, tree, tree, tree,
tree, bool);
extern tree finish_label_stmt (tree);
extern void finish_label_decl (tree);
extern cp_expr finish_parenthesized_expr (cp_expr);
extern tree force_paren_expr (tree);
extern tree maybe_undo_parenthesized_ref (tree);
extern tree finish_non_static_data_member (tree, tree, tree);
extern tree begin_stmt_expr (void);
extern tree finish_stmt_expr_expr (tree, tree);
extern tree finish_stmt_expr (tree, bool);
extern tree stmt_expr_value_expr (tree);
extern bool empty_expr_stmt_p (tree);
extern cp_expr perform_koenig_lookup (cp_expr, vec<tree, va_gc> *,
tsubst_flags_t);
extern tree finish_call_expr (tree, vec<tree, va_gc> **, bool,
bool, tsubst_flags_t);
extern tree lookup_and_finish_template_variable (tree, tree, tsubst_flags_t = tf_warning_or_error);
extern tree finish_template_variable (tree, tsubst_flags_t = tf_warning_or_error);
extern cp_expr finish_increment_expr (cp_expr, enum tree_code);
extern tree finish_this_expr (void);
extern tree finish_pseudo_destructor_expr (tree, tree, tree, location_t);
extern cp_expr finish_unary_op_expr (location_t, enum tree_code, cp_expr,
tsubst_flags_t);
/* Whether this call to finish_compound_literal represents a C++11 functional
cast or a C99 compound literal. */
enum fcl_t { fcl_functional, fcl_c99 };
extern tree finish_compound_literal (tree, tree, tsubst_flags_t, fcl_t = fcl_functional);
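/* Example (a sketch): the fcl_t argument records which source form
   reached finish_compound_literal:

     T{1, 2}     // C++11 functional cast              -> fcl_functional
     (T){1, 2}   // C99 compound literal (GNU C++)     -> fcl_c99
*/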
extern tree finish_fname (tree);
extern void finish_translation_unit (void);
extern tree finish_template_type_parm (tree, tree);
extern tree finish_template_template_parm (tree, tree);
extern tree begin_class_definition (tree);
extern void finish_template_decl (tree);
extern tree finish_template_type (tree, tree, int);
extern tree finish_base_specifier (tree, tree, bool);
extern void finish_member_declaration (tree);
extern bool outer_automatic_var_p (tree);
extern tree process_outer_var_ref (tree, tsubst_flags_t, bool force_use = false);
extern cp_expr finish_id_expression (tree, tree, tree,
cp_id_kind *,
bool, bool, bool *,
bool, bool, bool, bool,
const char **,
location_t);
extern tree finish_typeof (tree);
extern tree finish_underlying_type (tree);
extern tree calculate_bases (tree, tsubst_flags_t);
extern tree finish_bases (tree, bool);
extern tree calculate_direct_bases (tree, tsubst_flags_t);
extern tree finish_offsetof (tree, tree, location_t);
extern void finish_decl_cleanup (tree, tree);
extern void finish_eh_cleanup (tree);
extern void emit_associated_thunks (tree);
extern void finish_mem_initializers (tree);
extern tree check_template_template_default_arg (tree);
extern bool expand_or_defer_fn_1 (tree);
extern void expand_or_defer_fn (tree);
extern void add_typedef_to_current_template_for_access_check (tree, tree,
location_t);
extern void check_accessibility_of_qualified_id (tree, tree, tree);
extern tree finish_qualified_id_expr (tree, tree, bool, bool,
bool, bool, tsubst_flags_t);
extern void simplify_aggr_init_expr (tree *);
extern void finalize_nrv (tree *, tree, tree);
extern tree omp_reduction_id (enum tree_code, tree, tree);
extern tree cp_remove_omp_priv_cleanup_stmt (tree *, int *, void *);
extern void cp_check_omp_declare_reduction (tree);
extern void finish_omp_declare_simd_methods (tree);
extern tree finish_omp_clauses (tree, enum c_omp_region_type);
extern tree push_omp_privatization_clauses (bool);
extern void pop_omp_privatization_clauses (tree);
extern void save_omp_privatization_clauses (vec<tree> &);
extern void restore_omp_privatization_clauses (vec<tree> &);
extern void finish_omp_threadprivate (tree);
extern tree begin_omp_structured_block (void);
extern tree finish_omp_structured_block (tree);
extern tree finish_oacc_data (tree, tree);
extern tree finish_oacc_host_data (tree, tree);
extern tree finish_omp_construct (enum tree_code, tree, tree);
extern tree begin_omp_parallel (void);
extern tree finish_omp_parallel (tree, tree);
extern tree begin_omp_task (void);
extern tree finish_omp_task (tree, tree);
extern tree finish_omp_for (location_t, enum tree_code,
tree, tree, tree, tree, tree,
tree, tree, vec<tree> *, tree);
extern void finish_omp_atomic (enum tree_code, enum tree_code,
tree, tree, tree, tree, tree,
bool);
extern void finish_omp_barrier (void);
extern void finish_omp_flush (void);
extern void finish_omp_taskwait (void);
extern void finish_omp_taskyield (void);
extern void finish_omp_cancel (tree);
extern void finish_omp_cancellation_point (tree);
extern tree omp_privatize_field (tree, bool);
extern tree begin_transaction_stmt (location_t, tree *, int);
extern void finish_transaction_stmt (tree, tree, int, tree);
extern tree build_transaction_expr (location_t, tree, int, tree);
extern bool cxx_omp_create_clause_info (tree, tree, bool, bool,
bool, bool);
extern tree baselink_for_fns (tree);
extern void finish_static_assert (tree, tree, location_t,
bool);
extern tree finish_decltype_type (tree, bool, tsubst_flags_t);
extern tree finish_trait_expr (enum cp_trait_kind, tree, tree);
extern tree build_lambda_expr (void);
extern tree build_lambda_object (tree);
extern tree begin_lambda_type (tree);
extern tree lambda_capture_field_type (tree, bool, bool);
extern tree lambda_return_type (tree);
extern tree lambda_proxy_type (tree);
extern tree lambda_function (tree);
extern void apply_deduced_return_type (tree, tree);
extern tree add_capture (tree, tree, tree, bool, bool);
extern tree add_default_capture (tree, tree, tree);
extern void insert_capture_proxy (tree);
extern void insert_pending_capture_proxies (void);
extern bool is_capture_proxy (tree);
extern bool is_normal_capture_proxy (tree);
extern bool is_constant_capture_proxy (tree);
extern void register_capture_members (tree);
extern tree lambda_expr_this_capture (tree, bool);
extern void maybe_generic_this_capture (tree, tree);
extern tree maybe_resolve_dummy (tree, bool);
extern tree current_nonlambda_function (void);
extern tree nonlambda_method_basetype (void);
extern tree current_nonlambda_scope (void);
extern tree current_lambda_expr (void);
extern bool generic_lambda_fn_p (tree);
extern tree do_dependent_capture (tree, bool = false);
extern bool lambda_fn_in_template_p (tree);
extern void maybe_add_lambda_conv_op (tree);
extern bool is_lambda_ignored_entity (tree);
extern bool lambda_static_thunk_p (tree);
extern tree finish_builtin_launder (location_t, tree,
tsubst_flags_t);
extern void start_lambda_scope (tree);
extern void record_lambda_scope (tree);
extern void record_null_lambda_scope (tree);
extern void finish_lambda_scope (void);
extern tree start_lambda_function (tree fn, tree lambda_expr);
extern void finish_lambda_function (tree body);
/* in tree.c */
extern int cp_tree_operand_length (const_tree);
extern int cp_tree_code_length (enum tree_code);
extern void cp_free_lang_data (tree t);
extern tree force_target_expr (tree, tree, tsubst_flags_t);
extern tree build_target_expr_with_type (tree, tree, tsubst_flags_t);
extern void lang_check_failed (const char *, int,
const char *) ATTRIBUTE_NORETURN
ATTRIBUTE_COLD;
extern tree stabilize_expr (tree, tree *);
extern void stabilize_call (tree, tree *);
extern bool stabilize_init (tree, tree *);
extern tree add_stmt_to_compound (tree, tree);
extern void init_tree (void);
extern bool pod_type_p (const_tree);
extern bool layout_pod_type_p (const_tree);
extern bool std_layout_type_p (const_tree);
extern bool trivial_type_p (const_tree);
extern bool trivially_copyable_p (const_tree);
extern bool type_has_unique_obj_representations (const_tree);
extern bool scalarish_type_p (const_tree);
extern bool type_has_nontrivial_default_init (const_tree);
extern bool type_has_nontrivial_copy_init (const_tree);
extern void maybe_warn_parm_abi (tree, location_t);
extern bool class_tmpl_impl_spec_p (const_tree);
extern int zero_init_p (const_tree);
extern bool check_abi_tag_redeclaration (const_tree, const_tree,
const_tree);
extern bool check_abi_tag_args (tree, tree);
extern tree strip_typedefs (tree, bool * = NULL);
extern tree strip_typedefs_expr (tree, bool * = NULL);
extern tree copy_binfo (tree, tree, tree,
tree *, int);
extern int member_p (const_tree);
extern cp_lvalue_kind real_lvalue_p (const_tree);
extern cp_lvalue_kind lvalue_kind (const_tree);
extern bool glvalue_p (const_tree);
extern bool obvalue_p (const_tree);
extern bool xvalue_p (const_tree);
extern bool bitfield_p (const_tree);
extern tree cp_stabilize_reference (tree);
extern bool builtin_valid_in_constant_expr_p (const_tree);
extern tree build_min (enum tree_code, tree, ...);
extern tree build_min_nt_loc (location_t, enum tree_code,
...);
extern tree build_min_non_dep (enum tree_code, tree, ...);
extern tree build_min_non_dep_op_overload (enum tree_code, tree, tree, ...);
extern tree build_min_nt_call_vec (tree, vec<tree, va_gc> *);
extern tree build_min_non_dep_call_vec (tree, tree, vec<tree, va_gc> *);
extern vec<tree, va_gc>* vec_copy_and_insert (vec<tree, va_gc>*, tree, unsigned);
extern tree build_cplus_new (tree, tree, tsubst_flags_t);
extern tree build_aggr_init_expr (tree, tree);
extern tree get_target_expr (tree);
extern tree get_target_expr_sfinae (tree, tsubst_flags_t);
extern tree build_cplus_array_type (tree, tree);
extern tree build_array_of_n_type (tree, int);
extern bool array_of_runtime_bound_p (tree);
extern bool vla_type_p (tree);
extern tree build_array_copy (tree);
extern tree build_vec_init_expr (tree, tree, tsubst_flags_t);
extern void diagnose_non_constexpr_vec_init (tree);
extern tree hash_tree_cons (tree, tree, tree);
extern tree hash_tree_chain (tree, tree);
extern tree build_qualified_name (tree, tree, tree, bool);
extern tree build_ref_qualified_type (tree, cp_ref_qualifier);
inline tree ovl_first (tree) ATTRIBUTE_PURE;
extern tree ovl_make (tree fn,
tree next = NULL_TREE);
extern tree ovl_insert (tree fn, tree maybe_ovl,
bool using_p = false);
extern tree ovl_skip_hidden (tree) ATTRIBUTE_PURE;
extern void lookup_mark (tree lookup, bool val);
extern tree lookup_add (tree fns, tree lookup);
extern tree lookup_maybe_add (tree fns, tree lookup,
bool deduping);
extern void lookup_keep (tree lookup, bool keep);
extern void lookup_list_keep (tree list, bool keep);
extern int is_overloaded_fn (tree) ATTRIBUTE_PURE;
extern bool really_overloaded_fn (tree) ATTRIBUTE_PURE;
extern tree dependent_name (tree);
extern tree get_fns (tree) ATTRIBUTE_PURE;
extern tree get_first_fn (tree) ATTRIBUTE_PURE;
extern tree ovl_scope (tree);
extern const char *cxx_printable_name (tree, int);
extern const char *cxx_printable_name_translate (tree, int);
extern tree canonical_eh_spec (tree);
extern tree build_exception_variant (tree, tree);
extern tree bind_template_template_parm (tree, tree);
extern tree array_type_nelts_total (tree);
extern tree array_type_nelts_top (tree);
extern tree break_out_target_exprs (tree, bool = false);
extern tree build_ctor_subob_ref (tree, tree, tree);
extern tree replace_placeholders (tree, tree, bool * = NULL);
extern bool find_placeholders (tree);
extern tree get_type_decl (tree);
extern tree decl_namespace_context (tree);
extern bool decl_anon_ns_mem_p (const_tree);
extern tree lvalue_type (tree);
extern tree error_type (tree);
extern int varargs_function_p (const_tree);
extern bool cp_tree_equal (tree, tree);
extern tree no_linkage_check (tree, bool);
extern void debug_binfo (tree);
extern tree build_dummy_object (tree);
extern tree maybe_dummy_object (tree, tree *);
extern int is_dummy_object (const_tree);
extern const struct attribute_spec cxx_attribute_table[];
extern tree make_ptrmem_cst (tree, tree);
extern tree cp_build_type_attribute_variant (tree, tree);
extern tree cp_build_reference_type (tree, bool);
extern tree move (tree);
extern tree cp_build_qualified_type_real (tree, int, tsubst_flags_t);
#define cp_build_qualified_type(TYPE, QUALS) \
cp_build_qualified_type_real ((TYPE), (QUALS), tf_warning_or_error)
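/* E.g. (a sketch) cp_build_qualified_type (type, cp_type_quals (type)
   | TYPE_QUAL_CONST) builds the const-qualified variant of TYPE,
   diagnosing invalid qualifications under tf_warning_or_error.  */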
extern bool cv_qualified_p (const_tree);
extern tree cv_unqualified (tree);
extern special_function_kind special_function_p (const_tree);
extern int count_trees (tree);
extern int char_type_p (tree);
extern void verify_stmt_tree (tree);
extern linkage_kind decl_linkage (tree);
extern duration_kind decl_storage_duration (tree);
extern tree cp_walk_subtrees (tree*, int*, walk_tree_fn,
void*, hash_set<tree> *);
#define cp_walk_tree(tp,func,data,pset) \
walk_tree_1 (tp, func, data, pset, cp_walk_subtrees)
#define cp_walk_tree_without_duplicates(tp,func,data) \
walk_tree_without_duplicates_1 (tp, func, data, cp_walk_subtrees)
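/* Example callback (a sketch): a walk_tree_fn receives a pointer to
   each subtree; setting *walk_subtrees to 0 prunes the walk below the
   current node, and a non-NULL return value stops the walk and is
   propagated to the caller:

     static tree
     count_calls_r (tree *tp, int *walk_subtrees, void *data)
     {
       if (TREE_CODE (*tp) == CALL_EXPR)
         ++*(int *) data;
       if (TYPE_P (*tp))
         *walk_subtrees = 0;   // don't descend into types
       return NULL_TREE;
     }

     int n = 0;
     cp_walk_tree_without_duplicates (&body, count_calls_r, &n);
*/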
extern tree rvalue (tree);
extern tree convert_bitfield_to_declared_type (tree);
extern tree cp_save_expr (tree);
extern bool cast_valid_in_integral_constant_expression_p (tree);
extern bool cxx_type_hash_eq (const_tree, const_tree);
extern tree cxx_copy_lang_qualifiers (const_tree, const_tree);
extern void cxx_print_statistics (void);
extern bool maybe_warn_zero_as_null_pointer_constant (tree, location_t);
extern void cp_warn_deprecated_use (tree);
/* in ptree.c */
extern void cxx_print_xnode (FILE *, tree, int);
extern void cxx_print_decl (FILE *, tree, int);
extern void cxx_print_type (FILE *, tree, int);
extern void cxx_print_identifier (FILE *, tree, int);
extern void cxx_print_error_function (diagnostic_context *,
const char *,
struct diagnostic_info *);
/* in typeck.c */
extern bool cxx_mark_addressable (tree, bool = false);
extern int string_conv_p (const_tree, const_tree, int);
extern tree cp_truthvalue_conversion (tree);
extern tree condition_conversion (tree);
extern tree require_complete_type (tree);
extern tree require_complete_type_sfinae (tree, tsubst_flags_t);
extern tree complete_type (tree);
extern tree complete_type_or_else (tree, tree);
extern tree complete_type_or_maybe_complain (tree, tree, tsubst_flags_t);
inline bool type_unknown_p (const_tree);
enum { ce_derived, ce_type, ce_normal, ce_exact };
extern bool comp_except_specs (const_tree, const_tree, int);
extern bool comptypes (tree, tree, int);
extern bool same_type_ignoring_top_level_qualifiers_p (tree, tree);
extern bool compparms (const_tree, const_tree);
extern int comp_cv_qualification (const_tree, const_tree);
extern int comp_cv_qualification (int, int);
extern int comp_cv_qual_signature (tree, tree);
extern tree cxx_sizeof_or_alignof_expr (tree, enum tree_code, bool);
extern tree cxx_sizeof_or_alignof_type (tree, enum tree_code, bool, bool);
extern tree cxx_alignas_expr (tree);
extern tree cxx_sizeof_nowarn (tree);
extern tree is_bitfield_expr_with_lowered_type (const_tree);
extern tree unlowered_expr_type (const_tree);
extern tree decay_conversion (tree,
tsubst_flags_t,
bool = true);
extern tree build_class_member_access_expr (cp_expr, tree, tree, bool,
tsubst_flags_t);
extern tree finish_class_member_access_expr (cp_expr, tree, bool,
tsubst_flags_t);
extern tree build_x_indirect_ref (location_t, tree,
ref_operator, tsubst_flags_t);
extern tree cp_build_indirect_ref (tree, ref_operator,
tsubst_flags_t);
extern tree cp_build_fold_indirect_ref (tree);
extern tree build_array_ref (location_t, tree, tree);
extern tree cp_build_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree get_member_function_from_ptrfunc (tree *, tree, tsubst_flags_t);
extern tree cp_build_function_call_nary (tree, tsubst_flags_t, ...)
ATTRIBUTE_SENTINEL;
extern tree cp_build_function_call_vec (tree, vec<tree, va_gc> **,
tsubst_flags_t);
extern tree build_x_binary_op (location_t,
enum tree_code, tree,
enum tree_code, tree,
enum tree_code, tree *,
tsubst_flags_t);
extern tree build_x_array_ref (location_t, tree, tree,
tsubst_flags_t);
extern tree build_x_unary_op (location_t,
enum tree_code, cp_expr,
tsubst_flags_t);
extern tree cp_build_addressof (location_t, tree,
tsubst_flags_t);
extern tree cp_build_addr_expr (tree, tsubst_flags_t);
extern tree cp_build_unary_op (enum tree_code, tree, bool,
tsubst_flags_t);
extern tree genericize_compound_lvalue (tree);
extern tree unary_complex_lvalue (enum tree_code, tree);
extern tree build_x_conditional_expr (location_t, tree, tree, tree,
tsubst_flags_t);
extern tree build_x_compound_expr_from_list (tree, expr_list_kind,
tsubst_flags_t);
extern tree build_x_compound_expr_from_vec (vec<tree, va_gc> *,
const char *, tsubst_flags_t);
extern tree build_x_compound_expr (location_t, tree, tree,
tsubst_flags_t);
extern tree build_compound_expr (location_t, tree, tree);
extern tree cp_build_compound_expr (tree, tree, tsubst_flags_t);
extern tree build_static_cast (tree, tree, tsubst_flags_t);
extern tree build_reinterpret_cast (tree, tree, tsubst_flags_t);
extern tree build_const_cast (tree, tree, tsubst_flags_t);
extern tree build_c_cast (location_t, tree, tree);
extern cp_expr build_c_cast (location_t loc, tree type,
cp_expr expr);
extern tree cp_build_c_cast (tree, tree, tsubst_flags_t);
extern cp_expr build_x_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree cp_build_modify_expr (location_t, tree,
enum tree_code, tree,
tsubst_flags_t);
extern tree convert_for_initialization (tree, tree, tree, int,
impl_conv_rhs, tree, int,
tsubst_flags_t);
extern int comp_ptr_ttypes (tree, tree);
extern bool comp_ptr_ttypes_const (tree, tree);
extern bool error_type_p (const_tree);
extern bool ptr_reasonably_similar (const_tree, const_tree);
extern tree build_ptrmemfunc (tree, tree, int, bool,
tsubst_flags_t);
extern int cp_type_quals (const_tree);
extern int type_memfn_quals (const_tree);
extern cp_ref_qualifier type_memfn_rqual (const_tree);
extern tree apply_memfn_quals (tree, cp_cv_quals, cp_ref_qualifier);
extern bool cp_has_mutable_p (const_tree);
extern bool at_least_as_qualified_p (const_tree, const_tree);
extern void cp_apply_type_quals_to_decl (int, tree);
extern tree build_ptrmemfunc1 (tree, tree, tree);
extern void expand_ptrmemfunc_cst (tree, tree *, tree *);
extern tree type_after_usual_arithmetic_conversions (tree, tree);
extern tree common_pointer_type (tree, tree);
extern tree composite_pointer_type (tree, tree, tree, tree,
composite_pointer_operation,
tsubst_flags_t);
extern tree merge_types (tree, tree);
extern tree strip_array_domain (tree);
extern tree check_return_expr (tree, bool *);
extern tree cp_build_binary_op (location_t,
enum tree_code, tree, tree,
tsubst_flags_t);
extern tree build_x_vec_perm_expr (location_t,
tree, tree, tree,
tsubst_flags_t);
#define cxx_sizeof(T) cxx_sizeof_or_alignof_type (T, SIZEOF_EXPR, false, true)
extern tree build_simple_component_ref (tree, tree);
extern tree build_ptrmemfunc_access_expr (tree, tree);
extern tree build_address (tree);
extern tree build_nop (tree, tree);
extern tree non_reference (tree);
extern tree lookup_anon_field (tree, tree);
extern bool invalid_nonstatic_memfn_p (location_t, tree,
tsubst_flags_t);
extern tree convert_member_func_to_ptr (tree, tree, tsubst_flags_t);
extern tree convert_ptrmem (tree, tree, bool, bool,
tsubst_flags_t);
extern int lvalue_or_else (tree, enum lvalue_use,
tsubst_flags_t);
extern void check_template_keyword (tree);
extern bool check_raw_literal_operator (const_tree decl);
extern bool check_literal_operator_args (const_tree, bool *, bool *);
extern void maybe_warn_about_useless_cast (tree, tree, tsubst_flags_t);
extern tree cp_perform_integral_promotions (tree, tsubst_flags_t);
extern tree finish_left_unary_fold_expr (tree, int);
extern tree finish_right_unary_fold_expr (tree, int);
extern tree finish_binary_fold_expr (tree, tree, int);
/* in typeck2.c */
extern void require_complete_eh_spec_types (tree, tree);
extern void cxx_incomplete_type_diagnostic (location_t, const_tree,
const_tree, diagnostic_t);
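/* Convenience overload that takes the diagnostic location from VALUE.  */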
inline void
cxx_incomplete_type_diagnostic (const_tree value, const_tree type,
diagnostic_t diag_kind)
{
cxx_incomplete_type_diagnostic (EXPR_LOC_OR_LOC (value, input_location),
value, type, diag_kind);
}
extern void cxx_incomplete_type_error (location_t, const_tree,
const_tree);
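/* Likewise, but at error severity, with the location taken from VALUE.  */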
inline void
cxx_incomplete_type_error (const_tree value, const_tree type)
{
cxx_incomplete_type_diagnostic (value, type, DK_ERROR);
}
extern void cxx_incomplete_type_inform (const_tree);
extern tree error_not_base_type (tree, tree);
extern tree binfo_or_else (tree, tree);
extern void cxx_readonly_error (tree, enum lvalue_use);
extern void complete_type_check_abstract (tree);
extern int abstract_virtuals_error (tree, tree);
extern int abstract_virtuals_error (abstract_class_use, tree);
extern int abstract_virtuals_error_sfinae (tree, tree, tsubst_flags_t);
extern int abstract_virtuals_error_sfinae (abstract_class_use, tree, tsubst_flags_t);
extern tree store_init_value (tree, tree, vec<tree, va_gc>**, int);
extern tree split_nonconstant_init (tree, tree);
extern bool check_narrowing (tree, tree, tsubst_flags_t);
extern tree digest_init (tree, tree, tsubst_flags_t);
extern tree digest_init_flags (tree, tree, int, tsubst_flags_t);
extern tree digest_nsdmi_init (tree, tree, tsubst_flags_t);
extern tree build_scoped_ref (tree, tree, tree *);
extern tree build_x_arrow (location_t, tree,
tsubst_flags_t);
extern tree build_m_component_ref (tree, tree, tsubst_flags_t);
extern tree build_functional_cast (tree, tree, tsubst_flags_t);
extern tree add_exception_specifier (tree, tree, int);
extern tree merge_exception_specifiers (tree, tree);
/* in mangle.c */
extern void init_mangle (void);
extern void mangle_decl (tree);
extern const char *mangle_type_string (tree);
extern tree mangle_typeinfo_for_type (tree);
extern tree mangle_typeinfo_string_for_type (tree);
extern tree mangle_vtbl_for_type (tree);
extern tree mangle_vtt_for_type (tree);
extern tree mangle_ctor_vtbl_for_type (tree, tree);
extern tree mangle_thunk (tree, int, tree, tree, tree);
extern tree mangle_guard_variable (tree);
extern tree mangle_tls_init_fn (tree);
extern tree mangle_tls_wrapper_fn (tree);
extern bool decl_tls_wrapper_p (tree);
extern tree mangle_ref_init_variable (tree);
extern char * get_mangled_vtable_map_var_name (tree);
extern bool mangle_return_type_p (tree);
extern tree mangle_decomp (tree, vec<tree> &);
/* in dump.c */
extern bool cp_dump_tree (void *, tree);
/* In cp/cp-objcp-common.c. */
extern alias_set_type cxx_get_alias_set (tree);
extern bool cxx_warn_unused_global_decl (const_tree);
extern size_t cp_tree_size (enum tree_code);
extern bool cp_var_mod_type_p (tree, tree);
extern void cxx_initialize_diagnostics (diagnostic_context *);
extern int cxx_types_compatible_p (tree, tree);
extern void init_shadowed_var_for_decl (void);
extern bool cxx_block_may_fallthru (const_tree);
/* in cp-gimplify.c */
extern int cp_gimplify_expr (tree *, gimple_seq *,
gimple_seq *);
extern void cp_genericize (tree);
extern bool cxx_omp_const_qual_no_mutable (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing_1 (tree);
extern enum omp_clause_default_kind cxx_omp_predetermined_sharing (tree);
extern tree cxx_omp_clause_default_ctor (tree, tree, tree);
extern tree cxx_omp_clause_copy_ctor (tree, tree, tree);
extern tree cxx_omp_clause_assign_op (tree, tree, tree);
extern tree cxx_omp_clause_dtor (tree, tree);
extern void cxx_omp_finish_clause (tree, gimple_seq *);
extern bool cxx_omp_privatize_by_reference (const_tree);
extern bool cxx_omp_disregard_value_expr (tree, bool);
extern void cp_fold_function (tree);
extern tree cp_fully_fold (tree);
extern void clear_fold_cache (void);
/* in name-lookup.c */
extern void suggest_alternatives_for (location_t, tree, bool);
extern bool suggest_alternative_in_explicit_scope (location_t, tree, tree);
extern tree strip_using_decl (tree);
/* Tell the binding oracle what kind of binding we are looking for. */
enum cp_oracle_request
{
CP_ORACLE_IDENTIFIER
};
/* If this is non-NULL, then it is a "binding oracle" which can lazily
create bindings when needed by the C compiler. The oracle is told
the name and type of the binding to create. It can call pushdecl
or the like to ensure the binding is visible; or do nothing,
leaving the binding untouched. c-decl.c takes note of when the
oracle has been called and will not call it again if it fails to
create a given binding. */
typedef void cp_binding_oracle_function (enum cp_oracle_request, tree identifier);
extern cp_binding_oracle_function *cp_binding_oracle;
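/* A minimal sketch of installing an oracle; the function below and the
   external symbol table it alludes to are illustrative, not part of GCC.  */
#if 0
static void
my_binding_oracle (enum cp_oracle_request kind, tree identifier)
{
  /* Look IDENTIFIER up in an external symbol table and pushdecl a
     binding for it here; doing nothing leaves the name unbound.  */
  (void) kind;
  (void) identifier;
}
/* At initialization time: cp_binding_oracle = my_binding_oracle;  */
#endif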
/* in constraint.cc */
extern void init_constraint_processing ();
extern bool constraint_p (tree);
extern tree conjoin_constraints (tree, tree);
extern tree conjoin_constraints (tree);
extern tree get_constraints (tree);
extern void set_constraints (tree, tree);
extern void remove_constraints (tree);
extern tree current_template_constraints (void);
extern tree associate_classtype_constraints (tree);
extern tree build_constraints (tree, tree);
extern tree get_shorthand_constraints (tree);
extern tree build_concept_check (tree, tree, tree = NULL_TREE);
extern tree build_constrained_parameter (tree, tree, tree = NULL_TREE);
extern tree make_constrained_auto (tree, tree);
extern void placeholder_extract_concept_and_args (tree, tree&, tree&);
extern bool equivalent_placeholder_constraints (tree, tree);
extern hashval_t hash_placeholder_constraint (tree);
extern bool deduce_constrained_parameter (tree, tree&, tree&);
extern tree resolve_constraint_check (tree);
extern tree check_function_concept (tree);
extern tree finish_template_introduction (tree, tree);
extern bool valid_requirements_p (tree);
extern tree finish_concept_name (tree);
extern tree finish_shorthand_constraint (tree, tree);
extern tree finish_requires_expr (tree, tree);
extern tree finish_simple_requirement (tree);
extern tree finish_type_requirement (tree);
extern tree finish_compound_requirement (tree, tree, bool);
extern tree finish_nested_requirement (tree);
extern void check_constrained_friend (tree, tree);
extern tree tsubst_requires_expr (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_constraint (tree, tree, tsubst_flags_t, tree);
extern tree tsubst_constraint_info (tree, tree, tsubst_flags_t, tree);
extern bool function_concept_check_p (tree);
extern tree normalize_expression (tree);
extern tree expand_concept (tree, tree);
extern bool expanding_concept ();
extern tree evaluate_constraints (tree, tree);
extern tree evaluate_function_concept (tree, tree);
extern tree evaluate_variable_concept (tree, tree);
extern tree evaluate_constraint_expression (tree, tree);
extern bool constraints_satisfied_p (tree);
extern bool constraints_satisfied_p (tree, tree);
extern tree lookup_constraint_satisfaction (tree, tree);
extern tree memoize_constraint_satisfaction (tree, tree, tree);
extern tree lookup_concept_satisfaction (tree, tree);
extern tree memoize_concept_satisfaction (tree, tree, tree);
extern tree get_concept_expansion (tree, tree);
extern tree save_concept_expansion (tree, tree, tree);
extern bool* lookup_subsumption_result (tree, tree);
extern bool save_subsumption_result (tree, tree, bool);
extern bool equivalent_constraints (tree, tree);
extern bool equivalently_constrained (tree, tree);
extern bool subsumes_constraints (tree, tree);
extern bool strictly_subsumes (tree, tree);
extern int more_constrained (tree, tree);
extern void diagnose_constraints (location_t, tree, tree);
/* in logic.cc */
extern tree decompose_conclusions (tree);
extern bool subsumes (tree, tree);
/* In class.c */
extern void cp_finish_injected_record_type (tree);
/* in vtable-class-hierarchy.c */
extern void vtv_compute_class_hierarchy_transitive_closure (void);
extern void vtv_generate_init_routine (void);
extern void vtv_save_class_info (tree);
extern void vtv_recover_class_info (void);
extern void vtv_build_vtable_verify_fndecl (void);
/* In constexpr.c */
extern void fini_constexpr (void);
extern bool literal_type_p (tree);
extern tree register_constexpr_fundef (tree, tree);
extern bool is_valid_constexpr_fn (tree, bool);
extern bool check_constexpr_ctor_body (tree, tree, bool);
extern tree constexpr_fn_retval (tree);
extern tree ensure_literal_type_for_constexpr_object (tree);
extern bool potential_constant_expression (tree);
extern bool is_constant_expression (tree);
extern bool is_nondependent_constant_expression (tree);
extern bool is_nondependent_static_init_expression (tree);
extern bool is_static_init_expression (tree);
extern bool potential_rvalue_constant_expression (tree);
extern bool require_potential_constant_expression (tree);
extern bool require_constant_expression (tree);
extern bool require_rvalue_constant_expression (tree);
extern bool require_potential_rvalue_constant_expression (tree);
extern tree cxx_constant_value (tree, tree = NULL_TREE);
extern tree cxx_constant_init (tree, tree = NULL_TREE);
extern tree maybe_constant_value (tree, tree = NULL_TREE);
extern tree maybe_constant_init (tree, tree = NULL_TREE);
extern tree fold_non_dependent_expr (tree, tsubst_flags_t = tf_none);
extern tree fold_simple (tree);
extern bool is_sub_constant_expr (tree);
extern bool reduced_constant_expression_p (tree);
extern bool is_instantiation_of_constexpr (tree);
extern bool var_in_constexpr_fn (tree);
extern bool var_in_maybe_constexpr_fn (tree);
extern void explain_invalid_constexpr_fn (tree);
extern vec<tree> cx_error_context (void);
extern tree fold_sizeof_expr (tree);
extern void clear_cv_and_fold_caches (void);
/* In cp-ubsan.c */
extern void cp_ubsan_maybe_instrument_member_call (tree);
extern void cp_ubsan_instrument_member_accesses (tree *);
extern tree cp_ubsan_maybe_instrument_downcast (location_t, tree, tree, tree);
extern tree cp_ubsan_maybe_instrument_cast_to_vbase (location_t, tree, tree);
extern void cp_ubsan_maybe_initialize_vtbl_ptrs (tree);
/* Inline bodies. */
inline tree
ovl_first (tree node)
{
while (TREE_CODE (node) == OVERLOAD)
node = OVL_FUNCTION (node);
return node;
}
inline bool
type_unknown_p (const_tree expr)
{
return TREE_TYPE (expr) == unknown_type_node;
}
inline hashval_t
named_decl_hash::hash (const value_type decl)
{
tree name = OVL_NAME (decl);
return name ? IDENTIFIER_HASH_VALUE (name) : 0;
}
inline bool
named_decl_hash::equal (const value_type existing, compare_type candidate)
{
tree name = OVL_NAME (existing);
return candidate == name;
}
inline bool
null_node_p (const_tree expr)
{
STRIP_ANY_LOCATION_WRAPPER (expr);
return expr == null_node;
}
#if CHECKING_P
namespace selftest {
extern void run_cp_tests (void);
/* Declarations for specific families of tests within cp,
by source file, in alphabetical order. */
extern void cp_pt_c_tests ();
extern void cp_tree_c_tests (void);
} // namespace selftest
#endif /* #if CHECKING_P */
/* -- end of C++ */
#endif /* ! GCC_CP_TREE_H */
|
perft.h | // perft.h - PIGEON CHESS ENGINE (c) 2012-2016 Stuart Riffle
namespace Pigeon {
#ifndef PIGEON_PERFT_H__
#define PIGEON_PERFT_H__
struct Perft
{
static void GatherPerftParallelPositions( const Position& pos, int depth, std::vector< Position >* dest )
{
MoveList valid;
valid.FindMoves( pos );
for( int i = 0; i < valid.mCount; i++ )
{
Position next = pos;
next.Step( valid.mMove[i] );
if( depth == (PERFT_PARALLEL_MAX + 1) )
dest->push_back( next );
else
Perft::GatherPerftParallelPositions( next, depth - 1, dest );
}
}
static u64 CalcPerftParallel( const Position& pos, int depth )
{
std::vector< Position > positions;
positions.reserve( 16384 );
Perft::GatherPerftParallelPositions( pos, depth, &positions );
u64 total = 0;
//printf( "info string perft parallel positions %d\n", (int) positions.size() );
#pragma omp parallel for reduction(+: total) schedule(dynamic)
for( int i = 0; i < (int) positions.size(); i++ )
{
u64 subtotal = Perft::CalcPerftInternal( positions[i], PERFT_PARALLEL_MAX );
total = total + subtotal;
}
return( total );
}
static u64 CalcPerftInternal( const Position& pos, int depth )
{
if( (depth > PERFT_PARALLEL_MAX) && (depth <= PERFT_PARALLEL_MAX + 3) )
{
return( Perft::CalcPerftParallel( pos, depth ) );
}
MoveList valid;
valid.FindMoves( pos );
u64 total = 0;
for( int i = 0; i < valid.mCount; i++ )
{
Position next = pos;
next.Step( valid.mMove[i] );
if( depth == 2 )
{
MoveList dummy;
total += dummy.FindMoves( next );
}
else
{
total += Perft::CalcPerftInternal( next, depth - 1 );
}
}
return( total );
}
static u64 CalcPerft( const Position& pos, int depth )
{
if( depth < 2 )
{
MoveList dummy;
return( dummy.FindMoves( pos ) );
}
return( Perft::CalcPerftInternal( pos, depth ) );
}
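// A minimal usage sketch. Position::Reset() is assumed here to set up the
// standard starting position; adjust to the engine's actual API if it differs.
//
//     Pigeon::Position pos;
//     pos.Reset();
//     Pigeon::u64 leaves = Pigeon::Perft::CalcPerft( pos, 5 );
//     printf( "perft(5) = %" PRIu64 "\n", leaves );  // 4865609 from the start position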
static void DividePerft( const Position& pos, int depth )
{
MoveList valid;
valid.FindMoves( pos );
u64 total = 0;
for( int i = 0; i < valid.mCount; i++ )
{
Position next = pos;
next.Step( valid.mMove[i] );
u64 count = (depth > 1)? Perft::CalcPerft( next, depth - 1 ) : 1;
total += count;
printf( "info string divide %d ", depth );
FEN::PrintMoveSpec( valid.mMove[i] );
printf( " %" PRId64 "\n", count );
}
printf( "info string divide %d total %" PRId64 "\n", depth, total );
}
};
#endif // PIGEON_PERFT_H__
};
|
GB_unop__trunc_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__trunc_fc64_fc64)
// op(A') function: GB (_unop_tran__trunc_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = GB_ctrunc (aij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC64_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GB_ctrunc (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC64_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC64_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC64_t z = aij ; \
Cx [pC] = GB_ctrunc (z) ; \
}
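// Conceptually, GB_ctrunc truncates a double complex value componentwise.
// A sketch of the idea (not necessarily the exact GraphBLAS definition):
//
//      #include <complex.h>
//      #include <math.h>
//      GxB_FC64_t ctrunc_sketch (GxB_FC64_t x)
//      {
//          return (GxB_CMPLX (trunc (creal (x)), trunc (cimag (x)))) ;
//      }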
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TRUNC || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__trunc_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_ctrunc (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC64_t aij = Ax [p] ;
GxB_FC64_t z = aij ;
Cx [p] = GB_ctrunc (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__trunc_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
Stmt.h | //===- Stmt.h - Classes for representing statements -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Stmt interface and subclasses.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_AST_STMT_H
#define LLVM_CLANG_AST_STMT_H
#include "clang/AST/DeclGroup.h"
#include "clang/AST/StmtIterator.h"
#include "clang/Basic/CapturedStmt.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <string>
namespace llvm {
class FoldingSetNodeID;
} // namespace llvm
namespace clang {
class ASTContext;
class Attr;
class CapturedDecl;
class Decl;
class Expr;
class AddrLabelExpr;
class LabelDecl;
class ODRHash;
class PrinterHelper;
struct PrintingPolicy;
class RecordDecl;
class SourceManager;
class StringLiteral;
class Token;
class VarDecl;
//===----------------------------------------------------------------------===//
// AST classes for statements.
//===----------------------------------------------------------------------===//
/// Stmt - This represents one statement.
///
class alignas(void *) Stmt {
public:
enum StmtClass {
NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
};
// Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
friend class ASTStmtReader;
friend class ASTStmtWriter;
void *operator new(size_t bytes) noexcept {
llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
}
void operator delete(void *data) noexcept {
llvm_unreachable("Stmts cannot be released with regular 'delete'.");
}
//===--- Statement bitfields classes ---===//
class StmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class Stmt;
/// The statement class.
unsigned sClass : 8;
/// This bit is set only for the Stmts that are the structured-block of
/// OpenMP executable directives. Directives that have a structured block
/// are called "non-standalone" directives.
/// I.e. those returned by OMPExecutableDirective::getStructuredBlock().
unsigned IsOMPStructuredBlock : 1;
};
enum { NumStmtBits = 9 };
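// Every bitfield class below begins with an unnamed bitfield of width
// NumStmtBits (or NumExprBits, etc.) so that, inside the union at the end
// of this section, its own bits land after the ones declared by
// StmtBitfields. A minimal sketch of the pattern (names illustrative):
//
//   class BaseBits    { unsigned Tag : 8; };
//   class DerivedBits { unsigned : 8; unsigned Flag : 1; };  // skips the prefix
//   union { BaseBits Base; DerivedBits Derived; };           // bits don't collide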
class NullStmtBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class NullStmt;
unsigned : NumStmtBits;
/// True if the null statement was preceded by an empty macro, e.g.:
/// @code
/// #define CALL(x)
/// CALL(0);
/// @endcode
unsigned HasLeadingEmptyMacro : 1;
/// The location of the semi-colon.
SourceLocation SemiLoc;
};
class CompoundStmtBitfields {
friend class ASTStmtReader;
friend class CompoundStmt;
unsigned : NumStmtBits;
unsigned NumStmts : 32 - NumStmtBits;
/// The location of the opening "{".
SourceLocation LBraceLoc;
};
class LabelStmtBitfields {
friend class LabelStmt;
unsigned : NumStmtBits;
SourceLocation IdentLoc;
};
class AttributedStmtBitfields {
friend class ASTStmtReader;
friend class AttributedStmt;
unsigned : NumStmtBits;
/// Number of attributes.
unsigned NumAttrs : 32 - NumStmtBits;
/// The location of the attribute.
SourceLocation AttrLoc;
};
class IfStmtBitfields {
friend class ASTStmtReader;
friend class IfStmt;
unsigned : NumStmtBits;
/// True if this if statement is a constexpr if.
unsigned IsConstexpr : 1;
/// True if this if statement has storage for an else statement.
unsigned HasElse : 1;
/// True if this if statement has storage for a variable declaration.
unsigned HasVar : 1;
/// True if this if statement has storage for an init statement.
unsigned HasInit : 1;
/// The location of the "if".
SourceLocation IfLoc;
};
class SwitchStmtBitfields {
friend class SwitchStmt;
unsigned : NumStmtBits;
/// True if the SwitchStmt has storage for an init statement.
unsigned HasInit : 1;
/// True if the SwitchStmt has storage for a condition variable.
unsigned HasVar : 1;
/// If the SwitchStmt is a switch on an enum value, records whether all
/// the enum values were covered by CaseStmts. The coverage information
/// value is meant to be a hint for possible clients.
unsigned AllEnumCasesCovered : 1;
/// The location of the "switch".
SourceLocation SwitchLoc;
};
class WhileStmtBitfields {
friend class ASTStmtReader;
friend class WhileStmt;
unsigned : NumStmtBits;
/// True if the WhileStmt has storage for a condition variable.
unsigned HasVar : 1;
/// The location of the "while".
SourceLocation WhileLoc;
};
class DoStmtBitfields {
friend class DoStmt;
unsigned : NumStmtBits;
/// The location of the "do".
SourceLocation DoLoc;
};
class ForStmtBitfields {
friend class ForStmt;
unsigned : NumStmtBits;
/// The location of the "for".
SourceLocation ForLoc;
};
class GotoStmtBitfields {
friend class GotoStmt;
friend class IndirectGotoStmt;
unsigned : NumStmtBits;
/// The location of the "goto".
SourceLocation GotoLoc;
};
class ContinueStmtBitfields {
friend class ContinueStmt;
unsigned : NumStmtBits;
/// The location of the "continue".
SourceLocation ContinueLoc;
};
class BreakStmtBitfields {
friend class BreakStmt;
unsigned : NumStmtBits;
/// The location of the "break".
SourceLocation BreakLoc;
};
class ReturnStmtBitfields {
friend class ReturnStmt;
unsigned : NumStmtBits;
/// True if this ReturnStmt has storage for an NRVO candidate.
unsigned HasNRVOCandidate : 1;
/// The location of the "return".
SourceLocation RetLoc;
};
class SwitchCaseBitfields {
friend class SwitchCase;
friend class CaseStmt;
unsigned : NumStmtBits;
/// Used by CaseStmt to store whether it is a case statement
/// of the form case LHS ... RHS (a GNU extension).
unsigned CaseStmtIsGNURange : 1;
/// The location of the "case" or "default" keyword.
SourceLocation KeywordLoc;
};
//===--- Expression bitfields classes ---===//
class ExprBitfields {
friend class ASTStmtReader; // deserialization
friend class AtomicExpr; // ctor
friend class BlockDeclRefExpr; // ctor
friend class CallExpr; // ctor
friend class CXXConstructExpr; // ctor
friend class CXXDependentScopeMemberExpr; // ctor
friend class CXXNewExpr; // ctor
friend class CXXUnresolvedConstructExpr; // ctor
friend class DeclRefExpr; // computeDependence
friend class DependentScopeDeclRefExpr; // ctor
friend class DesignatedInitExpr; // ctor
friend class Expr;
friend class InitListExpr; // ctor
friend class ObjCArrayLiteral; // ctor
friend class ObjCDictionaryLiteral; // ctor
friend class ObjCMessageExpr; // ctor
friend class OffsetOfExpr; // ctor
friend class OpaqueValueExpr; // ctor
friend class OverloadExpr; // ctor
friend class ParenListExpr; // ctor
friend class PseudoObjectExpr; // ctor
friend class ShuffleVectorExpr; // ctor
unsigned : NumStmtBits;
unsigned ValueKind : 2;
unsigned ObjectKind : 3;
unsigned TypeDependent : 1;
unsigned ValueDependent : 1;
unsigned InstantiationDependent : 1;
unsigned ContainsUnexpandedParameterPack : 1;
};
enum { NumExprBits = NumStmtBits + 9 };
class ConstantExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class ConstantExpr;
unsigned : NumExprBits;
/// The kind of result that is trail-allocated.
unsigned ResultKind : 2;
/// Kind of Result as defined by APValue::Kind
unsigned APValueKind : 4;
/// When ResultKind == RSK_Int64, whether the trail-allocated integer is
/// signed.
unsigned IsUnsigned : 1;
/// When ResultKind == RSK_Int64, the BitWidth of the trail-allocated
/// integer. 7 bits because it is the minimal number of bits needed to
/// represent a value from 0 to 64 (the size of the trail-allocated number).
unsigned BitWidth : 7;
/// When ResultKind == RSK_APValue, whether the ASTContext will run the
/// destructor on the trail-allocated APValue.
unsigned HasCleanup : 1;
/// Whether this ConstantExpr was created for immediate invocation.
unsigned IsImmediateInvocation : 1;
};
class PredefinedExprBitfields {
friend class ASTStmtReader;
friend class PredefinedExpr;
unsigned : NumExprBits;
/// The kind of this PredefinedExpr. One of the enumeration values
/// in PredefinedExpr::IdentKind.
unsigned Kind : 4;
/// True if this PredefinedExpr has a trailing "StringLiteral *"
/// for the predefined identifier.
unsigned HasFunctionName : 1;
/// The location of this PredefinedExpr.
SourceLocation Loc;
};
class DeclRefExprBitfields {
friend class ASTStmtReader; // deserialization
friend class DeclRefExpr;
unsigned : NumExprBits;
unsigned HasQualifier : 1;
unsigned HasTemplateKWAndArgsInfo : 1;
unsigned HasFoundDecl : 1;
unsigned HadMultipleCandidates : 1;
unsigned RefersToEnclosingVariableOrCapture : 1;
unsigned NonOdrUseReason : 2;
/// The location of the declaration name itself.
SourceLocation Loc;
};
class FloatingLiteralBitfields {
friend class FloatingLiteral;
unsigned : NumExprBits;
unsigned Semantics : 3; // Provides semantics for APFloat construction
unsigned IsExact : 1;
};
class StringLiteralBitfields {
friend class ASTStmtReader;
friend class StringLiteral;
unsigned : NumExprBits;
/// The kind of this string literal.
/// One of the enumeration values of StringLiteral::StringKind.
unsigned Kind : 3;
/// The width of a single character in bytes. Only values of 1, 2,
/// and 4 bytes are supported. StringLiteral::mapCharByteWidth maps
/// the target + string kind to the appropriate CharByteWidth.
unsigned CharByteWidth : 3;
unsigned IsPascal : 1;
/// The number of concatenated tokens this string is made of.
/// This is the number of trailing SourceLocations.
unsigned NumConcatenated;
};
class CharacterLiteralBitfields {
friend class CharacterLiteral;
unsigned : NumExprBits;
unsigned Kind : 3;
};
class UnaryOperatorBitfields {
friend class UnaryOperator;
unsigned : NumExprBits;
unsigned Opc : 5;
unsigned CanOverflow : 1;
SourceLocation Loc;
};
class UnaryExprOrTypeTraitExprBitfields {
friend class UnaryExprOrTypeTraitExpr;
unsigned : NumExprBits;
unsigned Kind : 3;
unsigned IsType : 1; // true if operand is a type, false if an expression.
};
class ArraySubscriptExprBitfields {
friend class ArraySubscriptExpr;
unsigned : NumExprBits;
SourceLocation RBracketLoc;
};
class CallExprBitfields {
friend class CallExpr;
unsigned : NumExprBits;
unsigned NumPreArgs : 1;
/// True if the callee of the call expression was found using ADL.
unsigned UsesADL : 1;
/// Padding used to align OffsetToTrailingObjects to a byte multiple.
unsigned : 24 - 2 - NumExprBits;
/// The offset in bytes from the this pointer to the start of the
/// trailing objects belonging to CallExpr. Intentionally byte sized
/// for faster access.
unsigned OffsetToTrailingObjects : 8;
};
enum { NumCallExprBits = 32 };
class MemberExprBitfields {
friend class ASTStmtReader;
friend class MemberExpr;
unsigned : NumExprBits;
/// IsArrow - True if this is "X->F", false if this is "X.F".
unsigned IsArrow : 1;
/// True if this member expression used a nested-name-specifier to
/// refer to the member, e.g., "x->Base::f", or found its member via
/// a using declaration. When true, a MemberExprNameQualifier
/// structure is allocated immediately after the MemberExpr.
unsigned HasQualifierOrFoundDecl : 1;
/// True if this member expression specified a template keyword
/// and/or a template argument list explicitly, e.g., x->f<int>,
/// x->template f, x->template f<int>.
/// When true, an ASTTemplateKWAndArgsInfo structure and its
/// TemplateArguments (if any) are present.
unsigned HasTemplateKWAndArgsInfo : 1;
/// True if this member expression refers to a method that
/// was resolved from an overloaded set having size greater than 1.
unsigned HadMultipleCandidates : 1;
/// Value of type NonOdrUseReason indicating why this MemberExpr does
/// not constitute an odr-use of the named declaration. Meaningful only
/// when naming a static member.
unsigned NonOdrUseReason : 2;
/// This is the location of the -> or . in the expression.
SourceLocation OperatorLoc;
};
class CastExprBitfields {
friend class CastExpr;
friend class ImplicitCastExpr;
unsigned : NumExprBits;
unsigned Kind : 6;
unsigned PartOfExplicitCast : 1; // Only set for ImplicitCastExpr.
/// The number of CXXBaseSpecifiers in the cast. 14 bits would be enough
/// here. ([implimits] Direct and indirect base classes [16384]).
unsigned BasePathSize;
};
class BinaryOperatorBitfields {
friend class BinaryOperator;
unsigned : NumExprBits;
unsigned Opc : 6;
/// This is only meaningful for operations on floating point
/// types and 0 otherwise.
unsigned FPFeatures : 8;
SourceLocation OpLoc;
};
class InitListExprBitfields {
friend class InitListExpr;
unsigned : NumExprBits;
/// Whether this initializer list originally had a GNU array-range
/// designator in it. This is a temporary marker used by CodeGen.
unsigned HadArrayRangeDesignator : 1;
};
class ParenListExprBitfields {
friend class ASTStmtReader;
friend class ParenListExpr;
unsigned : NumExprBits;
/// The number of expressions in the paren list.
unsigned NumExprs;
};
class GenericSelectionExprBitfields {
friend class ASTStmtReader;
friend class GenericSelectionExpr;
unsigned : NumExprBits;
/// The location of the "_Generic".
SourceLocation GenericLoc;
};
class PseudoObjectExprBitfields {
friend class ASTStmtReader; // deserialization
friend class PseudoObjectExpr;
unsigned : NumExprBits;
// These don't need to be particularly wide, because they're
// strictly limited by the forms of expressions we permit.
unsigned NumSubExprs : 8;
unsigned ResultIndex : 32 - 8 - NumExprBits;
};
class SourceLocExprBitfields {
friend class ASTStmtReader;
friend class SourceLocExpr;
unsigned : NumExprBits;
/// The kind of source location builtin represented by the SourceLocExpr.
/// Ex. __builtin_LINE, __builtin_FUNCTION, etc.
unsigned Kind : 2;
};
//===--- C++ Expression bitfields classes ---===//
class CXXOperatorCallExprBitfields {
friend class ASTStmtReader;
friend class CXXOperatorCallExpr;
unsigned : NumCallExprBits;
/// The kind of this overloaded operator. One of the enumerator
/// values of OverloadedOperatorKind.
unsigned OperatorKind : 6;
// Only meaningful for floating point types.
unsigned FPFeatures : 8;
};
class CXXRewrittenBinaryOperatorBitfields {
friend class ASTStmtReader;
friend class CXXRewrittenBinaryOperator;
unsigned : NumCallExprBits;
unsigned IsReversed : 1;
};
class CXXBoolLiteralExprBitfields {
friend class CXXBoolLiteralExpr;
unsigned : NumExprBits;
/// The value of the boolean literal.
unsigned Value : 1;
/// The location of the boolean literal.
SourceLocation Loc;
};
class CXXNullPtrLiteralExprBitfields {
friend class CXXNullPtrLiteralExpr;
unsigned : NumExprBits;
/// The location of the null pointer literal.
SourceLocation Loc;
};
class CXXThisExprBitfields {
friend class CXXThisExpr;
unsigned : NumExprBits;
/// Whether this is an implicit "this".
unsigned IsImplicit : 1;
/// The location of the "this".
SourceLocation Loc;
};
class CXXThrowExprBitfields {
friend class ASTStmtReader;
friend class CXXThrowExpr;
unsigned : NumExprBits;
/// Whether the thrown variable (if any) is in scope.
unsigned IsThrownVariableInScope : 1;
/// The location of the "throw".
SourceLocation ThrowLoc;
};
class CXXDefaultArgExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultArgExpr;
unsigned : NumExprBits;
/// The location where the default argument expression was used.
SourceLocation Loc;
};
class CXXDefaultInitExprBitfields {
friend class ASTStmtReader;
friend class CXXDefaultInitExpr;
unsigned : NumExprBits;
/// The location where the default initializer expression was used.
SourceLocation Loc;
};
class CXXScalarValueInitExprBitfields {
friend class ASTStmtReader;
friend class CXXScalarValueInitExpr;
unsigned : NumExprBits;
SourceLocation RParenLoc;
};
class CXXNewExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class CXXNewExpr;
unsigned : NumExprBits;
/// Was the usage ::new, i.e. is the global new to be used?
unsigned IsGlobalNew : 1;
/// Do we allocate an array? If so, the first trailing "Stmt *" is the
/// size expression.
unsigned IsArray : 1;
/// Should the alignment be passed to the allocation function?
unsigned ShouldPassAlignment : 1;
/// If this is an array allocation, does the usual deallocation
/// function for the allocated type want to know the allocated size?
unsigned UsualArrayDeleteWantsSize : 1;
/// What kind of initializer do we have? Could be none, parens, or braces.
/// In storage, we distinguish between "none, and no initializer expr", and
/// "none, but an implicit initializer expr".
unsigned StoredInitializationStyle : 2;
/// True if the allocated type was expressed as a parenthesized type-id.
unsigned IsParenTypeId : 1;
/// The number of placement new arguments.
unsigned NumPlacementArgs;
};
class CXXDeleteExprBitfields {
friend class ASTStmtReader;
friend class CXXDeleteExpr;
unsigned : NumExprBits;
/// Is this a forced global delete, i.e. "::delete"?
unsigned GlobalDelete : 1;
/// Is this the array form of delete, i.e. "delete[]"?
unsigned ArrayForm : 1;
/// ArrayFormAsWritten can be different from ArrayForm if 'delete' is
/// applied to pointer-to-array type (ArrayFormAsWritten will be false
/// while ArrayForm will be true).
unsigned ArrayFormAsWritten : 1;
/// Does the usual deallocation function for the element type require
/// a size_t argument?
unsigned UsualArrayDeleteWantsSize : 1;
/// Location of the expression.
SourceLocation Loc;
};
class TypeTraitExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class TypeTraitExpr;
unsigned : NumExprBits;
/// The kind of type trait, which is a value of a TypeTrait enumerator.
unsigned Kind : 8;
/// If this expression is not value-dependent, this indicates whether
/// the trait evaluated true or false.
unsigned Value : 1;
/// The number of arguments to this type trait.
unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
};
class DependentScopeDeclRefExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class DependentScopeDeclRefExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
};
class CXXConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXConstructExpr;
unsigned : NumExprBits;
unsigned Elidable : 1;
unsigned HadMultipleCandidates : 1;
unsigned ListInitialization : 1;
unsigned StdInitListInitialization : 1;
unsigned ZeroInitialization : 1;
unsigned ConstructionKind : 3;
SourceLocation Loc;
};
class ExprWithCleanupsBitfields {
friend class ASTStmtReader; // deserialization
friend class ExprWithCleanups;
unsigned : NumExprBits;
// When false, it must not have side effects.
unsigned CleanupsHaveSideEffects : 1;
unsigned NumObjects : 32 - 1 - NumExprBits;
};
class CXXUnresolvedConstructExprBitfields {
friend class ASTStmtReader;
friend class CXXUnresolvedConstructExpr;
unsigned : NumExprBits;
/// The number of arguments used to construct the type.
unsigned NumArgs;
};
class CXXDependentScopeMemberExprBitfields {
friend class ASTStmtReader;
friend class CXXDependentScopeMemberExpr;
unsigned : NumExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether this member expression has info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// See getFirstQualifierFoundInScope() and the comment listing
/// the trailing objects.
unsigned HasFirstQualifierFoundInScope : 1;
/// The location of the '->' or '.' operator.
SourceLocation OperatorLoc;
};
class OverloadExprBitfields {
friend class ASTStmtReader;
friend class OverloadExpr;
unsigned : NumExprBits;
/// Whether the name includes info for explicit template
/// keyword and arguments.
unsigned HasTemplateKWAndArgsInfo : 1;
/// Padding used by the derived classes to store various bits. If you
/// need to add some data here, shrink this padding and add your data
/// above. NumOverloadExprBits also needs to be updated.
unsigned : 32 - NumExprBits - 1;
/// The number of results.
unsigned NumResults;
};
enum { NumOverloadExprBits = NumExprBits + 1 };
class UnresolvedLookupExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedLookupExpr;
unsigned : NumOverloadExprBits;
/// True if these lookup results should be extended by
/// argument-dependent lookup if this is the operand of a function call.
unsigned RequiresADL : 1;
/// True if these lookup results are overloaded. This is pretty trivially
/// rederivable if we urgently need to kill this field.
unsigned Overloaded : 1;
};
static_assert(sizeof(UnresolvedLookupExprBitfields) <= 4,
"UnresolvedLookupExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class UnresolvedMemberExprBitfields {
friend class ASTStmtReader;
friend class UnresolvedMemberExpr;
unsigned : NumOverloadExprBits;
/// Whether this member expression used the '->' operator or
/// the '.' operator.
unsigned IsArrow : 1;
/// Whether the lookup results contain an unresolved using declaration.
unsigned HasUnresolvedUsing : 1;
};
static_assert(sizeof(UnresolvedMemberExprBitfields) <= 4,
"UnresolvedMemberExprBitfields must be <= than 4 bytes to"
"avoid trashing OverloadExprBitfields::NumResults!");
class CXXNoexceptExprBitfields {
friend class ASTStmtReader;
friend class CXXNoexceptExpr;
unsigned : NumExprBits;
unsigned Value : 1;
};
class SubstNonTypeTemplateParmExprBitfields {
friend class ASTStmtReader;
friend class SubstNonTypeTemplateParmExpr;
unsigned : NumExprBits;
/// The location of the non-type template parameter reference.
SourceLocation NameLoc;
};
class RequiresExprBitfields {
friend class ASTStmtReader;
friend class ASTStmtWriter;
friend class RequiresExpr;
unsigned : NumExprBits;
unsigned IsSatisfied : 1;
SourceLocation RequiresKWLoc;
};
//===--- C++ Coroutines TS bitfields classes ---===//
class CoawaitExprBitfields {
friend class CoawaitExpr;
unsigned : NumExprBits;
unsigned IsImplicit : 1;
};
//===--- Obj-C Expression bitfields classes ---===//
class ObjCIndirectCopyRestoreExprBitfields {
friend class ObjCIndirectCopyRestoreExpr;
unsigned : NumExprBits;
unsigned ShouldCopy : 1;
};
//===--- Clang Extensions bitfields classes ---===//
class OpaqueValueExprBitfields {
friend class ASTStmtReader;
friend class OpaqueValueExpr;
unsigned : NumExprBits;
/// The OVE is a unique semantic reference to its source expression if this
/// bit is set to true.
unsigned IsUnique : 1;
SourceLocation Loc;
};
union {
// Same order as in StmtNodes.td.
// Statements
StmtBitfields StmtBits;
NullStmtBitfields NullStmtBits;
CompoundStmtBitfields CompoundStmtBits;
LabelStmtBitfields LabelStmtBits;
AttributedStmtBitfields AttributedStmtBits;
IfStmtBitfields IfStmtBits;
SwitchStmtBitfields SwitchStmtBits;
WhileStmtBitfields WhileStmtBits;
DoStmtBitfields DoStmtBits;
ForStmtBitfields ForStmtBits;
GotoStmtBitfields GotoStmtBits;
ContinueStmtBitfields ContinueStmtBits;
BreakStmtBitfields BreakStmtBits;
ReturnStmtBitfields ReturnStmtBits;
SwitchCaseBitfields SwitchCaseBits;
// Expressions
ExprBitfields ExprBits;
ConstantExprBitfields ConstantExprBits;
PredefinedExprBitfields PredefinedExprBits;
DeclRefExprBitfields DeclRefExprBits;
FloatingLiteralBitfields FloatingLiteralBits;
StringLiteralBitfields StringLiteralBits;
CharacterLiteralBitfields CharacterLiteralBits;
UnaryOperatorBitfields UnaryOperatorBits;
UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
ArraySubscriptExprBitfields ArraySubscriptExprBits;
CallExprBitfields CallExprBits;
MemberExprBitfields MemberExprBits;
CastExprBitfields CastExprBits;
BinaryOperatorBitfields BinaryOperatorBits;
InitListExprBitfields InitListExprBits;
ParenListExprBitfields ParenListExprBits;
GenericSelectionExprBitfields GenericSelectionExprBits;
PseudoObjectExprBitfields PseudoObjectExprBits;
SourceLocExprBitfields SourceLocExprBits;
// C++ Expressions
CXXOperatorCallExprBitfields CXXOperatorCallExprBits;
CXXRewrittenBinaryOperatorBitfields CXXRewrittenBinaryOperatorBits;
CXXBoolLiteralExprBitfields CXXBoolLiteralExprBits;
CXXNullPtrLiteralExprBitfields CXXNullPtrLiteralExprBits;
CXXThisExprBitfields CXXThisExprBits;
CXXThrowExprBitfields CXXThrowExprBits;
CXXDefaultArgExprBitfields CXXDefaultArgExprBits;
CXXDefaultInitExprBitfields CXXDefaultInitExprBits;
CXXScalarValueInitExprBitfields CXXScalarValueInitExprBits;
CXXNewExprBitfields CXXNewExprBits;
CXXDeleteExprBitfields CXXDeleteExprBits;
TypeTraitExprBitfields TypeTraitExprBits;
DependentScopeDeclRefExprBitfields DependentScopeDeclRefExprBits;
CXXConstructExprBitfields CXXConstructExprBits;
ExprWithCleanupsBitfields ExprWithCleanupsBits;
CXXUnresolvedConstructExprBitfields CXXUnresolvedConstructExprBits;
CXXDependentScopeMemberExprBitfields CXXDependentScopeMemberExprBits;
OverloadExprBitfields OverloadExprBits;
UnresolvedLookupExprBitfields UnresolvedLookupExprBits;
UnresolvedMemberExprBitfields UnresolvedMemberExprBits;
CXXNoexceptExprBitfields CXXNoexceptExprBits;
SubstNonTypeTemplateParmExprBitfields SubstNonTypeTemplateParmExprBits;
RequiresExprBitfields RequiresExprBits;
// C++ Coroutines TS expressions
CoawaitExprBitfields CoawaitBits;
// Obj-C Expressions
ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
// Clang Extensions
OpaqueValueExprBitfields OpaqueValueExprBits;
};
public:
// Only allow allocation of Stmts using the allocator in ASTContext
// or by doing a placement new.
void* operator new(size_t bytes, const ASTContext& C,
unsigned alignment = 8);
void* operator new(size_t bytes, const ASTContext* C,
unsigned alignment = 8) {
return operator new(bytes, *C, alignment);
}
void *operator new(size_t bytes, void *mem) noexcept { return mem; }
void operator delete(void *, const ASTContext &, unsigned) noexcept {}
void operator delete(void *, const ASTContext *, unsigned) noexcept {}
void operator delete(void *, size_t) noexcept {}
void operator delete(void *, void *) noexcept {}
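/// Allocation therefore always goes through an ASTContext or placement new,
/// e.g. (sketch):
/// @code
///   Stmt *S = new (Ctx) NullStmt(SemiLoc);
/// @endcode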
public:
/// A placeholder type used to construct an empty shell of a
/// type, that will be filled in later (e.g., by some
/// de-serialization).
struct EmptyShell {};
protected:
/// Iterator for iterating over Stmt * arrays that contain only T *.
///
/// This is needed because AST nodes use Stmt* arrays to store
/// references to children (to be compatible with StmtIterator).
template<typename T, typename TPtr = T *, typename StmtPtr = Stmt *>
struct CastIterator
: llvm::iterator_adaptor_base<CastIterator<T, TPtr, StmtPtr>, StmtPtr *,
std::random_access_iterator_tag, TPtr> {
using Base = typename CastIterator::iterator_adaptor_base;
CastIterator() : Base(nullptr) {}
CastIterator(StmtPtr *I) : Base(I) {}
typename Base::value_type operator*() const {
return cast_or_null<T>(*this->I);
}
};
/// Const iterator for iterating over Stmt * arrays that contain only T *.
template <typename T>
using ConstCastIterator = CastIterator<T, const T *const, const Stmt *const>;
using ExprIterator = CastIterator<Expr>;
using ConstExprIterator = ConstCastIterator<Expr>;
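/// For example (a sketch; the members shown are illustrative), a node
/// storing its operands as "Stmt *" can expose them as "Expr *" lazily:
/// @code
///   // Stmt **SubExprs; unsigned NumSubExprs;
///   ExprIterator arg_begin() { return ExprIterator(SubExprs); }
///   ExprIterator arg_end() { return ExprIterator(SubExprs + NumSubExprs); }
/// @endcode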
private:
/// Whether statistic collection is enabled.
static bool StatisticsEnabled;
protected:
/// Construct an empty statement.
explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
Stmt() = delete;
Stmt(const Stmt &) = delete;
Stmt(Stmt &&) = delete;
Stmt &operator=(const Stmt &) = delete;
Stmt &operator=(Stmt &&) = delete;
Stmt(StmtClass SC) {
static_assert(sizeof(*this) <= 8,
"changing bitfields changed sizeof(Stmt)");
static_assert(sizeof(*this) % alignof(void *) == 0,
"Insufficient alignment!");
StmtBits.sClass = SC;
StmtBits.IsOMPStructuredBlock = false;
if (StatisticsEnabled) Stmt::addStmtClass(SC);
}
StmtClass getStmtClass() const {
return static_cast<StmtClass>(StmtBits.sClass);
}
const char *getStmtClassName() const;
bool isOMPStructuredBlock() const { return StmtBits.IsOMPStructuredBlock; }
void setIsOMPStructuredBlock(bool IsOMPStructuredBlock) {
StmtBits.IsOMPStructuredBlock = IsOMPStructuredBlock;
}
/// SourceLocation tokens are not useful in isolation - they are low level
/// value objects created/interpreted by SourceManager. We assume AST
/// clients will have a pointer to the respective SourceManager.
SourceRange getSourceRange() const LLVM_READONLY;
SourceLocation getBeginLoc() const LLVM_READONLY;
SourceLocation getEndLoc() const LLVM_READONLY;
// global temp stats (until we have a per-module visitor)
static void addStmtClass(const StmtClass s);
static void EnableStatistics();
static void PrintStats();
/// Dumps the specified AST fragment and all subtrees to
/// \c llvm::errs().
void dump() const;
void dump(SourceManager &SM) const;
void dump(raw_ostream &OS, SourceManager &SM) const;
void dump(raw_ostream &OS) const;
/// \return Unique reproducible object identifier
int64_t getID(const ASTContext &Context) const;
/// dumpColor - same as dump(), but forces color highlighting.
void dumpColor() const;
/// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
/// back to its original source language syntax.
void dumpPretty(const ASTContext &Context) const;
void printPretty(raw_ostream &OS, PrinterHelper *Helper,
const PrintingPolicy &Policy, unsigned Indentation = 0,
StringRef NewlineSymbol = "\n",
const ASTContext *Context = nullptr) const;
/// Pretty-prints in JSON format.
void printJson(raw_ostream &Out, PrinterHelper *Helper,
const PrintingPolicy &Policy, bool AddQuotes) const;
/// viewAST - Visualize an AST rooted at this Stmt* using GraphViz. Only
/// works on systems with GraphViz (Mac OS X) or dot+gv installed.
void viewAST() const;
/// Skip no-op (attributed, compound) container stmts and skip captured
/// stmt at the top, if \a IgnoreCaptured is true.
Stmt *IgnoreContainers(bool IgnoreCaptured = false);
const Stmt *IgnoreContainers(bool IgnoreCaptured = false) const {
return const_cast<Stmt *>(this)->IgnoreContainers(IgnoreCaptured);
}
const Stmt *stripLabelLikeStatements() const;
Stmt *stripLabelLikeStatements() {
return const_cast<Stmt*>(
const_cast<const Stmt*>(this)->stripLabelLikeStatements());
}
/// Child Iterators: All subclasses must implement 'children'
/// to permit easy iteration over the substatements/subexpressions of an
/// AST node. This permits easy iteration over all nodes in the AST.
using child_iterator = StmtIterator;
using const_child_iterator = ConstStmtIterator;
using child_range = llvm::iterator_range<child_iterator>;
using const_child_range = llvm::iterator_range<const_child_iterator>;
child_range children();
const_child_range children() const {
auto Children = const_cast<Stmt *>(this)->children();
return const_child_range(Children.begin(), Children.end());
}
child_iterator child_begin() { return children().begin(); }
child_iterator child_end() { return children().end(); }
const_child_iterator child_begin() const { return children().begin(); }
const_child_iterator child_end() const { return children().end(); }
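/// A typical pre-order walk over a subtree therefore looks like (sketch):
/// @code
///   void visit(const Stmt *S) {
///     // children() may contain null entries, e.g. a for-loop missing its init.
///     for (const Stmt *Child : S->children())
///       if (Child)
///         visit(Child);
///   }
/// @endcode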
/// Produce a unique representation of the given statement.
///
/// \param ID once the profiling operation is complete, will contain
/// the unique representation of the given statement.
///
/// \param Context the AST context in which the statement resides
///
/// \param Canonical whether the profile should be based on the canonical
/// representation of this statement (e.g., where non-type template
/// parameters are identified by index/level rather than their
/// declaration pointers) or the exact representation of the statement as
/// written in the source.
void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
bool Canonical) const;
/// Calculate a unique representation for a statement that is
/// stable across compiler invocations.
///
/// \param ID profile information will be stored in ID.
///
/// \param Hash an ODRHash object which will be called where pointers would
/// have been used in the Profile function.
void ProcessODRHash(llvm::FoldingSetNodeID &ID, ODRHash& Hash) const;
};
/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
class DeclStmt : public Stmt {
DeclGroupRef DG;
SourceLocation StartLoc, EndLoc;
public:
DeclStmt(DeclGroupRef dg, SourceLocation startLoc, SourceLocation endLoc)
: Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}
/// Build an empty declaration statement.
explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) {}
/// isSingleDecl - This method returns true if this DeclStmt refers
/// to a single Decl.
bool isSingleDecl() const { return DG.isSingleDecl(); }
const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
Decl *getSingleDecl() { return DG.getSingleDecl(); }
const DeclGroupRef getDeclGroup() const { return DG; }
DeclGroupRef getDeclGroup() { return DG; }
void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }
void setStartLoc(SourceLocation L) { StartLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return StartLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DeclStmtClass;
}
// Iterators over subexpressions.
child_range children() {
return child_range(child_iterator(DG.begin(), DG.end()),
child_iterator(DG.end(), DG.end()));
}
const_child_range children() const {
auto Children = const_cast<DeclStmt *>(this)->children();
return const_child_range(Children);
}
using decl_iterator = DeclGroupRef::iterator;
using const_decl_iterator = DeclGroupRef::const_iterator;
using decl_range = llvm::iterator_range<decl_iterator>;
using decl_const_range = llvm::iterator_range<const_decl_iterator>;
decl_range decls() { return decl_range(decl_begin(), decl_end()); }
decl_const_range decls() const {
return decl_const_range(decl_begin(), decl_end());
}
decl_iterator decl_begin() { return DG.begin(); }
decl_iterator decl_end() { return DG.end(); }
const_decl_iterator decl_begin() const { return DG.begin(); }
const_decl_iterator decl_end() const { return DG.end(); }
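/// For example (sketch, with DS a "DeclStmt *"), visiting each variable
/// declared by this statement:
/// @code
///   for (Decl *D : DS->decls())
///     if (auto *VD = dyn_cast<VarDecl>(D))
///       ; // use VD
/// @endcode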
using reverse_decl_iterator = std::reverse_iterator<decl_iterator>;
reverse_decl_iterator decl_rbegin() {
return reverse_decl_iterator(decl_end());
}
reverse_decl_iterator decl_rend() {
return reverse_decl_iterator(decl_begin());
}
};
/// NullStmt - This is the null statement ";": C99 6.8.3p3.
///
class NullStmt : public Stmt {
public:
NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false)
: Stmt(NullStmtClass) {
NullStmtBits.HasLeadingEmptyMacro = hasLeadingEmptyMacro;
setSemiLoc(L);
}
/// Build an empty null statement.
explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty) {}
SourceLocation getSemiLoc() const { return NullStmtBits.SemiLoc; }
void setSemiLoc(SourceLocation L) { NullStmtBits.SemiLoc = L; }
bool hasLeadingEmptyMacro() const {
return NullStmtBits.HasLeadingEmptyMacro;
}
SourceLocation getBeginLoc() const { return getSemiLoc(); }
SourceLocation getEndLoc() const { return getSemiLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == NullStmtClass;
}
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// CompoundStmt - This represents a group of statements like { stmt stmt }.
class CompoundStmt final : public Stmt,
private llvm::TrailingObjects<CompoundStmt, Stmt *> {
friend class ASTStmtReader;
friend TrailingObjects;
/// The location of the closing "}". LBraceLoc is stored in CompoundStmtBits.
SourceLocation RBraceLoc;
CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB, SourceLocation RB);
explicit CompoundStmt(EmptyShell Empty) : Stmt(CompoundStmtClass, Empty) {}
void setStmts(ArrayRef<Stmt *> Stmts);
public:
static CompoundStmt *Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
SourceLocation LB, SourceLocation RB);
// Build an empty compound statement with a location.
explicit CompoundStmt(SourceLocation Loc)
: Stmt(CompoundStmtClass), RBraceLoc(Loc) {
CompoundStmtBits.NumStmts = 0;
CompoundStmtBits.LBraceLoc = Loc;
}
// Build an empty compound statement.
static CompoundStmt *CreateEmpty(const ASTContext &C, unsigned NumStmts);
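/// Because the statements are tail-allocated, clients build non-empty
/// compound statements through Create(), e.g. (sketch):
/// @code
///   Stmt *Body = CompoundStmt::Create(Ctx, Stmts, LBLoc, RBLoc);
/// @endcode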
bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
unsigned size() const { return CompoundStmtBits.NumStmts; }
using body_iterator = Stmt **;
using body_range = llvm::iterator_range<body_iterator>;
body_range body() { return body_range(body_begin(), body_end()); }
body_iterator body_begin() { return getTrailingObjects<Stmt *>(); }
body_iterator body_end() { return body_begin() + size(); }
Stmt *body_front() { return !body_empty() ? body_begin()[0] : nullptr; }
Stmt *body_back() {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using const_body_iterator = Stmt *const *;
using body_const_range = llvm::iterator_range<const_body_iterator>;
body_const_range body() const {
return body_const_range(body_begin(), body_end());
}
const_body_iterator body_begin() const {
return getTrailingObjects<Stmt *>();
}
const_body_iterator body_end() const { return body_begin() + size(); }
const Stmt *body_front() const {
return !body_empty() ? body_begin()[0] : nullptr;
}
const Stmt *body_back() const {
return !body_empty() ? body_begin()[size() - 1] : nullptr;
}
using reverse_body_iterator = std::reverse_iterator<body_iterator>;
reverse_body_iterator body_rbegin() {
return reverse_body_iterator(body_end());
}
reverse_body_iterator body_rend() {
return reverse_body_iterator(body_begin());
}
using const_reverse_body_iterator =
std::reverse_iterator<const_body_iterator>;
const_reverse_body_iterator body_rbegin() const {
return const_reverse_body_iterator(body_end());
}
const_reverse_body_iterator body_rend() const {
return const_reverse_body_iterator(body_begin());
}
// Get the Stmt that StmtExpr would consider to be the result of this
// compound statement. This is used by StmtExpr to properly emulate the GCC
// compound expression extension, which ignores trailing NullStmts when
// getting the result of the expression.
// i.e. ({ 5;;; })
// ^^ ignored
// If we don't find something that isn't a NullStmt, just return the last
// Stmt.
Stmt *getStmtExprResult() {
for (auto *B : llvm::reverse(body())) {
if (!isa<NullStmt>(B))
return B;
}
return body_back();
}
const Stmt *getStmtExprResult() const {
return const_cast<CompoundStmt *>(this)->getStmtExprResult();
}
SourceLocation getBeginLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getEndLoc() const { return RBraceLoc; }
SourceLocation getLBracLoc() const { return CompoundStmtBits.LBraceLoc; }
SourceLocation getRBracLoc() const { return RBraceLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == CompoundStmtClass;
}
// Iterators
child_range children() { return child_range(body_begin(), body_end()); }
const_child_range children() const {
return const_child_range(body_begin(), body_end());
}
};
// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
/// The location of the ":".
SourceLocation ColonLoc;
// The location of the "case" or "default" keyword. Stored in SwitchCaseBits.
// SourceLocation KeywordLoc;
/// A pointer to the following CaseStmt or DefaultStmt class,
/// used by SwitchStmt.
SwitchCase *NextSwitchCase = nullptr;
SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
: Stmt(SC), ColonLoc(ColonLoc) {
setKeywordLoc(KWLoc);
}
SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC) {}
public:
const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }
SwitchCase *getNextSwitchCase() { return NextSwitchCase; }
void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }
SourceLocation getKeywordLoc() const { return SwitchCaseBits.KeywordLoc; }
void setKeywordLoc(SourceLocation L) { SwitchCaseBits.KeywordLoc = L; }
SourceLocation getColonLoc() const { return ColonLoc; }
void setColonLoc(SourceLocation L) { ColonLoc = L; }
inline Stmt *getSubStmt();
const Stmt *getSubStmt() const {
return const_cast<SwitchCase *>(this)->getSubStmt();
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
inline SourceLocation getEndLoc() const LLVM_READONLY;
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass ||
T->getStmtClass() == DefaultStmtClass;
}
};
/// CaseStmt - Represent a case statement. It can optionally be a GNU case
/// statement of the form LHS ... RHS representing a range of cases.
class CaseStmt final
: public SwitchCase,
private llvm::TrailingObjects<CaseStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// CaseStmt is followed by several trailing objects, some of which are optional.
// Note that it would be more convenient to put the optional trailing objects
// at the end but this would impact children().
// The trailing objects are in order:
//
// * A "Stmt *" for the LHS of the case statement. Always present.
//
// * A "Stmt *" for the RHS of the case statement. This is a GNU extension
// which allows ranges in case statements of the form LHS ... RHS.
// Present if and only if caseStmtIsGNURange() is true.
//
// * A "Stmt *" for the substatement of the case statement. Always present.
//
// * A SourceLocation for the location of the ... if this is a case statement
// with a range. Present if and only if caseStmtIsGNURange() is true.
enum { LhsOffset = 0, SubStmtOffsetFromRhs = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + caseStmtIsGNURange();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return caseStmtIsGNURange();
}
unsigned lhsOffset() const { return LhsOffset; }
unsigned rhsOffset() const { return LhsOffset + caseStmtIsGNURange(); }
unsigned subStmtOffset() const { return rhsOffset() + SubStmtOffsetFromRhs; }
/// Build a case statement assuming that the storage for the
/// trailing objects has been properly allocated.
CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
SourceLocation ellipsisLoc, SourceLocation colonLoc)
: SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
// Handle GNU case statements of the form LHS ... RHS.
bool IsGNURange = rhs != nullptr;
SwitchCaseBits.CaseStmtIsGNURange = IsGNURange;
setLHS(lhs);
setSubStmt(nullptr);
if (IsGNURange) {
setRHS(rhs);
setEllipsisLoc(ellipsisLoc);
}
}
/// Build an empty switch case statement.
explicit CaseStmt(EmptyShell Empty, bool CaseStmtIsGNURange)
: SwitchCase(CaseStmtClass, Empty) {
SwitchCaseBits.CaseStmtIsGNURange = CaseStmtIsGNURange;
}
public:
/// Build a case statement.
static CaseStmt *Create(const ASTContext &Ctx, Expr *lhs, Expr *rhs,
SourceLocation caseLoc, SourceLocation ellipsisLoc,
SourceLocation colonLoc);
/// Build an empty case statement.
static CaseStmt *CreateEmpty(const ASTContext &Ctx, bool CaseStmtIsGNURange);
/// True if this case statement is of the form case LHS ... RHS, which
/// is a GNU extension. In this case the RHS can be obtained with getRHS()
/// and the location of the ellipsis can be obtained with getEllipsisLoc().
bool caseStmtIsGNURange() const { return SwitchCaseBits.CaseStmtIsGNURange; }
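// Illustrative use (a sketch, not part of the original header): for the GNU
// extension "case 1 ... 5: return true;", caseStmtIsGNURange() is true,
// getLHS() yields the expression 1, getRHS() the expression 5, and
// getEllipsisLoc() points at the "...".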
SourceLocation getCaseLoc() const { return getKeywordLoc(); }
void setCaseLoc(SourceLocation L) { setKeywordLoc(L); }
/// Get the location of the ... in a case statement of the form LHS ... RHS.
SourceLocation getEllipsisLoc() const {
return caseStmtIsGNURange() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
/// Set the location of the ... in a case statement of the form LHS ... RHS.
/// Assert that this case statement is of this form.
void setEllipsisLoc(SourceLocation L) {
assert(
caseStmtIsGNURange() &&
"setEllipsisLoc but this is not a case stmt of the form LHS ... RHS!");
*getTrailingObjects<SourceLocation>() = L;
}
Expr *getLHS() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
const Expr *getLHS() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[lhsOffset()]);
}
void setLHS(Expr *Val) {
getTrailingObjects<Stmt *>()[lhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Expr *getRHS() {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
const Expr *getRHS() const {
return caseStmtIsGNURange() ? reinterpret_cast<Expr *>(
getTrailingObjects<Stmt *>()[rhsOffset()])
: nullptr;
}
void setRHS(Expr *Val) {
assert(caseStmtIsGNURange() &&
"setRHS but this is not a case stmt of the form LHS ... RHS!");
getTrailingObjects<Stmt *>()[rhsOffset()] = reinterpret_cast<Stmt *>(Val);
}
Stmt *getSubStmt() { return getTrailingObjects<Stmt *>()[subStmtOffset()]; }
const Stmt *getSubStmt() const {
return getTrailingObjects<Stmt *>()[subStmtOffset()];
}
void setSubStmt(Stmt *S) {
getTrailingObjects<Stmt *>()[subStmtOffset()] = S;
}
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
// Handle deeply nested case statements with iteration instead of recursion.
const CaseStmt *CS = this;
while (const auto *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
CS = CS2;
return CS->getSubStmt()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CaseStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
class DefaultStmt : public SwitchCase {
Stmt *SubStmt;
public:
DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt)
: SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}
/// Build an empty default statement.
explicit DefaultStmt(EmptyShell Empty)
: SwitchCase(DefaultStmtClass, Empty) {}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *S) { SubStmt = S; }
SourceLocation getDefaultLoc() const { return getKeywordLoc(); }
void setDefaultLoc(SourceLocation L) { setKeywordLoc(L); }
SourceLocation getBeginLoc() const { return getKeywordLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return SubStmt->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == DefaultStmtClass;
}
// Iterators
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
};
SourceLocation SwitchCase::getEndLoc() const {
if (const auto *CS = dyn_cast<CaseStmt>(this))
return CS->getEndLoc();
else if (const auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getEndLoc();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
Stmt *SwitchCase::getSubStmt() {
if (auto *CS = dyn_cast<CaseStmt>(this))
return CS->getSubStmt();
else if (auto *DS = dyn_cast<DefaultStmt>(this))
return DS->getSubStmt();
llvm_unreachable("SwitchCase is neither a CaseStmt nor a DefaultStmt!");
}
/// Represents a statement that could possibly have a value and type. This
/// covers expression-statements, as well as labels and attributed statements.
///
/// Value statements have a special meaning when they are the last non-null
/// statement in a GNU statement expression, where they determine the value
/// of the statement expression.
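///
/// For illustration (a sketch, not part of the original header):
/// \code
/// int x = ({ foo(); 42; }); // the value statement "42;" supplies the
///                           // value and type of the statement expression
/// \endcode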
class ValueStmt : public Stmt {
protected:
using Stmt::Stmt;
public:
const Expr *getExprStmt() const;
Expr *getExprStmt() {
const ValueStmt *ConstThis = this;
return const_cast<Expr*>(ConstThis->getExprStmt());
}
static bool classof(const Stmt *T) {
return T->getStmtClass() >= firstValueStmtConstant &&
T->getStmtClass() <= lastValueStmtConstant;
}
};
/// LabelStmt - Represents a label, which has a substatement. For example:
/// foo: return;
class LabelStmt : public ValueStmt {
LabelDecl *TheDecl;
Stmt *SubStmt;
public:
/// Build a label statement.
LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
: ValueStmt(LabelStmtClass), TheDecl(D), SubStmt(substmt) {
setIdentLoc(IL);
}
/// Build an empty label statement.
explicit LabelStmt(EmptyShell Empty) : ValueStmt(LabelStmtClass, Empty) {}
SourceLocation getIdentLoc() const { return LabelStmtBits.IdentLoc; }
void setIdentLoc(SourceLocation L) { LabelStmtBits.IdentLoc = L; }
LabelDecl *getDecl() const { return TheDecl; }
void setDecl(LabelDecl *D) { TheDecl = D; }
const char *getName() const;
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
void setSubStmt(Stmt *SS) { SubStmt = SS; }
SourceLocation getBeginLoc() const { return getIdentLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == LabelStmtClass;
}
};
/// Represents an attribute applied to a statement. For example:
/// [[omp::for(...)]] for (...) { ... }
class AttributedStmt final
: public ValueStmt,
private llvm::TrailingObjects<AttributedStmt, const Attr *> {
friend class ASTStmtReader;
friend TrailingObjects;
Stmt *SubStmt;
AttributedStmt(SourceLocation Loc, ArrayRef<const Attr *> Attrs,
Stmt *SubStmt)
: ValueStmt(AttributedStmtClass), SubStmt(SubStmt) {
AttributedStmtBits.NumAttrs = Attrs.size();
AttributedStmtBits.AttrLoc = Loc;
std::copy(Attrs.begin(), Attrs.end(), getAttrArrayPtr());
}
explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
: ValueStmt(AttributedStmtClass, Empty) {
AttributedStmtBits.NumAttrs = NumAttrs;
AttributedStmtBits.AttrLoc = SourceLocation{};
std::fill_n(getAttrArrayPtr(), NumAttrs, nullptr);
}
const Attr *const *getAttrArrayPtr() const {
return getTrailingObjects<const Attr *>();
}
const Attr **getAttrArrayPtr() { return getTrailingObjects<const Attr *>(); }
public:
static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
ArrayRef<const Attr *> Attrs, Stmt *SubStmt);
// Build an empty attributed statement.
static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);
SourceLocation getAttrLoc() const { return AttributedStmtBits.AttrLoc; }
ArrayRef<const Attr *> getAttrs() const {
return llvm::makeArrayRef(getAttrArrayPtr(), AttributedStmtBits.NumAttrs);
}
Stmt *getSubStmt() { return SubStmt; }
const Stmt *getSubStmt() const { return SubStmt; }
SourceLocation getBeginLoc() const { return getAttrLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return SubStmt->getEndLoc();}
child_range children() { return child_range(&SubStmt, &SubStmt + 1); }
const_child_range children() const {
return const_child_range(&SubStmt, &SubStmt + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == AttributedStmtClass;
}
};
/// IfStmt - This represents an if/then/else.
class IfStmt final
: public Stmt,
private llvm::TrailingObjects<IfStmt, Stmt *, SourceLocation> {
friend TrailingObjects;
// IfStmt is followed by several trailing objects, some of which are
// optional. Note that it would be more convenient to put the optional
// trailing objects at the end but this would change the order of the
// children.
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact a "Expr *".
//
// * A "Stmt *" for the then statement.
// Always present.
//
// * A "Stmt *" for the else statement.
// Present if and only if hasElseStorage().
//
// * A "SourceLocation" for the location of the "else".
// Present if and only if hasElseStorage().
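//
// Illustrative layout (a sketch, not part of the original header): for
// "if (int x = f()) g(); else h();", hasVarStorage() and hasElseStorage()
// are true and hasInitStorage() is false, so the "Stmt *" array is
//   [0] condition-variable DeclStmt, [1] condition, [2] then, [3] else,
// plus one trailing SourceLocation for the "else".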
enum { InitOffset = 0, ThenOffsetFromCond = 1, ElseOffsetFromCond = 2 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasElseStorage() + hasVarStorage() +
hasInitStorage();
}
unsigned numTrailingObjects(OverloadToken<SourceLocation>) const {
return hasElseStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned thenOffset() const { return condOffset() + ThenOffsetFromCond; }
unsigned elseOffset() const { return condOffset() + ElseOffsetFromCond; }
/// Build an if/then/else statement.
IfStmt(const ASTContext &Ctx, SourceLocation IL, bool IsConstexpr, Stmt *Init,
VarDecl *Var, Expr *Cond, Stmt *Then, SourceLocation EL, Stmt *Else);
/// Build an empty if/then/else statement.
explicit IfStmt(EmptyShell Empty, bool HasElse, bool HasVar, bool HasInit);
public:
/// Create an IfStmt.
static IfStmt *Create(const ASTContext &Ctx, SourceLocation IL,
bool IsConstexpr, Stmt *Init, VarDecl *Var, Expr *Cond,
Stmt *Then, SourceLocation EL = SourceLocation(),
Stmt *Else = nullptr);
/// Create an empty IfStmt optionally with storage for an else statement,
/// condition variable and init expression.
static IfStmt *CreateEmpty(const ASTContext &Ctx, bool HasElse, bool HasVar,
bool HasInit);
/// True if this IfStmt has the storage for an init statement.
bool hasInitStorage() const { return IfStmtBits.HasInit; }
/// True if this IfStmt has storage for a variable declaration.
bool hasVarStorage() const { return IfStmtBits.HasVar; }
/// True if this IfStmt has storage for an else statement.
bool hasElseStorage() const { return IfStmtBits.HasElse; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getThen() { return getTrailingObjects<Stmt *>()[thenOffset()]; }
const Stmt *getThen() const {
return getTrailingObjects<Stmt *>()[thenOffset()];
}
void setThen(Stmt *Then) {
getTrailingObjects<Stmt *>()[thenOffset()] = Then;
}
Stmt *getElse() {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
const Stmt *getElse() const {
return hasElseStorage() ? getTrailingObjects<Stmt *>()[elseOffset()]
: nullptr;
}
void setElse(Stmt *Else) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
getTrailingObjects<Stmt *>()[elseOffset()] = Else;
}
/// Retrieve the variable declared in this "if" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// if (int x = foo()) {
/// printf("x is %d", x);
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<IfStmt *>(this)->getConditionVariable();
}
/// Set the condition variable for this if statement.
/// The if statement must have storage for the condition variable.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this IfStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This if statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
SourceLocation getIfLoc() const { return IfStmtBits.IfLoc; }
void setIfLoc(SourceLocation IfLoc) { IfStmtBits.IfLoc = IfLoc; }
SourceLocation getElseLoc() const {
return hasElseStorage() ? *getTrailingObjects<SourceLocation>()
: SourceLocation();
}
void setElseLoc(SourceLocation ElseLoc) {
assert(hasElseStorage() &&
"This if statement has no storage for an else statement!");
*getTrailingObjects<SourceLocation>() = ElseLoc;
}
bool isConstexpr() const { return IfStmtBits.IsConstexpr; }
void setConstexpr(bool C) { IfStmtBits.IsConstexpr = C; }
/// If this is an 'if constexpr', determine which substatement will be taken.
/// Otherwise, or if the condition is value-dependent, returns None.
Optional<const Stmt*> getNondiscardedCase(const ASTContext &Ctx) const;
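// Illustration (a sketch, not part of the original header): for
// "if constexpr (sizeof(long) == 8) f(); else g();" on a target where the
// condition folds to true, getNondiscardedCase() returns the then-branch
// "f();"; for a plain (non-constexpr) if it returns None.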
bool isObjCAvailabilityCheck() const;
SourceLocation getBeginLoc() const { return getIfLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
if (getElse())
return getElse()->getEndLoc();
return getThen()->getEndLoc();
}
// Iterators over subexpressions. The iterators will include iterating
// over the initialization expression referenced by the condition variable.
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == IfStmtClass;
}
};
/// SwitchStmt - This represents a 'switch' stmt.
class SwitchStmt final : public Stmt,
private llvm::TrailingObjects<SwitchStmt, Stmt *> {
friend TrailingObjects;
/// Points to a linked list of case and default statements.
SwitchCase *FirstCase;
// SwitchStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more convenient to
// put the optional trailing objects at the end but this would change
// the order in children().
// The trailing objects are in order:
//
// * A "Stmt *" for the init statement.
// Present if and only if hasInitStorage().
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.
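//
// Illustrative layout (a sketch, not part of the original header): for
// "switch (int x = f()) { ... }", hasVarStorage() is true and
// hasInitStorage() is false, so the "Stmt *" array is
//   [0] condition-variable DeclStmt, [1] condition, [2] body.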
enum { InitOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasInitStorage() + hasVarStorage();
}
unsigned initOffset() const { return InitOffset; }
unsigned varOffset() const { return InitOffset + hasInitStorage(); }
unsigned condOffset() const {
return InitOffset + hasInitStorage() + hasVarStorage();
}
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
/// Build a switch statement.
SwitchStmt(const ASTContext &Ctx, Stmt *Init, VarDecl *Var, Expr *Cond);
/// Build an empty switch statement.
explicit SwitchStmt(EmptyShell Empty, bool HasInit, bool HasVar);
public:
/// Create a switch statement.
static SwitchStmt *Create(const ASTContext &Ctx, Stmt *Init, VarDecl *Var,
Expr *Cond);
/// Create an empty switch statement optionally with storage for
/// an init expression and a condition variable.
static SwitchStmt *CreateEmpty(const ASTContext &Ctx, bool HasInit,
bool HasVar);
/// True if this SwitchStmt has storage for an init statement.
bool hasInitStorage() const { return SwitchStmtBits.HasInit; }
/// True if this SwitchStmt has storage for a condition variable.
bool hasVarStorage() const { return SwitchStmtBits.HasVar; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
Stmt *getInit() {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
const Stmt *getInit() const {
return hasInitStorage() ? getTrailingObjects<Stmt *>()[initOffset()]
: nullptr;
}
void setInit(Stmt *Init) {
assert(hasInitStorage() &&
"This switch statement has no storage for an init statement!");
getTrailingObjects<Stmt *>()[initOffset()] = Init;
}
/// Retrieve the variable declared in this "switch" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// switch (int x = foo()) {
/// case 0: break;
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<SwitchStmt *>(this)->getConditionVariable();
}
/// Set the condition variable in this switch statement.
/// The switch statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *VD);
/// If this SwitchStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
SwitchCase *getSwitchCaseList() { return FirstCase; }
const SwitchCase *getSwitchCaseList() const { return FirstCase; }
void setSwitchCaseList(SwitchCase *SC) { FirstCase = SC; }
SourceLocation getSwitchLoc() const { return SwitchStmtBits.SwitchLoc; }
void setSwitchLoc(SourceLocation L) { SwitchStmtBits.SwitchLoc = L; }
void setBody(Stmt *S, SourceLocation SL) {
setBody(S);
setSwitchLoc(SL);
}
void addSwitchCase(SwitchCase *SC) {
assert(!SC->getNextSwitchCase() &&
"case/default already added to a switch");
SC->setNextSwitchCase(FirstCase);
FirstCase = SC;
}
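// Note (inferred from addSwitchCase above): cases are prepended, so
// getSwitchCaseList() visits the case/default statements in reverse order
// of their appearance in the source. An illustrative traversal:
//   for (SwitchCase *SC = S->getSwitchCaseList(); SC;
//        SC = SC->getNextSwitchCase())
//     handle(SC);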
/// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
/// switch over an enum value then all cases have been explicitly covered.
void setAllEnumCasesCovered() { SwitchStmtBits.AllEnumCasesCovered = true; }
/// Returns true if the SwitchStmt is a switch of an enum value and all cases
/// have been explicitly covered.
bool isAllEnumCasesCovered() const {
return SwitchStmtBits.AllEnumCasesCovered;
}
SourceLocation getBeginLoc() const { return getSwitchLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody() ? getBody()->getEndLoc()
: reinterpret_cast<const Stmt *>(getCond())->getEndLoc();
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SwitchStmtClass;
}
};
/// WhileStmt - This represents a 'while' stmt.
class WhileStmt final : public Stmt,
private llvm::TrailingObjects<WhileStmt, Stmt *> {
friend TrailingObjects;
// WhileStmt is followed by several trailing objects,
// some of which are optional. Note that it would be more
// convenient to put the optional trailing object at the end
// but this would affect children().
// The trailing objects are in order:
//
// * A "Stmt *" for the condition variable.
// Present if and only if hasVarStorage(). This is in fact a "DeclStmt *".
//
// * A "Stmt *" for the condition.
// Always present. This is in fact an "Expr *".
//
// * A "Stmt *" for the body.
// Always present.
//
enum { VarOffset = 0, BodyOffsetFromCond = 1 };
enum { NumMandatoryStmtPtr = 2 };
unsigned varOffset() const { return VarOffset; }
unsigned condOffset() const { return VarOffset + hasVarStorage(); }
unsigned bodyOffset() const { return condOffset() + BodyOffsetFromCond; }
unsigned numTrailingObjects(OverloadToken<Stmt *>) const {
return NumMandatoryStmtPtr + hasVarStorage();
}
/// Build a while statement.
WhileStmt(const ASTContext &Ctx, VarDecl *Var, Expr *Cond, Stmt *Body,
SourceLocation WL);
/// Build an empty while statement.
explicit WhileStmt(EmptyShell Empty, bool HasVar);
public:
/// Create a while statement.
static WhileStmt *Create(const ASTContext &Ctx, VarDecl *Var, Expr *Cond,
Stmt *Body, SourceLocation WL);
/// Create an empty while statement optionally with storage for
/// a condition variable.
static WhileStmt *CreateEmpty(const ASTContext &Ctx, bool HasVar);
/// True if this WhileStmt has storage for a condition variable.
bool hasVarStorage() const { return WhileStmtBits.HasVar; }
Expr *getCond() {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
const Expr *getCond() const {
return reinterpret_cast<Expr *>(getTrailingObjects<Stmt *>()[condOffset()]);
}
void setCond(Expr *Cond) {
getTrailingObjects<Stmt *>()[condOffset()] = reinterpret_cast<Stmt *>(Cond);
}
Stmt *getBody() { return getTrailingObjects<Stmt *>()[bodyOffset()]; }
const Stmt *getBody() const {
return getTrailingObjects<Stmt *>()[bodyOffset()];
}
void setBody(Stmt *Body) {
getTrailingObjects<Stmt *>()[bodyOffset()] = Body;
}
/// Retrieve the variable declared in this "while" statement, if any.
///
/// In the following example, "x" is the condition variable.
/// \code
/// while (int x = random()) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable();
const VarDecl *getConditionVariable() const {
return const_cast<WhileStmt *>(this)->getConditionVariable();
}
/// Set the condition variable of this while statement.
/// The while statement must have storage for it.
void setConditionVariable(const ASTContext &Ctx, VarDecl *V);
/// If this WhileStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
DeclStmt *getConditionVariableDeclStmt() {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
const DeclStmt *getConditionVariableDeclStmt() const {
return hasVarStorage() ? static_cast<DeclStmt *>(
getTrailingObjects<Stmt *>()[varOffset()])
: nullptr;
}
SourceLocation getWhileLoc() const { return WhileStmtBits.WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileStmtBits.WhileLoc = L; }
SourceLocation getBeginLoc() const { return getWhileLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return getBody()->getEndLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == WhileStmtClass;
}
// Iterators
child_range children() {
return child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
const_child_range children() const {
return const_child_range(getTrailingObjects<Stmt *>(),
getTrailingObjects<Stmt *>() +
numTrailingObjects(OverloadToken<Stmt *>()));
}
};
/// DoStmt - This represents a 'do/while' stmt.
class DoStmt : public Stmt {
enum { BODY, COND, END_EXPR };
Stmt *SubExprs[END_EXPR];
SourceLocation WhileLoc;
SourceLocation RParenLoc; // Location of final ')' in do stmt condition.
public:
DoStmt(Stmt *Body, Expr *Cond, SourceLocation DL, SourceLocation WL,
SourceLocation RP)
: Stmt(DoStmtClass), WhileLoc(WL), RParenLoc(RP) {
setCond(Cond);
setBody(Body);
setDoLoc(DL);
}
/// Build an empty do-while statement.
explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) {}
Expr *getCond() { return reinterpret_cast<Expr *>(SubExprs[COND]); }
const Expr *getCond() const {
return reinterpret_cast<Expr *>(SubExprs[COND]);
}
void setCond(Expr *Cond) { SubExprs[COND] = reinterpret_cast<Stmt *>(Cond); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setBody(Stmt *Body) { SubExprs[BODY] = Body; }
SourceLocation getDoLoc() const { return DoStmtBits.DoLoc; }
void setDoLoc(SourceLocation L) { DoStmtBits.DoLoc = L; }
SourceLocation getWhileLoc() const { return WhileLoc; }
void setWhileLoc(SourceLocation L) { WhileLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getDoLoc(); }
SourceLocation getEndLoc() const { return getRParenLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == DoStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// ForStmt - This represents a 'for (init;cond;inc)' stmt. Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
class ForStmt : public Stmt {
enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
SourceLocation LParenLoc, RParenLoc;
public:
ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
SourceLocation RP);
/// Build an empty for statement.
explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) {}
Stmt *getInit() { return SubExprs[INIT]; }
/// Retrieve the variable declared in this "for" statement, if any.
///
/// In the following example, "y" is the condition variable.
/// \code
/// for (int x = random(); int y = mangle(x); ++x) {
/// // ...
/// }
/// \endcode
VarDecl *getConditionVariable() const;
void setConditionVariable(const ASTContext &C, VarDecl *V);
/// If this ForStmt has a condition variable, return the faux DeclStmt
/// associated with the creation of that condition variable.
const DeclStmt *getConditionVariableDeclStmt() const {
return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
}
Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
Expr *getInc() { return reinterpret_cast<Expr*>(SubExprs[INC]); }
Stmt *getBody() { return SubExprs[BODY]; }
const Stmt *getInit() const { return SubExprs[INIT]; }
const Expr *getCond() const { return reinterpret_cast<Expr*>(SubExprs[COND]);}
const Expr *getInc() const { return reinterpret_cast<Expr*>(SubExprs[INC]); }
const Stmt *getBody() const { return SubExprs[BODY]; }
void setInit(Stmt *S) { SubExprs[INIT] = S; }
void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
void setBody(Stmt *S) { SubExprs[BODY] = S; }
SourceLocation getForLoc() const { return ForStmtBits.ForLoc; }
void setForLoc(SourceLocation L) { ForStmtBits.ForLoc = L; }
SourceLocation getLParenLoc() const { return LParenLoc; }
void setLParenLoc(SourceLocation L) { LParenLoc = L; }
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
SourceLocation getBeginLoc() const { return getForLoc(); }
SourceLocation getEndLoc() const { return getBody()->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ForStmtClass;
}
// Iterators
child_range children() {
return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
}
const_child_range children() const {
return const_child_range(&SubExprs[0], &SubExprs[0] + END_EXPR);
}
};
/// GotoStmt - This represents a direct goto.
class GotoStmt : public Stmt {
LabelDecl *Label;
SourceLocation LabelLoc;
public:
GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL)
: Stmt(GotoStmtClass), Label(label), LabelLoc(LL) {
setGotoLoc(GL);
}
/// Build an empty goto statement.
explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) {}
LabelDecl *getLabel() const { return Label; }
void setLabel(LabelDecl *D) { Label = D; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getLabelLoc() const { return LabelLoc; }
void setLabelLoc(SourceLocation L) { LabelLoc = L; }
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const { return getLabelLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GotoStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// IndirectGotoStmt - This represents an indirect goto.
class IndirectGotoStmt : public Stmt {
SourceLocation StarLoc;
Stmt *Target;
public:
IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target)
: Stmt(IndirectGotoStmtClass), StarLoc(starLoc) {
setTarget(target);
setGotoLoc(gotoLoc);
}
/// Build an empty indirect goto statement.
explicit IndirectGotoStmt(EmptyShell Empty)
: Stmt(IndirectGotoStmtClass, Empty) {}
void setGotoLoc(SourceLocation L) { GotoStmtBits.GotoLoc = L; }
SourceLocation getGotoLoc() const { return GotoStmtBits.GotoLoc; }
void setStarLoc(SourceLocation L) { StarLoc = L; }
SourceLocation getStarLoc() const { return StarLoc; }
Expr *getTarget() { return reinterpret_cast<Expr *>(Target); }
const Expr *getTarget() const {
return reinterpret_cast<const Expr *>(Target);
}
void setTarget(Expr *E) { Target = reinterpret_cast<Stmt *>(E); }
/// getConstantTarget - Returns the fixed target of this indirect
/// goto, if one exists.
LabelDecl *getConstantTarget();
const LabelDecl *getConstantTarget() const {
return const_cast<IndirectGotoStmt *>(this)->getConstantTarget();
}
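// Illustration (a sketch, not part of the original header): for
// "goto *&&done;" the target is a constant AddrLabelExpr, so
// getConstantTarget() yields the LabelDecl for "done"; for a computed
// "goto *p;" it returns null.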
SourceLocation getBeginLoc() const { return getGotoLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY { return Target->getEndLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == IndirectGotoStmtClass;
}
// Iterators
child_range children() { return child_range(&Target, &Target + 1); }
const_child_range children() const {
return const_child_range(&Target, &Target + 1);
}
};
/// ContinueStmt - This represents a continue.
class ContinueStmt : public Stmt {
public:
ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass) {
setContinueLoc(CL);
}
/// Build an empty continue statement.
explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) {}
SourceLocation getContinueLoc() const { return ContinueStmtBits.ContinueLoc; }
void setContinueLoc(SourceLocation L) { ContinueStmtBits.ContinueLoc = L; }
SourceLocation getBeginLoc() const { return getContinueLoc(); }
SourceLocation getEndLoc() const { return getContinueLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == ContinueStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// BreakStmt - This represents a break.
class BreakStmt : public Stmt {
public:
BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass) {
setBreakLoc(BL);
}
/// Build an empty break statement.
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) {}
SourceLocation getBreakLoc() const { return BreakStmtBits.BreakLoc; }
void setBreakLoc(SourceLocation L) { BreakStmtBits.BreakLoc = L; }
SourceLocation getBeginLoc() const { return getBreakLoc(); }
SourceLocation getEndLoc() const { return getBreakLoc(); }
static bool classof(const Stmt *T) {
return T->getStmtClass() == BreakStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// ReturnStmt - This represents a return, optionally of an expression:
/// return;
/// return 4;
///
/// Note that GCC allows return with no argument in a function declared to
/// return a value, and it allows returning a value in functions declared to
/// return void. We explicitly model this in the AST, which means you cannot
/// infer the presence or absence of a return argument from the function's
/// return type.
class ReturnStmt final
: public Stmt,
private llvm::TrailingObjects<ReturnStmt, const VarDecl *> {
friend TrailingObjects;
/// The return expression.
Stmt *RetExpr;
// ReturnStmt is followed optionally by a trailing "const VarDecl *"
// for the NRVO candidate. Present if and only if hasNRVOCandidate().
/// True if this ReturnStmt has storage for an NRVO candidate.
bool hasNRVOCandidate() const { return ReturnStmtBits.HasNRVOCandidate; }
unsigned numTrailingObjects(OverloadToken<const VarDecl *>) const {
return hasNRVOCandidate();
}
/// Build a return statement.
ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate);
/// Build an empty return statement.
explicit ReturnStmt(EmptyShell Empty, bool HasNRVOCandidate);
public:
/// Create a return statement.
static ReturnStmt *Create(const ASTContext &Ctx, SourceLocation RL, Expr *E,
const VarDecl *NRVOCandidate);
/// Create an empty return statement, optionally with
/// storage for an NRVO candidate.
static ReturnStmt *CreateEmpty(const ASTContext &Ctx, bool HasNRVOCandidate);
Expr *getRetValue() { return reinterpret_cast<Expr *>(RetExpr); }
const Expr *getRetValue() const { return reinterpret_cast<Expr *>(RetExpr); }
void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt *>(E); }
/// Retrieve the variable that might be used for the named return
/// value optimization.
///
/// The optimization itself can only be performed if the variable is
/// also marked as an NRVO object.
const VarDecl *getNRVOCandidate() const {
return hasNRVOCandidate() ? *getTrailingObjects<const VarDecl *>()
: nullptr;
}
/// Set the variable that might be used for the named return value
/// optimization. The return statement must have storage for it,
/// which is the case if and only if hasNRVOCandidate() is true.
void setNRVOCandidate(const VarDecl *Var) {
assert(hasNRVOCandidate() &&
"This return statement has no storage for an NRVO candidate!");
*getTrailingObjects<const VarDecl *>() = Var;
}
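// NRVO illustration (a sketch, not part of the original header):
//   X make() {
//     X local;      // 'local' is the NRVO candidate recorded here
//     return local; // the copy/move may be elided into the return slot
//   }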
SourceLocation getReturnLoc() const { return ReturnStmtBits.RetLoc; }
void setReturnLoc(SourceLocation L) { ReturnStmtBits.RetLoc = L; }
SourceLocation getBeginLoc() const { return getReturnLoc(); }
SourceLocation getEndLoc() const LLVM_READONLY {
return RetExpr ? RetExpr->getEndLoc() : getReturnLoc();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == ReturnStmtClass;
}
// Iterators
child_range children() {
if (RetExpr)
return child_range(&RetExpr, &RetExpr + 1);
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
if (RetExpr)
return const_child_range(&RetExpr, &RetExpr + 1);
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt.
class AsmStmt : public Stmt {
protected:
friend class ASTStmtReader;
SourceLocation AsmLoc;
/// True if the assembly statement does not have any input or output
/// operands.
bool IsSimple;
/// If true, treat this inline assembly as having side effects.
/// This assembly statement should not be optimized, deleted or moved.
bool IsVolatile;
unsigned NumOutputs;
unsigned NumInputs;
unsigned NumClobbers;
Stmt **Exprs = nullptr;
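// Layout of Exprs (inferred from the iterator accessors below):
//   [0, NumOutputs)                      output expressions
//   [NumOutputs, NumOutputs + NumInputs) input expressions
// GCCAsmStmt additionally stores NumLabels label expressions after the
// inputs (see GCCAsmStmt::begin_labels).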
AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile,
unsigned numoutputs, unsigned numinputs, unsigned numclobbers)
: Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile),
NumOutputs(numoutputs), NumInputs(numinputs),
NumClobbers(numclobbers) {}
public:
/// Build an empty inline-assembly statement.
explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty) {}
SourceLocation getAsmLoc() const { return AsmLoc; }
void setAsmLoc(SourceLocation L) { AsmLoc = L; }
bool isSimple() const { return IsSimple; }
void setSimple(bool V) { IsSimple = V; }
bool isVolatile() const { return IsVolatile; }
void setVolatile(bool V) { IsVolatile = V; }
SourceLocation getBeginLoc() const LLVM_READONLY { return {}; }
SourceLocation getEndLoc() const LLVM_READONLY { return {}; }
//===--- Asm String Analysis ---===//
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
unsigned getNumOutputs() const { return NumOutputs; }
/// getOutputConstraint - Return the constraint string for the specified
/// output operand. All output constraints are known to be non-empty (either
/// '=' or '+').
StringRef getOutputConstraint(unsigned i) const;
/// isOutputPlusConstraint - Return true if the specified output constraint
/// is a "+" constraint (which is both an input and an output) or false if it
/// is an "=" constraint (just an output).
bool isOutputPlusConstraint(unsigned i) const {
return getOutputConstraint(i)[0] == '+';
}
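// Illustrative GCC asm (a sketch, not part of the original header):
//   asm("addl %1, %0" : "+r"(sum) : "r"(x));
// Operand 0 has the "+r" constraint (both input and output), so
// isOutputPlusConstraint(0) is true and getNumPlusOperands() returns 1.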
const Expr *getOutputExpr(unsigned i) const;
/// getNumPlusOperands - Return the number of output operands that have a "+"
/// constraint.
unsigned getNumPlusOperands() const;
//===--- Input operands ---===//
unsigned getNumInputs() const { return NumInputs; }
/// getInputConstraint - Return the specified input constraint. Unlike output
/// constraints, these can be empty.
StringRef getInputConstraint(unsigned i) const;
const Expr *getInputExpr(unsigned i) const;
//===--- Other ---===//
unsigned getNumClobbers() const { return NumClobbers; }
StringRef getClobber(unsigned i) const;
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass ||
T->getStmtClass() == MSAsmStmtClass;
}
// Input expr iterators.
using inputs_iterator = ExprIterator;
using const_inputs_iterator = ConstExprIterator;
using inputs_range = llvm::iterator_range<inputs_iterator>;
using inputs_const_range = llvm::iterator_range<const_inputs_iterator>;
inputs_iterator begin_inputs() {
return &Exprs[0] + NumOutputs;
}
inputs_iterator end_inputs() {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); }
const_inputs_iterator begin_inputs() const {
return &Exprs[0] + NumOutputs;
}
const_inputs_iterator end_inputs() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
inputs_const_range inputs() const {
return inputs_const_range(begin_inputs(), end_inputs());
}
// Output expr iterators.
using outputs_iterator = ExprIterator;
using const_outputs_iterator = ConstExprIterator;
using outputs_range = llvm::iterator_range<outputs_iterator>;
using outputs_const_range = llvm::iterator_range<const_outputs_iterator>;
outputs_iterator begin_outputs() {
return &Exprs[0];
}
outputs_iterator end_outputs() {
return &Exprs[0] + NumOutputs;
}
outputs_range outputs() {
return outputs_range(begin_outputs(), end_outputs());
}
const_outputs_iterator begin_outputs() const {
return &Exprs[0];
}
const_outputs_iterator end_outputs() const {
return &Exprs[0] + NumOutputs;
}
outputs_const_range outputs() const {
return outputs_const_range(begin_outputs(), end_outputs());
}
child_range children() {
return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs);
}
};
/// This represents a GCC inline-assembly statement extension.
class GCCAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation RParenLoc;
StringLiteral *AsmStr;
// FIXME: If we wanted to, we could allocate all of these in one big array.
StringLiteral **Constraints = nullptr;
StringLiteral **Clobbers = nullptr;
IdentifierInfo **Names = nullptr;
unsigned NumLabels = 0;
public:
GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple,
bool isvolatile, unsigned numoutputs, unsigned numinputs,
IdentifierInfo **names, StringLiteral **constraints, Expr **exprs,
StringLiteral *asmstr, unsigned numclobbers,
StringLiteral **clobbers, unsigned numlabels,
SourceLocation rparenloc);
/// Build an empty inline-assembly statement.
explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty) {}
SourceLocation getRParenLoc() const { return RParenLoc; }
void setRParenLoc(SourceLocation L) { RParenLoc = L; }
//===--- Asm String Analysis ---===//
const StringLiteral *getAsmString() const { return AsmStr; }
StringLiteral *getAsmString() { return AsmStr; }
void setAsmString(StringLiteral *E) { AsmStr = E; }
/// AsmStringPiece - this is part of a decomposed asm string specification
/// (for use with the AnalyzeAsmString function below). An asm string is
/// considered to be a concatenation of these parts.
class AsmStringPiece {
public:
enum Kind {
String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%".
Operand // Operand reference, with optional modifier %c4.
};
private:
Kind MyKind;
std::string Str;
unsigned OperandNo;
// Source range for operand references.
CharSourceRange Range;
public:
AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {}
AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin,
SourceLocation End)
: MyKind(Operand), Str(S), OperandNo(OpNo),
Range(CharSourceRange::getCharRange(Begin, End)) {}
bool isString() const { return MyKind == String; }
bool isOperand() const { return MyKind == Operand; }
const std::string &getString() const { return Str; }
unsigned getOperandNo() const {
assert(isOperand());
return OperandNo;
}
CharSourceRange getRange() const {
assert(isOperand() && "Range is currently used only for Operands.");
return Range;
}
/// getModifier - Get the modifier for this operand, if present. This
/// returns '\0' if there was no modifier.
char getModifier() const;
};
/// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing
/// it into pieces. If the asm string is erroneous, emit errors and return
/// true, otherwise return false. This handles canonicalization and
/// translation of strings from GCC syntax to LLVM IR syntax, and handles
/// flattening of named references like %[foo] to Operand AsmStringPiece's.
unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces,
const ASTContext &C, unsigned &DiagOffs) const;
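// Illustrative decomposition (a sketch, not part of the original header):
// the asm string "mov %[val], %0" splits into the pieces
//   String("mov "), Operand(%[val]), String(", "), Operand(%0),
// with the named reference %[val] resolved to its numeric operand index.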
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; }
StringRef getOutputName(unsigned i) const {
if (IdentifierInfo *II = getOutputIdentifier(i))
return II->getName();
return {};
}
StringRef getOutputConstraint(unsigned i) const;
const StringLiteral *getOutputConstraintLiteral(unsigned i) const {
return Constraints[i];
}
StringLiteral *getOutputConstraintLiteral(unsigned i) {
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
IdentifierInfo *getInputIdentifier(unsigned i) const {
return Names[i + NumOutputs];
}
StringRef getInputName(unsigned i) const {
if (IdentifierInfo *II = getInputIdentifier(i))
return II->getName();
return {};
}
StringRef getInputConstraint(unsigned i) const;
const StringLiteral *getInputConstraintLiteral(unsigned i) const {
return Constraints[i + NumOutputs];
}
StringLiteral *getInputConstraintLiteral(unsigned i) {
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<GCCAsmStmt*>(this)->getInputExpr(i);
}
//===--- Labels ---===//
bool isAsmGoto() const {
return NumLabels > 0;
}
unsigned getNumLabels() const {
return NumLabels;
}
IdentifierInfo *getLabelIdentifier(unsigned i) const {
return Names[i + NumOutputs + NumInputs];
}
AddrLabelExpr *getLabelExpr(unsigned i) const;
StringRef getLabelName(unsigned i) const;
using labels_iterator = CastIterator<AddrLabelExpr>;
using const_labels_iterator = ConstCastIterator<AddrLabelExpr>;
using labels_range = llvm::iterator_range<labels_iterator>;
using labels_const_range = llvm::iterator_range<const_labels_iterator>;
labels_iterator begin_labels() {
return &Exprs[0] + NumOutputs + NumInputs;
}
labels_iterator end_labels() {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_range labels() {
return labels_range(begin_labels(), end_labels());
}
const_labels_iterator begin_labels() const {
return &Exprs[0] + NumOutputs + NumInputs;
}
const_labels_iterator end_labels() const {
return &Exprs[0] + NumOutputs + NumInputs + NumLabels;
}
labels_const_range labels() const {
return labels_const_range(begin_labels(), end_labels());
}
private:
void setOutputsAndInputsAndClobbers(const ASTContext &C,
IdentifierInfo **Names,
StringLiteral **Constraints,
Stmt **Exprs,
unsigned NumOutputs,
unsigned NumInputs,
unsigned NumLabels,
StringLiteral **Clobbers,
unsigned NumClobbers);
public:
//===--- Other ---===//
/// getNamedOperand - Given a symbolic operand reference like %[foo],
/// translate this into a numeric value needed to reference the same operand.
/// This returns -1 if the operand name is invalid.
int getNamedOperand(StringRef SymbolicName) const;
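// Illustration (a sketch, not part of the original header): for
//   asm("inc %[counter]" : [counter] "+r"(n));
// getNamedOperand("counter") returns 0, the operand's numeric index.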
StringRef getClobber(unsigned i) const;
StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; }
const StringLiteral *getClobberStringLiteral(unsigned i) const {
return Clobbers[i];
}
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return RParenLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == GCCAsmStmtClass;
}
};
/// This represents a Microsoft inline-assembly statement extension.
class MSAsmStmt : public AsmStmt {
friend class ASTStmtReader;
SourceLocation LBraceLoc, EndLoc;
StringRef AsmStr;
unsigned NumAsmToks = 0;
Token *AsmToks = nullptr;
StringRef *Constraints = nullptr;
StringRef *Clobbers = nullptr;
public:
MSAsmStmt(const ASTContext &C, SourceLocation asmloc,
SourceLocation lbraceloc, bool issimple, bool isvolatile,
ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs,
ArrayRef<StringRef> constraints,
ArrayRef<Expr*> exprs, StringRef asmstr,
ArrayRef<StringRef> clobbers, SourceLocation endloc);
/// Build an empty MS-style inline-assembly statement.
explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty) {}
SourceLocation getLBraceLoc() const { return LBraceLoc; }
void setLBraceLoc(SourceLocation L) { LBraceLoc = L; }
SourceLocation getEndLoc() const { return EndLoc; }
void setEndLoc(SourceLocation L) { EndLoc = L; }
bool hasBraces() const { return LBraceLoc.isValid(); }
unsigned getNumAsmToks() { return NumAsmToks; }
Token *getAsmToks() { return AsmToks; }
//===--- Asm String Analysis ---===//
StringRef getAsmString() const { return AsmStr; }
/// Assemble final IR asm string.
std::string generateAsmString(const ASTContext &C) const;
//===--- Output operands ---===//
StringRef getOutputConstraint(unsigned i) const {
assert(i < NumOutputs);
return Constraints[i];
}
Expr *getOutputExpr(unsigned i);
const Expr *getOutputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getOutputExpr(i);
}
//===--- Input operands ---===//
StringRef getInputConstraint(unsigned i) const {
assert(i < NumInputs);
return Constraints[i + NumOutputs];
}
Expr *getInputExpr(unsigned i);
void setInputExpr(unsigned i, Expr *E);
const Expr *getInputExpr(unsigned i) const {
return const_cast<MSAsmStmt*>(this)->getInputExpr(i);
}
//===--- Other ---===//
ArrayRef<StringRef> getAllConstraints() const {
return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs);
}
ArrayRef<StringRef> getClobbers() const {
return llvm::makeArrayRef(Clobbers, NumClobbers);
}
ArrayRef<Expr*> getAllExprs() const {
return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs),
NumInputs + NumOutputs);
}
StringRef getClobber(unsigned i) const { return getClobbers()[i]; }
private:
void initialize(const ASTContext &C, StringRef AsmString,
ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints,
ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers);
public:
SourceLocation getBeginLoc() const LLVM_READONLY { return AsmLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == MSAsmStmtClass;
}
child_range children() {
return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
const_child_range children() const {
return const_child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]);
}
};
class SEHExceptStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Children[2];
enum { FILTER_EXPR, BLOCK };
SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block);
explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) {}
public:
static SEHExceptStmt* Create(const ASTContext &C,
SourceLocation ExceptLoc,
Expr *FilterExpr,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getExceptLoc(); }
SourceLocation getExceptLoc() const { return Loc; }
SourceLocation getEndLoc() const { return getBlock()->getEndLoc(); }
Expr *getFilterExpr() const {
return reinterpret_cast<Expr*>(Children[FILTER_EXPR]);
}
CompoundStmt *getBlock() const {
return cast<CompoundStmt>(Children[BLOCK]);
}
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHExceptStmtClass;
}
};
class SEHFinallyStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
SourceLocation Loc;
Stmt *Block;
SEHFinallyStmt(SourceLocation Loc, Stmt *Block);
explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) {}
public:
static SEHFinallyStmt* Create(const ASTContext &C,
SourceLocation FinallyLoc,
Stmt *Block);
SourceLocation getBeginLoc() const LLVM_READONLY { return getFinallyLoc(); }
SourceLocation getFinallyLoc() const { return Loc; }
SourceLocation getEndLoc() const { return Block->getEndLoc(); }
CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); }
child_range children() {
return child_range(&Block,&Block+1);
}
const_child_range children() const {
return const_child_range(&Block, &Block + 1);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHFinallyStmtClass;
}
};
class SEHTryStmt : public Stmt {
friend class ASTReader;
friend class ASTStmtReader;
bool IsCXXTry;
SourceLocation TryLoc;
Stmt *Children[2];
enum { TRY = 0, HANDLER = 1 };
SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try'
SourceLocation TryLoc,
Stmt *TryBlock,
Stmt *Handler);
explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) {}
public:
static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry,
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
SourceLocation getBeginLoc() const LLVM_READONLY { return getTryLoc(); }
SourceLocation getTryLoc() const { return TryLoc; }
SourceLocation getEndLoc() const { return Children[HANDLER]->getEndLoc(); }
bool getIsCXXTry() const { return IsCXXTry; }
CompoundStmt* getTryBlock() const {
return cast<CompoundStmt>(Children[TRY]);
}
Stmt *getHandler() const { return Children[HANDLER]; }
/// Returns nullptr if the corresponding handler is not defined.
SEHExceptStmt *getExceptHandler() const;
SEHFinallyStmt *getFinallyHandler() const;
child_range children() {
return child_range(Children, Children+2);
}
const_child_range children() const {
return const_child_range(Children, Children + 2);
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHTryStmtClass;
}
};
/// Represents a __leave statement.
class SEHLeaveStmt : public Stmt {
SourceLocation LeaveLoc;
public:
explicit SEHLeaveStmt(SourceLocation LL)
: Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {}
/// Build an empty __leave statement.
explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) {}
SourceLocation getLeaveLoc() const { return LeaveLoc; }
void setLeaveLoc(SourceLocation L) { LeaveLoc = L; }
SourceLocation getBeginLoc() const LLVM_READONLY { return LeaveLoc; }
SourceLocation getEndLoc() const LLVM_READONLY { return LeaveLoc; }
static bool classof(const Stmt *T) {
return T->getStmtClass() == SEHLeaveStmtClass;
}
// Iterators
child_range children() {
return child_range(child_iterator(), child_iterator());
}
const_child_range children() const {
return const_child_range(const_child_iterator(), const_child_iterator());
}
};
/// This captures a statement into a function. For example, the following
/// pragma-annotated compound statement can be represented as a CapturedStmt,
/// and this compound statement is the body of an anonymous outlined function.
/// @code
/// #pragma omp parallel
/// {
/// compute();
/// }
/// @endcode
class CapturedStmt : public Stmt {
public:
/// The different capture forms: by 'this', by reference, capture for
/// variable-length array type etc.
enum VariableCaptureKind {
VCK_This,
VCK_ByRef,
VCK_ByCopy,
VCK_VLAType,
};
/// Describes the capture of either a variable, or 'this', or
/// variable-length array type.
class Capture {
llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind;
SourceLocation Loc;
public:
friend class ASTStmtReader;
/// Create a new capture.
///
/// \param Loc The source location associated with this capture.
///
/// \param Kind The kind of capture (this, ByRef, ...).
///
/// \param Var The variable being captured, or null if capturing this.
Capture(SourceLocation Loc, VariableCaptureKind Kind,
VarDecl *Var = nullptr);
/// Determine the kind of capture.
VariableCaptureKind getCaptureKind() const;
/// Retrieve the source location at which the variable or 'this' was
/// first used.
SourceLocation getLocation() const { return Loc; }
/// Determine whether this capture handles the C++ 'this' pointer.
bool capturesThis() const { return getCaptureKind() == VCK_This; }
/// Determine whether this capture handles a variable (by reference).
bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; }
/// Determine whether this capture handles a variable by copy.
bool capturesVariableByCopy() const {
return getCaptureKind() == VCK_ByCopy;
}
/// Determine whether this capture handles a variable-length array
/// type.
bool capturesVariableArrayType() const {
return getCaptureKind() == VCK_VLAType;
}
/// Retrieve the declaration of the variable being captured.
///
/// This operation is only valid if this capture captures a variable.
VarDecl *getCapturedVar() const;
};
private:
/// The number of variables captured, including 'this'.
unsigned NumCaptures;
/// The pointer part is the outlined function and the int part is the
/// captured region kind, 'CR_Default' etc.
llvm::PointerIntPair<CapturedDecl *, 2, CapturedRegionKind> CapDeclAndKind;
/// The record for captured variables, a RecordDecl or CXXRecordDecl.
RecordDecl *TheRecordDecl = nullptr;
/// Construct a captured statement.
CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD);
/// Construct an empty captured statement.
CapturedStmt(EmptyShell Empty, unsigned NumCaptures);
Stmt **getStoredStmts() { return reinterpret_cast<Stmt **>(this + 1); }
Stmt *const *getStoredStmts() const {
return reinterpret_cast<Stmt *const *>(this + 1);
}
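// Storage layout (inferred from the accessors here): the object is
// immediately followed by NumCaptures capture-initializer expressions and
// then one trailing "Stmt *" for the captured statement:
//   getStoredStmts()[0 .. NumCaptures-1]  capture initializers
//   getStoredStmts()[NumCaptures]         captured statement
// The Capture descriptors follow the statements (see getStoredCaptures()).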
Capture *getStoredCaptures() const;
void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; }
public:
friend class ASTStmtReader;
static CapturedStmt *Create(const ASTContext &Context, Stmt *S,
CapturedRegionKind Kind,
ArrayRef<Capture> Captures,
ArrayRef<Expr *> CaptureInits,
CapturedDecl *CD, RecordDecl *RD);
static CapturedStmt *CreateDeserialized(const ASTContext &Context,
unsigned NumCaptures);
/// Retrieve the statement being captured.
Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; }
const Stmt *getCapturedStmt() const { return getStoredStmts()[NumCaptures]; }
/// Retrieve the outlined function declaration.
CapturedDecl *getCapturedDecl();
const CapturedDecl *getCapturedDecl() const;
/// Set the outlined function declaration.
void setCapturedDecl(CapturedDecl *D);
/// Retrieve the captured region kind.
CapturedRegionKind getCapturedRegionKind() const;
/// Set the captured region kind.
void setCapturedRegionKind(CapturedRegionKind Kind);
/// Retrieve the record declaration for captured variables.
const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; }
/// Set the record declaration for captured variables.
void setCapturedRecordDecl(RecordDecl *D) {
assert(D && "null RecordDecl");
TheRecordDecl = D;
}
/// Returns true if the given variable has been captured.
bool capturesVariable(const VarDecl *Var) const;
/// An iterator that walks over the captures.
using capture_iterator = Capture *;
using const_capture_iterator = const Capture *;
using capture_range = llvm::iterator_range<capture_iterator>;
using capture_const_range = llvm::iterator_range<const_capture_iterator>;
capture_range captures() {
return capture_range(capture_begin(), capture_end());
}
capture_const_range captures() const {
return capture_const_range(capture_begin(), capture_end());
}
/// Retrieve an iterator pointing to the first capture.
capture_iterator capture_begin() { return getStoredCaptures(); }
const_capture_iterator capture_begin() const { return getStoredCaptures(); }
/// Retrieve an iterator pointing past the end of the sequence of
/// captures.
capture_iterator capture_end() const {
return getStoredCaptures() + NumCaptures;
}
/// Retrieve the number of captures, including 'this'.
unsigned capture_size() const { return NumCaptures; }
/// Iterator that walks over the capture initialization arguments.
using capture_init_iterator = Expr **;
using capture_init_range = llvm::iterator_range<capture_init_iterator>;
/// Const iterator that walks over the capture initialization
/// arguments.
using const_capture_init_iterator = Expr *const *;
using const_capture_init_range =
llvm::iterator_range<const_capture_init_iterator>;
capture_init_range capture_inits() {
return capture_init_range(capture_init_begin(), capture_init_end());
}
const_capture_init_range capture_inits() const {
return const_capture_init_range(capture_init_begin(), capture_init_end());
}
/// Retrieve the first initialization argument.
capture_init_iterator capture_init_begin() {
return reinterpret_cast<Expr **>(getStoredStmts());
}
const_capture_init_iterator capture_init_begin() const {
return reinterpret_cast<Expr *const *>(getStoredStmts());
}
/// Retrieve the iterator pointing one past the last initialization
/// argument.
capture_init_iterator capture_init_end() {
return capture_init_begin() + NumCaptures;
}
const_capture_init_iterator capture_init_end() const {
return capture_init_begin() + NumCaptures;
}
SourceLocation getBeginLoc() const LLVM_READONLY {
return getCapturedStmt()->getBeginLoc();
}
SourceLocation getEndLoc() const LLVM_READONLY {
return getCapturedStmt()->getEndLoc();
}
SourceRange getSourceRange() const LLVM_READONLY {
return getCapturedStmt()->getSourceRange();
}
static bool classof(const Stmt *T) {
return T->getStmtClass() == CapturedStmtClass;
}
child_range children();
const_child_range children() const;
};
} // namespace clang
#endif // LLVM_CLANG_AST_STMT_H
|
ofmo-calc-frag.c | /**
* @file ofmo-calc-frag.c
 * @brief Functions for the fragment electronic-state calculations
 *
 * The two-center Coulomb integrals are now split by integral type and
 * computed type by type.
 *
 * The four-center Coulomb interaction terms, except for the last one,
 * are now also computed on worker 0.
*
* */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <limits.h>
#ifdef USE_MPI
#include <mpi.h>
#else
#include "mpi-dummy.h"
#endif
#ifdef _OPENMP
#include <omp.h>
#else
#include "omp-dummy.h"
#endif
#include "ofmo-def.h"
#include "ofmo-data.h"
#include "ofmo-integ.h"
#include "ofmo-scf.h"
#include "ofmo-mat.h"
#include "ofmo-monomer-data.h"
#include "ofmo-prof.h"
#include "ofmo-misc.h"
#include "ofmo-twoint.h"
#ifdef USE_CUDA
#include "cuda/cuda-drv.h"
#include "cuda/cudalib.h"
#include "cuda/cuda-ifc4c.h"
void cuda_print_wifc4c(void);
#endif
//#define EPS_PS4 1.e-30
//#define EPS_ERI 1.e-15
//#define EPS_PS4 1.e-20
//#define EPS_ERI 1.e-12
//#define EPS_SCH 1.e-12
#define EPS_FAC_IFC4C 0.5e0
#define NPARTIAL 1000
#ifdef DEBUG_MODE
extern FILE* fp_debug;
#endif
// global counter
extern int ofmo_gc_init( const int id,
MPI_Comm comm, const int init_val,
const int njobs );
extern int ofmo_gc_nxtval( const int id );
// debug
extern void ofmo_gc_set_debug_mode();
extern void ofmo_gc_reset_debug_mode();
extern int ofmo_projection_operator(
const int nmonomer, const int monomer_list[],
const int nao, const int sao2uao[], const double Ss[],
double Ps[]);
extern int ofmo_fragment_init( int nmonomer, int monomer_list[] );
extern int ofmo_make_approx_level(
const int nmonomer, const int monomer_list[],
int *nifc4c, int joblist_ifc4c[],
int *nifc3c, int joblist_ifc3c[], MPI_Comm comm );
extern int ofmo_get_approx_level( const int ifrag );
extern int ofmo_monomer_initial_density(int frag, double D[], double aop[],
double atp[] );
/* calc. and return the sum of integer array elements */
static int isum( const int n, const int ix[] ) {
//int i, sum=0;
//for ( i=0; i<n; i++ ) sum += ix[i];
//return sum;
return ofmo_isum(n, ix);
}
/* B += A */
static void acc_array( const int n, const double A[], double B[] ) {
//for ( int i=0; i<n; i++ ) B[i] += A[i];
ofmo_daxpy(n, 1.0e0, A, B);
}
static int is_in_fragment( const int ifrag, const int nmonomer,
const int monomer_list[] ) {
for ( int i=0; i<nmonomer; i++ )
if ( monomer_list[i] == ifrag ) return true;
return false;
}
/* --------------------------------------------------------------
 * Functions for obtaining the worker count and worker IDs under
 * hybrid (MPI + OpenMP) parallelism
* -------------------------------------------------------------- */
static int _nworkers_ = 1;
static int *_workerid_ = NULL;
static int *_nprocs_ = NULL;
static int *_itemp_ = NULL;
static void ofmo_mt_finalize() {
Free( _nprocs_ );
Free( _workerid_ );
Free( _itemp_ );
}
static int ofmo_mt_init( MPI_Comm comm ) {
static int called = false, maxthreads;
if ( !called ) {
int nprocs;
MPI_Comm_size( MPI_COMM_WORLD, &nprocs );
_nprocs_ = (int*)malloc( sizeof(int) * nprocs );
_itemp_ = (int*)malloc( sizeof(int) * nprocs );
maxthreads = omp_get_max_threads();
_workerid_ = (int*)malloc( sizeof(int) * maxthreads );
atexit( ofmo_mt_finalize );
called = true;
}
int myrank, nprocs;
MPI_Comm_size( comm, &nprocs );
MPI_Comm_rank( comm, &myrank );
/* get # of threads in each process (_nprocs_[rank]) */
for ( int i=0; i<nprocs; i++ ) _itemp_[i] = 0;
#pragma omp parallel
{
int nthreads;
nthreads = omp_get_num_threads();
#pragma omp master
_itemp_[myrank] = nthreads;
}
MPI_Allreduce( _itemp_, _nprocs_, nprocs, MPI_INT, MPI_SUM, comm );
/* determine nworkers and workerid */
int lwkid;
_nworkers_ = isum( nprocs, _nprocs_ );
if ( myrank==0 ) lwkid = 0;
else lwkid = isum( myrank, _nprocs_ );
for ( int i=0; i<maxthreads; i++ ) _workerid_[i] = lwkid + i;
return 0;
}
static int ofmo_mt_get_nworkers() { return _nworkers_; }
static int ofmo_mt_get_workerid( const int mythread ) {
return _workerid_[mythread];
}
/* -----------------------------------------------------
 * Construction of a list of monomers sorted in descending order of AO count
* ----------------------------------------------------- */
/** Compare two integers (used by qsort for descending order)
* */
static int comp2( const void *p1, const void *p2 ) {
return ( (*(int*)p2) - (*(int*)p1) ); // descending; compares only the first int of each record
}
static int *frag_order = NULL;
static void dealloc_frag_order() {
if ( frag_order != NULL ) free( frag_order );
frag_order = NULL;
}
static int init_frag_order() {
static int called = false;
if ( called ) return 0;
int nfrag, i, i2, *nfao;
if ( ofmo_data_get_vals("nfrag nfao", &nfrag, &nfao) != 0 ) {
dbg("error\n");
return -1;
}
frag_order = (int*)malloc( sizeof(int) * nfrag *2 );
if ( frag_order == NULL ) return -1;
for ( i=0, i2=0; i<nfrag; i++, i2+=2 ) {
frag_order[i2+0] = nfao[i];
frag_order[i2+1] = i;
}
qsort( frag_order, nfrag, sizeof(int)*2, comp2 );
for ( i=0, i2=0; i<nfrag; i++, i2+=2 )
frag_order[i] = frag_order[i2+1];
atexit( dealloc_frag_order );
called = true;
return 0;
}
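/* Illustrative sketch (added; not in the original source): the
 * (key,index) record trick used by init_frag_order() on a tiny array.
 * qsort() is called with a record width of two ints, comp2() compares
 * only the first int of each record (descending order by key), and the
 * indices are then compacted in place. */
static void example_pair_sort(void) {
    /* records: { AO count, monomer index } */
    int pairs[4*2] = { 10,0,  30,1,  20,2,  25,3 };
    qsort( pairs, 4, sizeof(int)*2, comp2 );
    /* pairs is now { 30,1, 25,3, 20,2, 10,0 } */
    for ( int i=0, i2=0; i<4; i++, i2+=2 ) pairs[i] = pairs[i2+1];
    /* pairs[0..3] == { 1, 3, 2, 0 }: monomers by descending AO count */
}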
static double *_Sfrag_ = NULL; /* overlap matrix of fragment */
static double *_Hfrag_ = NULL; /* 1e-Hamilton matrix of fragment */
static double *_Ufrag_ = NULL; /* external potential matrix of fragment */
static double *_Pfrag_ = NULL; /* proj. operator matrix of fragment */
static double *_D0_ = NULL; /* initial density matrix of fragment */
static double *_D_ = NULL; /* density matrix of fragment */
static double *_ev_ = NULL;
static double *_WORK_ = NULL;
static double *_C_ = NULL; /* MO coefficient matrix */
static double **_Dmon_ = NULL; /* monomer density matrices for IFC4C */
/* cutoff table data of fragment */
static int *lcs_pair = NULL;
static int *csp_ics = NULL;
static int *csp_jcs = NULL;
static int *csp_lps_pair = NULL;
static double *csp_schwarz = NULL;
//
static double *psp_zeta = NULL;
static double *psp_dkps = NULL;
static double *psp_xiza = NULL;
/* cutoff table data of monomers of IFC4C */
static int **lcs_pair_mon = NULL;
static int **csp_ics_mon = NULL;
static int **csp_jcs_mon = NULL;
static int **csp_lps_pair_mon = NULL;
static double **csp_schwarz_mon = NULL;
//
static double **psp_zeta_mon = NULL;
static double **psp_dkps_mon = NULL;
static double **psp_xiza_mon = NULL;
/* Arrays that must be allocated per thread */
//static int _maxnifc4c_;
static double **dUmaster = NULL;
static double **dUmaster2 = NULL;
static double **_atpop_local_ = NULL;
// AO populations and atomic populations of all monomers
static double **aopop_mon = NULL;
static double **atpop_mon = NULL;
// Fragment population data
// (this memory is released by ofmo-data)
static double *_daopop_ = NULL;
static double *_datpop_ = NULL;
/* vector of integer used to make initial density matrix */
static int *_aoconv_ = NULL;
/* job list of IFC4C and IFC3C */
static int *_joblist_ifc4c_ = NULL;
static int *_joblist_ifc3c_ = NULL;
/* monomer energy0 */
static double *_menergy0_ = NULL;
static void dealloc() {
Free( _Sfrag_ );
Free( _Hfrag_ );
Free( _Ufrag_ );
Free( _Pfrag_ );
Free( _WORK_ );
Free( _D0_ );
Free( _D_ );
Free( _ev_ );
Free( _C_ );
ofmo_free_dmatrix( _Dmon_ ); _Dmon_ = NULL;
Free( lcs_pair );
Free( csp_ics );
Free( csp_jcs );
Free( csp_lps_pair );
Free( csp_schwarz );
Free( psp_zeta );
Free( psp_dkps );
Free( psp_xiza );
ofmo_free_imatrix( lcs_pair_mon ); lcs_pair_mon = NULL;
ofmo_free_imatrix( csp_ics_mon ); csp_ics_mon = NULL;
ofmo_free_imatrix( csp_jcs_mon ); csp_jcs_mon = NULL;
ofmo_free_imatrix( csp_lps_pair_mon ); csp_lps_pair_mon = NULL;
ofmo_free_dmatrix( csp_schwarz_mon ); csp_schwarz_mon = NULL;
ofmo_free_dmatrix( psp_zeta_mon ); psp_zeta_mon = NULL;
ofmo_free_dmatrix( psp_dkps_mon ); psp_dkps_mon = NULL;
ofmo_free_dmatrix( psp_xiza_mon ); psp_xiza_mon = NULL;
ofmo_free_dmatrix( dUmaster );
ofmo_free_dmatrix( dUmaster2 );
ofmo_free_dmatrix( aopop_mon );
ofmo_free_dmatrix( atpop_mon );
ofmo_free_dmatrix( _atpop_local_ );
Free( _aoconv_ );
Free( _joblist_ifc4c_ );
Free( _joblist_ifc3c_ );
Free( _menergy0_ );
Free( _daopop_ );
Free( _datpop_ );
}
static int alloc() {
static int called = false;
if ( called ) return 0;
int ierr, maxlqn, maxnfatom, maxnfao, maxnfcs, maxnfps, maxnpspair;
int nbody, nfrag, nao_total;
int *nfao, *nfatom, total_nfao, total_nfatom;
size_t total = 0, t;
ierr = ofmo_data_get_vals(
"maxlqn maxnfatom maxnfcs maxnfao maxnfps "
"maxnpspair nbody nao nfrag nfao nfatom",
&maxlqn, &maxnfatom, &maxnfcs, &maxnfao, &maxnfps,
&maxnpspair, &nbody, &nao_total, &nfrag,
&nfao, &nfatom );
if ( ierr != 0 ) {
if ( fp_prof ) {
fdbg( fp_prof, "error\n");
fflush( fp_prof );
}
return -1;
}
int fnao2, nao2, maxlqn2, ncs2, fncs2, fnpspair;
total_nfao = ofmo_isum( nfrag, nfao );
total_nfatom = ofmo_isum( nfrag, nfatom );
nao2 = maxnfao * (maxnfao+1) / 2;
fnao2 = nbody*maxnfao * (nbody*maxnfao + 1) / 2;
ncs2 = maxnfcs * (maxnfcs+1) / 2;
fncs2 = nbody*maxnfcs * (nbody*maxnfcs+1) / 2;
maxlqn2 = (maxlqn+1) * (maxlqn+2) / 2;
fnpspair = nbody * nbody * maxnpspair;
/* memory allocation */
// fragment
t = sizeof(double) * fnao2;
_Sfrag_ = (double*)malloc( t ); total += t;
_Hfrag_ = (double*)malloc( t ); total += t;
_Ufrag_ = (double*)malloc( t ); total += t;
_Pfrag_ = (double*)malloc( t ); total += t;
_WORK_ = (double*)malloc( t ); total += t;
_D0_ = (double*)malloc( t ); total += t;
_D_ = (double*)malloc( t ); total += t;
t = sizeof(double) * nbody * maxnfao;
_ev_ = (double*)malloc( t ); total += t;
t = sizeof(double) * nbody * maxnfatom;
t = sizeof(double) * nbody * nbody * maxnfao * maxnfao;
_C_ = (double*)malloc( t ); total += t;
// IFC4C monomers
t = sizeof(double) * nao2;
_Dmon_ = ofmo_alloc_dmatrix( MAXNIFC4C, nao2 );
total += (t * MAXNIFC4C);
/* cutoff table of fragment */
t = sizeof(int) * (maxlqn2+1+1);
lcs_pair = (int*)malloc( t ); total += t;
t = sizeof(int) * fncs2;
csp_ics = (int*)malloc( t ); total += t;
csp_jcs = (int*)malloc( t ); total += t;
csp_lps_pair = (int*)malloc( t ); total += t;
t = sizeof(double) * fncs2;
csp_schwarz = (double*)malloc( t ); total += t;
t = sizeof(double) * fnpspair;
psp_zeta = (double*)malloc( t ); total += t;
psp_dkps = (double*)malloc( t ); total += t;
psp_xiza = (double*)malloc( t ); total += t;
/* cutoff table of IFC4C monomers */
t = sizeof(int) * MAXNIFC4C * (maxlqn2+1+1);
lcs_pair_mon = ofmo_alloc_imatrix( MAXNIFC4C, (maxlqn2+1+1) );
total += t;
t = sizeof(int) * MAXNIFC4C * ncs2;
csp_ics_mon = ofmo_alloc_imatrix( MAXNIFC4C, ncs2 ); total += t;
csp_jcs_mon = ofmo_alloc_imatrix( MAXNIFC4C, ncs2 ); total += t;
csp_lps_pair_mon = ofmo_alloc_imatrix( MAXNIFC4C, ncs2 ); total += t;
t = sizeof(double) * MAXNIFC4C * ncs2;
csp_schwarz_mon = ofmo_alloc_dmatrix( MAXNIFC4C, ncs2 ); total += t;
t = sizeof(double) * MAXNIFC4C * maxnpspair;
psp_zeta_mon = ofmo_alloc_dmatrix( MAXNIFC4C, maxnpspair);
total += t;
psp_dkps_mon = ofmo_alloc_dmatrix( MAXNIFC4C, maxnpspair);
total += t;
psp_xiza_mon = ofmo_alloc_dmatrix( MAXNIFC4C, maxnpspair);
total += t;
/* Arrays that must be allocated per thread */
int maxthreads;
maxthreads = omp_get_max_threads();
dUmaster = ofmo_alloc_dmatrix( maxthreads, fnao2 );
dUmaster2 = ofmo_alloc_dmatrix( maxthreads, fnao2 );
total += (maxthreads * fnao2)*sizeof(double);
_atpop_local_ = ofmo_alloc_dmatrix( maxthreads, maxnfatom );
total += (maxthreads * maxnfatom) * sizeof(double);
/* Populations of all monomers */
aopop_mon = ofmo_alloc_dmatrixv( nfrag, nfao );
total += total_nfao * sizeof(double);
atpop_mon = ofmo_alloc_dmatrixv( nfrag, nfatom );
total += total_nfatom * sizeof(double);
memset(aopop_mon[0], '\0', sizeof(double)*total_nfao );
memset(atpop_mon[0], '\0', sizeof(double)*total_nfatom );
/* For AO index reordering */
t = sizeof(int) * nao_total;
_aoconv_ = (int*)malloc( t ); total += t;
/* Job lists used in the fragment electronic-state calculation */
t = sizeof(int) * nfrag;
_joblist_ifc4c_ = (int*)malloc( t ); total += t;
_joblist_ifc3c_ = (int*)malloc( t ); total += t;
/* Monomer energies (excluding the environmental-potential term) */
t = sizeof(double) * nfrag;
_menergy0_ = (double*)malloc( t ); total += t;
/* Fragment population data */
_daopop_ = (double*)malloc( sizeof(double) * nbody * maxnfao );
_datpop_ = (double*)malloc( sizeof(double) * nbody * maxnfatom );
total += ( sizeof(double) * nbody * (maxnfao + maxnfatom) );
// information for memory allocation
if ( fp_prof ) {
double dsize;
dsize = (double)total / (double)(1024*1024);
fprintf( fp_prof,
"== allocd memory size in ofmo-calc-frag.c = %10.3f MB\n",
dsize );
}
atexit( dealloc );
called = true;
return 0;
}
int ofmo_frag_init() {
int maxnfao, nbody;
ofmo_data_get_vals("maxnfao nbody", &maxnfao, &nbody );
ofmo_scf_init( maxnfao*nbody );
alloc();
init_frag_order();
return 0;
}
int ofmo_monomer_init_density( const int *imsg, MPI_Comm comm ) {
int nprocs, myrank;
int myfrag;
static int called=false, nfrag, *nfao, *nfatom, maxnfao, maxnfatom;
double *D, *aop, *atp;
if ( !called ) {
ofmo_data_get_vals("nfrag nfao nfatom maxnfao maxnfatom",
&nfrag, &nfao, &nfatom, &maxnfao, &maxnfatom );
called = true;
}
MPI_Comm_rank( comm, &myrank );
MPI_Comm_size( comm, &nprocs );
if ( imsg[6] >= nfrag ) return 0;
D = _D_;
aop = _daopop_;
atp = _datpop_;
myfrag = imsg[6] + myrank;
{
if ( fp_prof ) {
int start, end;
start = imsg[6];
end = start + nprocs;
if ( end > nfrag ) end = nfrag;
fprintf(fp_prof, "frag= ");
for ( int ifrag=start; ifrag<end; ifrag++ ) {
fprintf( fp_prof, " %3d", ifrag );
}
fprintf( fp_prof, "\n");
fflush( fp_prof );
}
}
if ( myfrag < nfrag ) {
ofmo_monomer_initial_density( myfrag, D, aop, atp );
ofmo_put_monomer_density( myfrag, D );
ofmo_put_monomer_aopop( myfrag, aop );
ofmo_put_monomer_atpop( myfrag, atp );
}
return 0;
}
static int ofmo_construct_init_density( MPI_Comm comm,
const int nmonomer, const int monomer_list[],
const int nao, const int fsao2tuao[],
const int nao_total, int **msao2tuao, const int nfao[],
double D[] ) {
int myrank;
MPI_Comm_rank( comm, &myrank );
//is_root = ( myrank == root );
if ( nmonomer == 1 ) {
int ifrag;
//nao2 = (nao*nao+nao)>>1;
ifrag = monomer_list[0];
ofmo_get_monomer_density( comm, ifrag, D );
return 0;
}
/* initial condition */
int *tuao2fsao=_aoconv_, nao2;
for ( int iao=0; iao<nao_total; iao++ ) tuao2fsao[iao] = -1;
for ( int iao=0; iao<nao; iao++ ) tuao2fsao[ fsao2tuao[iao] ] = iao;
nao2 = (nao*nao+nao)>>1;
memset( D, '\0', sizeof(double)*nao2 );
/* construct fragment (dimer, trimer, ...) initial density */
int i, ifrag, iao, jao, ijao, I, J, IJ, I2;
double *Dmon = _Sfrag_; /* _Sfrag_ is used as temporary array */
for ( i=0; i<nmonomer; i++ ) {
ifrag = monomer_list[i];
ofmo_get_monomer_density( comm, ifrag, Dmon );
for ( iao=0, ijao=0; iao<nfao[ifrag]; iao++ ) {
if ( (I=tuao2fsao[ msao2tuao[ifrag][iao] ]) < 0 ) {
dbg("error(ifrag=%d, iao=%d)\n", ifrag, iao );
fflush(stdout);
return -1;
}
I2 = (I*I+I)>>1; /* = I*(I+1)/2 */
for ( jao=0; jao<=iao; jao++, ijao++ ) {
if ( (J = tuao2fsao[ msao2tuao[ifrag][jao] ]) < 0 ) {
dbg("error(ifrag=%d, iao=%d)\n", ifrag, jao );
fflush(stdout);
return -1;
}
IJ = (I>=J ? I2+J : ( ((J*J+J)>>1)+I ) );
D[IJ] += Dmon[ijao];
} /* for ( jao ) */
} /* for ( iao ) */
} /* for ( i<nmonomer ) */
return 0;
}
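/* Illustrative sketch (added): the packed-triangle indexing used in
 * ofmo_construct_init_density() above.  A symmetric matrix is stored as
 * its lower triangle in row-major order, so element (I,J) lives at
 * offset I*(I+1)/2 + J when I >= J; the shifts in the code compute the
 * same quantity. */
static int example_packed_index( const int I, const int J ) {
    return ( I >= J ? ((I*I+I)>>1) + J : ((J*J+J)>>1) + I );
}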
extern size_t ofmo_twoint_get_max_nzeri( const int mythread );
extern size_t ofmo_twoint_get_stored_nzeri( const int mythread );
static int NEW_SCC_STEP = false;
void ofmo_set_new_scc_flag() { NEW_SCC_STEP = true; }
/** Perform the RHF calculation of a fragment
 *
 * Performs an RHF electronic-state calculation within the FMO framework,
 * taking into account the environmental-potential term from the
 * surrounding monomers and the projection-operator term.
 * Internally, this function mainly performs the following steps:
 *
 * - Calculation of the cutoff tables for the two-electron integrals and
 *   the four-center Coulomb integrals
 * - Retrieval of the required monomer density-matrix data
 * - First round of two-electron integral evaluation for the buffered SCF
 *   method (as many integrals are computed as fit into a fixed amount
 *   of memory)
 * - Calculation of the four-center Coulomb potential term
 * - Calculation of the three-center Coulomb potential term
 * - Calculation of the two-center Coulomb potential term
 * - (Ordinary) one-electron integral evaluation
 * - Calculation of the projection-operator term
 * - Call to the SCF routine
 *
 * Of these, dynamic load balancing is applied to the two-center Coulomb
 * potential part, while static load balancing is used everywhere else.
 *
 * @param[in] comm Communicator of the worker group performing the
 *     electronic-state calculation
 * @param[in] nmonomer Number of monomers composing the fragment
 * @param[in] monomer_list[] List of the monomer indices composing the fragment
 * @param[out] D[] Resulting density matrix (packed upper-triangular format)
 * @param[out] C[] Resulting MO coefficient matrix (square matrix)
 * @param[out] e[] Resulting MO energies (vector)
 * @param[out] aopop[] AO populations from Mulliken population analysis
 * @param[out] atpop[] Atomic populations from Mulliken population analysis
 * @param[out] *energy Fragment energy including the environmental-potential term
 * @param[out] *energy0 Fragment energy excluding the environmental-potential term
 * @param[out] *ddv Change of the environmental potential in the dimer SCF calculation
 * @param[out] daopop[] Change of the AO populations in the dimer SCF calculation
 * @param[out] datpop[] Change of the atomic populations in the dimer SCF calculation
 * \f[
 * \mathtt{ddv} = \mathrm{Tr}\,
 * \left( \mathbf{D}_{IJ} - \mathbf{D}_I - \mathbf{D}_J \right)
 * \mathbf{U}_{IJ}
 * \f]
 *
 * @ingroup ofmo-calc
 * */
int ofmo_calc_fragment_electronic_state(
MPI_Comm comm, int nmonomer, int monomer_list[], int level,
double tolscf,
double *energy, double *energy0, double *ddv,
int *fnao, double daopop[], int sao2tao[],
int *fnat, double datpop[], int fatom2tatom[] ) {
static int nfrag, maxnfatom, maxnfcs, maxnfao, maxnfps, maxscf;
static int total_nfao, total_nfatom;
static int maxlqn, **mlcs;
static int *nfatom, *nfcs, *nfao, *nfps;
static int **mshel_tem, **mshel_atm, **mshel_add, **mshel_ini;
static double **matom_x, **matom_y, **matom_z;
static double **mprim_exp, **mprim_coe;
static int **matomic_number, *icharge, **ifatom;
static int maxnpspair, nbody, **msao2muao, **msao2tuao;
static int nao_total;
static int maxlqn2;
static int itypes[6*3];
static int type4c[6*6];
static int called = false;
static long nintic = 0;
static int itol=30, icut=15;
if(fp_prof) {fprintf(fp_prof,"--- calc_fragment^%d (tolscf=%8.2e) ---\n",nmonomer,tolscf);fflush(fp_prof);}
if ( !called ) {
int ierr;
ierr = ofmo_data_get_vals(
"nintic itol icut "
"nfrag maxlqn maxnfatom maxnfcs maxnfao maxnfps "
"nfatom nfcs nfao nfps "
"mlcs mshel_tem mshel_atm mshel_add mshel_ini "
"mprim_exp mprim_coe icharg "
"matx maty matz matn "
"maxnpspair nbody ifatom msao2muao msao2tuao maxscf nao",
&nintic, &itol, &icut,
&nfrag, &maxlqn, &maxnfatom, &maxnfcs, &maxnfao, &maxnfps,
&nfatom, &nfcs, &nfao, &nfps,
&mlcs, &mshel_tem, &mshel_atm, &mshel_add, &mshel_ini,
&mprim_exp, &mprim_coe, &icharge,
&matom_x, &matom_y, &matom_z, &matomic_number,
&maxnpspair, &nbody, &ifatom, &msao2muao, &msao2tuao,
&maxscf, &nao_total);
if ( ierr != 0 ) return -1;
total_nfao = ofmo_isum( nfrag, nfao );
total_nfatom = ofmo_isum( nfrag, nfatom );
maxlqn2 = ((maxlqn+1)*(maxlqn+2))>>1;
if ( maxlqn == 0 ) {
itypes[0] = type4c[0] = 0;
} else if ( maxlqn <= 2 ) {
int Lab, Lcd, Lc, ix;
ix=0;
for ( Lab=0; Lab<maxlqn2; Lab++ ) {
for ( Lc=0; Lc<=maxlqn; Lc++ ) {
itypes[ix] = Lab*3 + Lc;
ix++;
}
}
ix=0;
for ( Lab=0; Lab<maxlqn2; Lab++ ) {
for ( Lcd=0; Lcd<maxlqn2; Lcd++ ) {
type4c[ix] = Lab*6 + Lcd;
ix++;
}
}
} else return -1;
#pragma omp parallel
{
int mythread;
mythread = omp_get_thread_num();
(void)ofmo_twoint_alloc_local_gmat( mythread, maxnfao*nbody );
}
called = true;
}
/* Get MPI information */
int myrank, nprocs;
int root = 0, is_root;
MPI_Comm_rank( comm, &myrank );
MPI_Comm_size( comm, &nprocs );
is_root = ( myrank == root );
/* Get the profiler timer IDs */
static int cid_cutoff, cid_eri, cid_4c, cid_3c, cid_2c, cid_buf;
static int tid_init, tid_cutoff, tid_integ, tid_comm; // detailed timers
static int tid_Init, tid_Integ, tid_SCF, tid_Total;
cid_cutoff = ofmo_create_thread_timer( "CUTOFF", 0 );
cid_eri = ofmo_create_thread_timer( "ERI", 0 );
cid_4c = ofmo_create_thread_timer( "IFC4C", 0 );
cid_3c = ofmo_create_thread_timer( "IFC3C", 0 );
cid_2c = ofmo_create_thread_timer( "IFC2C", 0 );
cid_buf = ofmo_create_thread_timer( "BUF", 1 );
tid_init = ofmo_create_proc_timer( "init", 0 );
tid_cutoff = ofmo_create_proc_timer( "cutoff", 0 );
tid_integ = ofmo_create_proc_timer( "integ", 0 );
tid_comm = ofmo_create_proc_timer( "comm", 0 );
tid_Init = ofmo_create_proc_timer( "INIT", 1 );
tid_Integ = ofmo_create_proc_timer( "INTEG", 1 );
tid_SCF = ofmo_create_proc_timer( "SCF", 1 );
tid_Total = ofmo_create_proc_timer( "TOTAL", 1 );
ofmo_start_proc_timer( tid_init );
ofmo_start_proc_timer( tid_Init );
ofmo_start_proc_timer( tid_Total );
/* Get the atom data and basis-function data of the fragment */
int nat, ncs, nao, nps, *atomic_number, *fat2tat, ierr;
int *flcs, *shel_tem, *shel_atm, *shel_add, *shel_ini, *fsao2tuao;
int *fsao2fuao;
double *atom_x, *atom_y, *atom_z;
double *prim_exp, *prim_coe;
int charge;
ofmo_fragment_init( nmonomer, monomer_list );
ierr = ofmo_data_get_vals("fnatom fncs fnao fnps "
"fatn fat2tat fatx faty fatz "
"flcs fshel_tem fshel_atm fshel_add fshel_ini "
"fprim_exp fprim_coe fsao2tuao fsao2fuao",
&nat, &ncs, &nao, &nps,
&atomic_number, &fat2tat, &atom_x, &atom_y, &atom_z,
&flcs, &shel_tem, &shel_atm, &shel_add, &shel_ini,
&prim_exp, &prim_coe, &fsao2tuao, &fsao2fuao );
if ( ierr != 0 ) {
if ( fp_prof ) fdbg(fp_prof, "error\n");
return -1;
}
charge = 0;
for ( int i=0; i<nmonomer; i++ )
charge += icharge[ monomer_list[i] ];
/* number of electrons */
int nelec, nocc, nao2;
nao2 = nao*(nao+1)/2;
nelec = isum( nat, atomic_number ) - charge;
nocc = nelec / 2;
if ( (nelec%2) != 0) {
if ( fp_prof ) {
fprintf( fp_prof, "ERROR: fragment(");
for ( int i=0; i<nmonomer; i++ )
fprintf( fp_prof, "%d%s", monomer_list[i],
( (i==(nmonomer-1)? ") " : "," ) ) );
fprintf( fp_prof, ": odd number of electron (%d)\n", nelec );
}
//return -1;
}
double *S = _Sfrag_, *H = _Hfrag_, *P = _Pfrag_, *U = _Ufrag_;
double *D = _D_, *C = _C_;
double *ev = _ev_;
memset( U, '\0', sizeof(double)*nao2 );
// temporary
//int eribfsz = 4096;
//int eribfsz = 1200;
//int eribfsz = 1024;
//int eribfsz = 480;
//int eribfsz = 2;
size_t eribfsz = 0;
//if (nintic>0) eribfsz = nintic*(sizeof(double)+4*sizeof(short))/1024/1024;
if (nintic>0) eribfsz = nintic;
else eribfsz = -nintic*8/1024/1024;
if ( fp_prof ) {fprintf(fp_prof,"buffer size = %ld\n", eribfsz); fflush(fp_prof);}
eribfsz /= omp_get_max_threads();
/* make joblist */
int *joblist_ifc4c = _joblist_ifc4c_;
int *joblist_ifc3c = _joblist_ifc3c_;
int nifc4c, nifc3c;
int njob_ifc3c, njob_ifc2c, uifc3c;
int njob_ifc4c, uifc4c;
ofmo_make_approx_level( nmonomer, monomer_list,
&nifc4c, joblist_ifc4c, &nifc3c, joblist_ifc3c, comm );
/*// Prepare dynamic load balancing using the global counter
uifc4c = maxlqn2 * maxlqn2;
njob_ifc4c = ((nifc4c-1) * uifc4c)<<7; // << 7 means *128
if ( nifc4c == 0 ) njob_ifc4c = 0;
uifc3c = (maxlqn+1) * maxlqn2;
njob_ifc3c = (nifc3c * uifc3c)<<4; // <<4 means *16
njob_ifc2c = nfrag * maxlqn2;
ofmo_gc_init( 2, comm, 0, njob_ifc4c );
ofmo_gc_init( 0, comm, 0, njob_ifc3c );
ofmo_gc_init( 1, comm, 0, njob_ifc2c );*/
ofmo_mt_init( comm );
ofmo_acc_proc_timer( tid_init );
/* thread-parallelized calculation of cutoff tables */
ofmo_start_proc_timer( tid_cutoff );
double **Dmons=_Dmon_;
double *D0 = _D0_;
double Enuc;
int mc = -2; /* global counter in local process */
#ifndef PARA_SUB
#pragma omp parallel
#else
int nnewjob;
int *newjoblist;
int amyrank, anprocs;
int color;
double *wjob;
#pragma omp parallel shared(nnewjob, newjoblist, amyrank, anprocs, color, wjob)
#endif
{
int i, ifrag, flag=false;
//int n;
// for profile
int mythread, nthreads;
mythread = omp_get_thread_num();
nthreads = omp_get_num_threads();
ofmo_start_thread_timer( cid_cutoff, mythread );
while (1) {
#pragma omp master
{
if ( NEW_SCC_STEP ) {
if ( is_root ) {
ofmo_get_monomer_aopop( -1, aopop_mon[0] );
ofmo_get_monomer_atpop( -1, atpop_mon[0] );
}
MPI_Bcast( aopop_mon[0], total_nfao, MPI_DOUBLE,
root, comm );
MPI_Bcast( atpop_mon[0], total_nfatom, MPI_DOUBLE,
root, comm );
NEW_SCC_STEP = false;
}
ofmo_construct_init_density( comm, nmonomer, monomer_list,
nao, fsao2tuao,
nao_total, msao2tuao, nfao,
D );
if ( nmonomer > 1 ) memcpy( D0, D, sizeof(double) * nao2 );
/* read the density matrices used in the ifc4c calculations */
for ( i=0; i<nifc4c; i++ ) {
ifrag = joblist_ifc4c[i];
ofmo_get_monomer_density( comm, ifrag, Dmons[i] );
}
flag = ( nthreads == 1 ? false : true );
} // pragma omp master
if ( flag == true ) break;
#pragma omp critical
{ i = mc; mc++; }
if ( i == -2 ) {
/* nuclear repulsion */
Enuc = ofmo_calc_nuclear_repulsion( nat, atomic_number,
atom_x, atom_y, atom_z );
} else if ( i == -1 ) {
ofmo_cutoff_make_table( maxlqn, flcs, shel_tem,
shel_atm, shel_add, atom_x, atom_y, atom_z,
prim_exp, prim_coe,
lcs_pair, csp_schwarz, csp_ics, csp_jcs,
csp_lps_pair, psp_zeta, psp_dkps, psp_xiza );
} else {
if ( i >= nifc4c ) break;
ifrag = joblist_ifc4c[i];
ofmo_cutoff_make_table( maxlqn, mlcs[ifrag],
mshel_tem[ifrag], mshel_atm[ifrag],
mshel_add[ifrag],
matom_x[ifrag], matom_y[ifrag], matom_z[ifrag],
mprim_exp[ifrag], mprim_coe[ifrag],
lcs_pair_mon[i], csp_schwarz_mon[i],
csp_ics_mon[i], csp_jcs_mon[i],
csp_lps_pair_mon[i], psp_zeta_mon[i],
psp_dkps_mon[i], psp_xiza_mon[i] );
}
} // while
ofmo_acc_thread_timer( cid_cutoff, mythread );
} // pragma omp parallel
ofmo_acc_proc_timer( tid_cutoff );
ofmo_acc_proc_timer( tid_Init );
ofmo_start_proc_timer( tid_integ );
ofmo_start_proc_timer( tid_Integ );
double scfd=tolscf, scfe;
if ( scfd <= 1.e-4 ) scfe = scfd * 1.e-2;
else if ( scfd <= 1.e-3 ) scfe = scfd * 1.e-1;
else scfe = scfd * 1.e-0;
if (scfe<1e-10) scfe=1e-10;
#if 0
float eps_ps4 = EPS_PS4;
float eps_eri = EPS_ERI;
float eps_sch = EPS_SCH;
#else
float eps_ps4 = pow(10.0,-itol);
float eps_sch = pow(10.0,-icut);
float eps_eri = eps_sch;
#endif
// float eps_fac = (scfe>1e-8)? scfe/1e-8: 1.0;
float eps_fac = (scfe>1e-7)? scfe/1e-8: 0.1F; // Increase accuracy for direct & fdiff run
eps_ps4 *= eps_fac;
eps_eri *= eps_fac;
eps_sch *= eps_fac;
if(fp_prof) {fprintf(fp_prof, "scfd scfe eps_ps4 eps_sch eps_eri: %8.2e %8.2e %8.2e %8.2e %8.2e\n", scfd, scfe, eps_ps4, eps_sch, eps_eri); fflush(fp_prof);}
ofmo_twoint_eps_ps4(eps_ps4*EPS_FAC_IFC4C);
ofmo_twoint_eps_sch(eps_sch*EPS_FAC_IFC4C);
ofmo_twoint_eps_eri(eps_eri*EPS_FAC_IFC4C);
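/* Added note: the screening thresholds above are derived from the
 * requested SCF tolerance (a looser SCF tolerance permits looser
 * screening), and the inter-fragment 4-center terms use thresholds
 * scaled by EPS_FAC_IFC4C (= 0.5), i.e. slightly tighter screening;
 * the unscaled thresholds are restored before the SCF itself below. */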
#pragma omp parallel
{
int nworkers, workerid;
int j, jfrag;
int jn;
int iat, k;
double *dU, *atpop_mt;
int local_id;
int mythread, nthreads;
// for control load-balancing
int offset = 0;
//
int mytype, tfrag, mypos, base;
mythread = omp_get_thread_num();
nthreads = omp_get_num_threads();
dU = dUmaster[mythread];
double *dUtmp = dUmaster2[mythread];
atpop_mt = _atpop_local_[mythread];
memset( dU, '\0', sizeof(double)*nao2 );
nworkers = ofmo_mt_get_nworkers();
workerid = ofmo_mt_get_workerid( mythread );
ofmo_start_thread_timer( cid_eri, mythread );
// for control load-balancing
ofmo_integ_set_loop_offset( mythread, offset );
/* ERI calculation ( 1st time ) */
ofmo_integ_twoint_first(
nworkers, workerid, eribfsz,
maxlqn, shel_atm, shel_ini, atom_x, atom_y, atom_z,
lcs_pair, csp_schwarz, csp_ics, csp_jcs, csp_lps_pair,
psp_zeta, psp_dkps, psp_xiza );
ofmo_acc_thread_timer( cid_eri, mythread );
{
int last_eri_type;
double dnmax, dnzeri, rate;
dnmax = (double)ofmo_twoint_get_max_nzeri( mythread );
dnzeri = (double)ofmo_twoint_get_stored_nzeri( mythread );
last_eri_type = ofmo_twoint_get_last_eri_type( mythread );
//if (fp_prof) {fprintf(fp_prof,"(%d) %d %ld/%ld\n",mythread,last_eri_type,(long)dnzeri,(long)dnmax);fflush(fp_prof);};
if ( last_eri_type < 21 ) { // assumes up to d functions
rate = (double)last_eri_type;
} else if ( dnmax > 0.e0 ) rate = dnzeri / dnmax * 100.e0;
else rate = 100.1e0;
ofmo_set_thread_timer( cid_buf, mythread, rate );
}
/* environmental potential */
/* 4-centered inter-fragment Coulomb term */
ofmo_start_thread_timer( cid_4c, mythread );
if ( myrank == 0 ) {
if ( mythread == 0 ) {
memset( S, '\0', sizeof(double)*nao2 );
memset( H, '\0', sizeof(double)*nao2 );
}
#pragma omp barrier
ofmo_integ_oneint_sorted( nthreads, mythread, maxlqn, flcs,
shel_tem, shel_atm, shel_add, shel_ini,
atom_x, atom_y, atom_z, prim_exp, prim_coe,
nat, atomic_number, S, H );
#pragma omp barrier
if ( mythread == 0 ) {
ofmo_projection_operator( nmonomer, monomer_list,
nao, fsao2tuao, S, P );
acc_array(nao2, P, H);
ofmo_scale_diag( nao, 0.5e0, H );
}
#pragma omp barrier
}
#pragma omp master
if(fp_prof) {fprintf(fp_prof,"(%d) calc_fragment ifc4c (%d)\n",myrank,nifc4c);}
#ifdef PARA_SUB
int njob = nifc4c;
#pragma omp master
{
newjoblist=(int*)malloc(njob*sizeof(int));
wjob=(double *)malloc(njob*sizeof(double)+njob*2*sizeof(int));
}
#if 0
#pragma omp master
for ( j=0; j<njob; j++ ) {
int Lab = maxlqn*(maxlqn+1)/2 + maxlqn;
wjob[j] = (double)(lcs_pair_mon[j])[Lab+1];
}
#else
#pragma omp barrier
#pragma omp for
for ( j=0; j<njob; j++ ) {
int Lab = maxlqn*(maxlqn+1)/2 + maxlqn;
int ncspair_f = lcs_pair[Lab+1];
int ncspair_m = (lcs_pair_mon[j])[Lab+1];
wjob[j]=(double)ncspair_f*ncspair_m;
//double eps_ps4 = ofmo_twoint_eps_ps4(0);
double eps_ps4_ifc4c = ofmo_twoint_eps_ps4(0);
size_t nps4=0;
for (int ii=0; ii<ncspair_f; ii++) {
for (int jj=0; jj<ncspair_m; jj++)
if (csp_schwarz[ii]*(csp_schwarz_mon[j])[jj]>=eps_ps4_ifc4c) nps4++;
}
wjob[j]=(double)nps4;
}
#endif
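/* Added note (a reading of the PARA_SUB scheme, not stated in the
 * original source): each ifc4c job is weighted by the number of
 * shell-pair products that survive Schwarz screening, as a proxy for
 * its integral cost; the master thread then aggregates the jobs into
 * tasks and appears to partition the MPI ranks among them in
 * proportion to those weights. */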
#pragma omp master
{
int *atask=(int *)(wjob+njob);
int *res=atask+njob;
int natask=ofmo_aggregateTask(nprocs, njob, wjob, atask);
//if (myrank==0) printf("%d %d -> %d %d\n",nprocs, njob, nprocs, natask);
ofmo_assignRes(nprocs, natask, wjob);
for ( j=0; j<natask; j++ ) res[j]=(int)wjob[j];
color=ofmo_getMyColorAndRankFromAssignedRes(myrank,res,&amyrank);
anprocs = res[color];
nnewjob=0;
for (j=0; j<njob; j++) if (atask[j]==color) newjoblist[nnewjob++]=j;
free(wjob);
}
#ifdef USE_CUDA
cuda_Reconfig(amyrank, anprocs, comm);
#endif
#pragma omp barrier
ofmo_integ_set_loop_offset( mythread, 0 );
#endif /* PARA_SUB */
#ifdef USE_CUDA
float *csp_schwarz_f;
{
int nat_f=nat, nao_f=nao, ncs_f=ncs;
int nat_m=0, nao_m=0, ncs_m=0;
int Lab = maxlqn*(maxlqn+1)/2 + maxlqn;
int ncspair_f = lcs_pair[Lab+1];
int npspair_f = csp_lps_pair[ncspair_f];
int ncspair_m = 0;
int npspair_m = 0;
int max_num_klcs = 0;
#ifndef PARA_SUB
for ( j=0; j<nifc4c; j++ ) {
#else
for (int aj=0; aj<nnewjob; aj++ ) {
j = newjoblist[aj];
#endif
jfrag = joblist_ifc4c[j];
nat_m = MAX2(nat_m, nfatom[jfrag]);
ncs_m = MAX2(ncs_m, nfcs[jfrag]);
nao_m = MAX2(nao_m, nfao[jfrag]);
int ncspair_m0 = (lcs_pair_mon[j])[Lab+1];
ncspair_m = MAX2(ncspair_m, ncspair_m0);
npspair_m = MAX2(npspair_m, (csp_lps_pair_mon[j])[ncspair_m0]);
max_num_klcs = MAX2(max_num_klcs, cuda_max_num_klcs(maxlqn, lcs_pair_mon[j]));
}
csp_schwarz_f = (float*)malloc(sizeof(float)*MAX2(ncspair_f,ncspair_m));
cuda_ifc4c_Init(maxlqn, max_num_klcs,
nat_f, ncs_f, nao_f, ncspair_f, npspair_f,
nat_m, ncs_m, nao_m, ncspair_m, npspair_m );
for (int ii=0; ii<ncspair_f; ii++)
csp_schwarz_f[ii]=(float)csp_schwarz[ii];
cuda_ifc4c_SetData(0,
maxlqn, nat_f, ncs_f, nao_f, ncspair_f, npspair_f,
shel_atm, shel_ini, atom_x, atom_y, atom_z,
lcs_pair, csp_lps_pair, csp_ics, csp_jcs,
psp_zeta, psp_dkps, psp_xiza,
csp_schwarz_f, NULL);
}
#endif
memset( dUtmp, '\0', sizeof(double)*nao2 );
#ifndef PARA_SUB
for ( j=0; j<nifc4c; j++ ) {
#else
for (int aj=0; aj<nnewjob; aj++ ) {
j = newjoblist[aj];
#endif
jfrag = joblist_ifc4c[j];
#ifdef USE_CUDA
int nat_mon = nfatom[jfrag];
int ncs_mon = nfcs[jfrag];
int nao_mon = nfao[jfrag];
int Lab = maxlqn*(maxlqn+1)/2 + maxlqn;
int ncspair_mon = (lcs_pair_mon[j])[Lab+1];
int npspair_mon = (csp_lps_pair_mon[j])[ncspair_mon];
for (int ii=0; ii<ncspair_mon; ii++)
csp_schwarz_f[ii]=(float)(csp_schwarz_mon[j])[ii];
cuda_ifc4c_SetData(1,
maxlqn, nat_mon, ncs_mon, nao_mon,
ncspair_mon, npspair_mon,
mshel_atm[jfrag], mshel_ini[jfrag],
matom_x[jfrag], matom_y[jfrag], matom_z[jfrag],
lcs_pair_mon[j], csp_lps_pair_mon[j],
csp_ics_mon[j], csp_jcs_mon[j],
psp_zeta_mon[j], psp_dkps_mon[j], psp_xiza_mon[j],
csp_schwarz_f, Dmons[j]);
#endif
#ifndef PARA_SUB
ofmo_integ_ifc4c_sorted_partial( nworkers, workerid,
#else
int anworkers = anprocs * omp_get_num_threads();
int aworkerid = amyrank * omp_get_num_threads() + omp_get_thread_num();
ofmo_integ_ifc4c_sorted_partial( anworkers, aworkerid,
#endif
maxlqn, shel_atm, shel_ini, atom_x, atom_y, atom_z,
lcs_pair, csp_schwarz, csp_ics, csp_jcs,
csp_lps_pair, psp_zeta, psp_dkps, psp_xiza,
mshel_atm[jfrag], mshel_ini[jfrag],
matom_x[jfrag], matom_y[jfrag], matom_z[jfrag],
lcs_pair_mon[j],
csp_schwarz_mon[j], csp_ics_mon[j], csp_jcs_mon[j],
csp_lps_pair_mon[j],
psp_zeta_mon[j], psp_dkps_mon[j], psp_xiza_mon[j],
mlcs[jfrag],
nfao[jfrag], Dmons[j], dUtmp );
}
#ifdef USE_CUDA
Free(csp_schwarz_f);
cuda_ifc4c_GetVfrg(nao, dUtmp);
cuda_ifc4c_Finalize();
#endif
#ifdef PARA_SUB
#pragma omp barrier
#pragma omp master
free(newjoblist);
#ifdef USE_CUDA
cuda_Reconfig(myrank, nprocs, comm);
#endif
ofmo_integ_set_loop_offset( mythread, 0 );
#endif /* PARA_SUB */
acc_array( nao2, dUtmp, dU );
//for ( int ii=0; ii<nao2; ii++ ) dU[ii] *= 2.e0;
ofmo_dscale( nao2, 2.e0, dU );
ofmo_acc_thread_timer( cid_4c, mythread );
/* 3-centered inter-fragment Coulomb term */
ofmo_start_thread_timer( cid_3c, mythread );
//if(fp_prof) {fprintf(fp_prof,"calc_fragment ifc3c\n");fflush(fp_prof);}
memset( dUtmp, '\0', sizeof(double)*nao2 );
for ( j=0,jn=0; j<nifc3c; j++ ) {
jfrag = joblist_ifc3c[j];
ofmo_integ_ifc3c_sorted_partial( nworkers, workerid,
maxlqn, shel_atm, shel_ini, atom_x, atom_y, atom_z,
lcs_pair, csp_ics, csp_jcs, csp_lps_pair,
psp_zeta, psp_dkps, psp_xiza,
mlcs[jfrag],
mshel_tem[jfrag], mshel_atm[jfrag],
mshel_add[jfrag], mshel_ini[jfrag],
matom_x[jfrag], matom_y[jfrag], matom_z[jfrag],
mprim_exp[jfrag], mprim_coe[jfrag],
aopop_mon[jfrag], dUtmp );
}
acc_array( nao2, dUtmp, dU );
ofmo_acc_thread_timer( cid_3c, mythread );
// debug
//for ( int ii=0; ii<nao2; ii++ ) dU[ii] = 0.e0;
/* 2-centered inter-fragment Coulomb term */
ofmo_start_thread_timer( cid_2c, mythread );
//if(fp_prof) {fprintf(fp_prof,"calc_fragment ifc2c\n");fflush(fp_prof);}
memset( dUtmp, '\0', sizeof(double)*nao2 );
for ( j=myrank, jn=0; j<nfrag; j+=nprocs ) {
if (jn++>NPARTIAL) {
acc_array( nao2, dUtmp, dU );
memset( dUtmp, '\0', sizeof(double)*nao2 );
jn=0;
}
jfrag = frag_order[j];
if ( is_in_fragment( jfrag, nmonomer, monomer_list) ) continue;
//nat_mon = nfatom[jfrag];
switch( ofmo_get_approx_level(jfrag) ) {
case OFMO_IFC4C:
case OFMO_IFC3C:
for ( iat=0; iat<nfatom[jfrag]; iat++ )
atpop_mt[iat] = (double)matomic_number[jfrag][iat];
break;
case OFMO_IFC2C:
for ( iat=0; iat<nfatom[jfrag]; iat++ )
atpop_mt[iat] = (double)matomic_number[jfrag][iat]
- atpop_mon[jfrag][iat];
}
ofmo_integ_ifc2c_sorted_partial( nthreads, mythread, maxlqn,
flcs, shel_tem, shel_atm, shel_add, shel_ini,
atom_x, atom_y, atom_z, prim_exp, prim_coe,
nfatom[jfrag], matom_x[jfrag], matom_y[jfrag],
matom_z[jfrag], atpop_mt, dUtmp );
//matom_z[jfrag], atpop, _WORK_ );
}
acc_array( nao2, dUtmp, dU );
// accumulate env.pot in local process
#pragma omp critical
acc_array( nao2, dU, U );
ofmo_acc_thread_timer( cid_2c, mythread );
} // #pragma omp parallel
ofmo_scale_diag( nao, 0.5e0, U );
ofmo_acc_proc_timer( tid_integ );
ofmo_start_proc_timer( tid_integ );
//if(fp_prof) {fprintf(fp_prof,"calc_fragment reduction\n");fflush(fp_prof);}
/* reduction */
ofmo_start_proc_timer( tid_comm );
double *WORK = _WORK_;
MPI_Allreduce( U, WORK, nao2, MPI_DOUBLE, MPI_SUM, comm );
memcpy( U, WORK, sizeof(double)*nao2 );
MPI_Bcast( S, nao2, MPI_DOUBLE, 0, comm );
MPI_Bcast( H, nao2, MPI_DOUBLE, 0, comm );
acc_array(nao2, U, H);
ofmo_acc_proc_timer( tid_comm );
ofmo_acc_proc_timer( tid_Integ );
// profile
if ( fp_prof ) {
if ( nmonomer == 1 )
fprintf(fp_prof,
"**M frag= %4d nat= %3d ncs= %3d nao= %3d"
" nifc4c= %3d nifc3c= %3d\n",
monomer_list[0], nat, ncs, nao, nifc4c, nifc3c );
else if ( nmonomer == 2 ) {
fprintf(fp_prof,
"**D frag= %4d %4d nat= %3d ncs= %3d nao= %3d"
" nifc4c= %3d nifc3c= %3d\n",
monomer_list[0], monomer_list[1], nat, ncs, nao,
nifc4c, nifc3c );
}
fflush(fp_prof);
}
/* SCF calculation */
ofmo_start_proc_timer( tid_SCF );
//if(fp_prof) {fprintf(fp_prof,"calc_fragment scf_rhf\n");fflush(fp_prof);}
/*
double scfd=tolscf, scfe;
if ( scfd <= 1.e-4 ) scfe = scfd * 1.e-3;
else if ( scfd <= 1.e-3 ) scfe = scfd * 1.e-2;
else scfe = scfd * 1.e-1;
*/
ofmo_twoint_eps_ps4(eps_ps4);
ofmo_twoint_eps_eri(eps_eri);
ofmo_twoint_eps_sch(eps_sch);
if ( level == OFMO_RHF ) {
ofmo_scf_set_convType((nmonomer==1)? scc: scf); // Should be in args.
ofmo_scf_rhf(
comm, maxlqn, Enuc, ncs, nao,
flcs, shel_atm, shel_ini, atom_x, atom_y, atom_z,
lcs_pair, csp_schwarz, csp_ics, csp_jcs,
csp_lps_pair, psp_zeta, psp_dkps, psp_xiza,
nat, nocc, S, H, maxscf, scfe, scfd,
D, C, ev, energy );
} else {
if ( fp_prof )
fdbg( fp_prof, "ERROR: method %d is not supported\n", level );
return -1;
}
/* energies */
double dv;
dv = 4.0e0*ofmo_dot_product( nao2, D, U );
*energy0 = *energy - dv;
/* Mulliken population */
double *aopop = _daopop_, *atpop = _datpop_;
if ( nmonomer == 1 ) {
int ifrag = monomer_list[0];
ofmo_scf_mulliken_population(
nat, nao, maxlqn, flcs, shel_atm, shel_ini, S, D,
aopop, atpop );
if ( is_root ) {
ofmo_put_monomer_density( ifrag, D );
ofmo_put_monomer_aopop( ifrag, aopop );
ofmo_put_monomer_atpop( ifrag, atpop );
ofmo_put_monomer_energy( ifrag, energy );
ofmo_put_monomer_energy0( ifrag, energy0 );
}
if ( fp_prof ) {
fprintf(fp_prof, "# E= %16.10f E-DV= %16.10f DV= %16.10f\n",
*energy, *energy0, dv);
fflush( fp_prof );
}
} else if (nmonomer == 2) {
int i;
for ( i=0; i<nao2; i++ ) D0[i]-=D[i];
ofmo_scf_mulliken_population(
nat, nao, maxlqn, flcs, shel_atm, shel_ini, S, D0,
aopop, atpop );
for ( i=0; i<nat; i++ ) atpop[i] *= -1.e0;
for ( i=0; i<nao; i++ ) aopop[i] *= -1.e0;
*ddv = -4.0e0 * ofmo_dot_product( nao2, D0, U );
if ( fp_prof ) {
fprintf( fp_prof,
"# E= %16.10f E-DV= %16.10f DV= %16.10f "
"DDV= %16.10f\n", *energy, *energy0, dv, *ddv);
fflush( fp_prof );
}
// Copy the variables passed to ofmo-worker-main (the root rank alone would suffice)
*fnao = nao;
*fnat = nat;
memcpy( daopop, aopop, sizeof(double) * nao );
memcpy( sao2tao, fsao2tuao, sizeof(int) * nao );
memcpy( datpop, atpop, sizeof(double) * nat );
memcpy( fatom2tatom, fat2tat, sizeof(int) * nat );
}
ofmo_acc_proc_timer( tid_SCF );
ofmo_acc_proc_timer( tid_Total );
// timer
ofmo_show_thread_timer_all();
ofmo_show_proc_timer_all();
return 0;
}
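/* Hedged sketch (added; assumes ofmo_dot_product is a plain dot product
 * over the packed elements): how the environmental-potential energy is
 * split off above.  With the diagonal of U pre-scaled by 0.5, summing
 * D[i]*U[i] over the packed triangle counts each off-diagonal element
 * once, and the factor 4 accounts for matrix symmetry and double
 * occupancy. */
static double example_energy_without_envpot( const int nao2,
        const double D[], const double U[], const double energy ) {
    double dv = 0.e0;
    for ( int i=0; i<nao2; i++ ) dv += D[i] * U[i];
    return energy - 4.e0 * dv;
}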
static double ofmo_calc_enucij(
const int nati, const int atni[],
const double xi[], const double yi[], const double zi[],
const int natj, const int atnj[],
const double xj[], const double yj[], const double zj[] ) {
int iat, jat;
double A[3], AB[3], AB2;
double qi, qj, enuc = 0.e0;
for ( iat=0; iat<nati; iat++ ) {
qi = (double)atni[iat];
A[0] = xi[iat];
A[1] = yi[iat];
A[2] = zi[iat];
for ( jat=0; jat<natj; jat++ ) {
qj = (double)atnj[jat];
AB[0] = A[0] - xj[jat];
AB[1] = A[1] - yj[jat];
AB[2] = A[2] - zj[jat];
AB2 = AB[0]*AB[0] + AB[1]*AB[1] + AB[2]*AB[2];
if ( AB2 < 1.e-8 ) continue;
enuc += qi*qj * sqrt(1.e0/AB2);
}
}
return enuc;
}
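/* Added note: the routine above evaluates the inter-fragment nuclear
 * repulsion  E_nuc(I,J) = sum_{i in I} sum_{j in J} Z_i Z_j / |R_i - R_j|,
 * skipping pairs with |R_i - R_j|^2 < 1e-8 to avoid dividing by zero for
 * atoms shared between the two fragments. */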
/** Perform all approximate (ES) dimer calculations
 *
 * Several dimers are batched together and
 * computed in a single call.
 *
 * @param[out] *energy0 Sum of the ES-dimer energies
*
* @ingroup ofmo-calc
* */
int ofmo_calc_es_dimer( MPI_Comm comm, int njob, int joblist[],
double *energy0 ) {
static int nfrag, *nfao, maxnfao, maxnfatom, maxnfcs;
static int maxlqn, **mlcs;
static int **mshel_tem, **mshel_atm, **mshel_add, **mshel_ini;
static double **matom_x, **matom_y, **matom_z;
static double **mprim_exp, **mprim_coe;
static int *nfatom, **matomic_number, *nfcs, *icharge;
static int **msao2tuao, natom, nao, **ifatom, dold;
static int maxnpspair, nbody;
static double ldimer;
static int type4c[6*6];
static int called = false;
#if 1
#ifdef USE_CUDA
static int first = true;
if (first) {
cuda_print_wifc4c();
}
#endif
#endif
//MPI_Status status;
//int tag = 101;
if ( !called ) {
int ierr, maxlqn2;
ierr = ofmo_data_get_vals(
"nfrag nfao maxnfao maxnfcs maxlqn maxnfatom "
"mlcs mshel_tem mshel_atm mshel_add mshel_ini "
"mprim_exp mprim_coe icharg "
"matx maty matz matn nfatom "
"msao2tuao dold nao natom ifatom "
"nfcs maxnpspair nbody "
"ldim",
&nfrag, &nfao, &maxnfao, &maxnfcs, &maxlqn, &maxnfatom,
&mlcs, &mshel_tem, &mshel_atm, &mshel_add,
&mshel_ini,
&mprim_exp, &mprim_coe, &icharge,
&matom_x, &matom_y, &matom_z, &matomic_number, &nfatom,
&msao2tuao, &dold, &nao, &natom, &ifatom,
&nfcs, &maxnpspair, &nbody,
&ldimer);
if ( ierr != 0 ) return -1;
alloc();
maxlqn2 = ((maxlqn+1)*(maxlqn+2))>>1;
if ( maxlqn == 0 ) {
type4c[0] = 0;
} else if ( maxlqn <= 2 ) {
int Lab, Lcd, ix;
ix=0;
for ( Lab=0; Lab<maxlqn2; Lab++ ) {
for ( Lcd=0; Lcd<maxlqn2; Lcd++ ) {
type4c[ix] = Lab*6 + Lcd;
ix++;
}
}
} else return -1;
called = true;
}
// for profile
int cid_init, cid_integ;
int tid_init, tid_integ, tid_intred, tid_all;
cid_init = ofmo_create_thread_timer( "INIT", 0 );
cid_integ = ofmo_create_thread_timer( "INTEG", 0 );
tid_init = ofmo_create_proc_timer( "init", 0 );
tid_integ = ofmo_create_proc_timer( "integ", 0 );
tid_intred = ofmo_create_proc_timer( "integ+reduce", 0 );
tid_all = ofmo_create_proc_timer( "all", 0 );
ofmo_start_proc_timer( tid_init );
ofmo_start_proc_timer( tid_all );
//int count;
double t0[MAXNJOB*3], t[MAXNJOB*3];
double *dE, *UjDi, *UiDj, Etot[MAXNJOB];
double enucij[MAXNJOB];
double **Di, **Dj;
double energy_es_dimer = 0.e0;
dE = &t0[MAXNJOB*0];
UjDi = &t0[MAXNJOB*1];
UiDj = &t0[MAXNJOB*2];
/* for cutoff table */
/* ifrags */
int **lcs_pair_i;
int **csp_ics_i, **csp_jcs_i, **csp_lps_pair_i;
double **csp_schwarz_i;
double **psp_zeta_i, **psp_dkps_i, **psp_xiza_i;
/* jfrags */
int **lcs_pair_j;
int **csp_ics_j, **csp_jcs_j, **csp_lps_pair_j;
double **csp_schwarz_j;
double **psp_zeta_j, **psp_dkps_j, **psp_xiza_j;
lcs_pair_i = &lcs_pair_mon[0];
csp_ics_i = &csp_ics_mon[0];
csp_jcs_i = &csp_jcs_mon[0];
csp_lps_pair_i = &csp_lps_pair_mon[0];
csp_schwarz_i = &csp_schwarz_mon[0];
psp_zeta_i = &psp_zeta_mon[0];
psp_dkps_i = &psp_dkps_mon[0];
psp_xiza_i = &psp_xiza_mon[0];
lcs_pair_j = &lcs_pair_mon[MAXNJOB];
csp_ics_j = &csp_ics_mon[MAXNJOB];
csp_jcs_j = &csp_jcs_mon[MAXNJOB];
csp_lps_pair_j = &csp_lps_pair_mon[MAXNJOB];
csp_schwarz_j = &csp_schwarz_mon[MAXNJOB];
psp_zeta_j = &psp_zeta_mon[MAXNJOB];
psp_dkps_j = &psp_dkps_mon[MAXNJOB];
psp_xiza_j = &psp_xiza_mon[MAXNJOB];
// monomer density matrices
Di = &_Dmon_[0];
Dj = &_Dmon_[MAXNJOB];
/* communicator */
int nprocs, myrank, root=0, is_root;
MPI_Comm_size( comm, &nprocs );
MPI_Comm_rank( comm, &myrank );
is_root = ( myrank == root );
/* */
if ( njob > MAXNJOB ) {
if ( fp_prof )
fdbg( fp_prof, "Illegal number of jobs (%d)\n", njob );
MPI_Abort( MPI_COMM_WORLD, 1 );
} else if ( fp_prof ) {
fprintf( fp_prof, "#ES njob= %2d\n", njob );
fflush( fp_prof );
}
// Initialization
for ( int ii=0; ii<njob; ii++ )
dE[ii] = UiDj[ii] = UjDi[ii] = enucij[ii] = 0.e0;
*energy0 = 0.e0;
// Dynamic load balancing
// (split the work of each integral type into 128 pieces)
/*int njob_ifc4c, uifc4c, maxlqn2;
maxlqn2 = ((maxlqn+1)*(maxlqn+2))>>1;
uifc4c = maxlqn2 * maxlqn2;
njob_ifc4c = (njob*uifc4c)<<7; // << 7 means "*128"
ofmo_mt_init( comm );
ofmo_gc_init( 2, comm, 0, njob_ifc4c );*/
//ofmo_gc_set_debug_mode();
// Cutoff-table calculation and density-matrix retrieval
int icut, ncut;
ncut = njob * 2; // number of cutoff tables to compute
icut = 0;
ofmo_acc_proc_timer( tid_init );
ofmo_start_proc_timer( tid_integ );
ofmo_start_proc_timer( tid_intred );
#ifndef PARA_SUB
#pragma omp parallel
#else
int nnewjob;
int *newjoblist;
int amyrank, anprocs;
int color;
double *wjob;
#pragma omp parallel shared(nnewjob, newjoblist, amyrank, anprocs, color, wjob)
#endif
{
int mythread, nthreads;
int ifrag, jfrag, ni, nj, ni2, nj2;
int ijob, mycut, is_odd;
double *Utmp, *charge;
double dE_tmp;
// for dynamic load-balancing
//int offset;
int nworkers, workerid, vnworkers, vworkerid;
int k, mytype, mypos, base;
mythread = omp_get_thread_num();
nthreads = omp_get_num_threads();
ofmo_start_thread_timer( cid_init, mythread );
Utmp = dUmaster[mythread];
charge = _atpop_local_[mythread];
//
nworkers = ofmo_mt_get_nworkers();
workerid = ofmo_mt_get_workerid( mythread );
vnworkers = nworkers - 1;
vworkerid = workerid - 1;
// On the master thread, fetch the monomer density-matrix data
if ( mythread == 0 ) {
for ( ijob=0; ijob<njob; ijob++ ) {
ifrag = joblist[ijob*2+0];
jfrag = joblist[ijob*2+1];
ni = nfao[ifrag];
nj = nfao[jfrag];
ni2 = (ni*ni+ni)>>1;
nj2 = (nj*nj+nj)>>1;
ofmo_get_monomer_density( comm, ifrag, Di[ijob] );
ofmo_get_monomer_density( comm, jfrag, Dj[ijob] );
}
}
#pragma omp critical
{
mycut = icut;
icut++;
}
while ( mycut < ncut ) {
ifrag = joblist[mycut];
ijob = mycut>>1;
is_odd = ( mycut & 0x01 );
if ( is_odd ) {
/* make cutoff table j */
ofmo_cutoff_make_table( maxlqn, mlcs[ifrag],
mshel_tem[ifrag], mshel_atm[ifrag],
mshel_add[ifrag],
matom_x[ifrag], matom_y[ifrag],
matom_z[ifrag],
mprim_exp[ifrag], mprim_coe[ifrag],
lcs_pair_j[ijob],
csp_schwarz_j[ijob], csp_ics_j[ijob],
csp_jcs_j[ijob], csp_lps_pair_j[ijob],
psp_zeta_j[ijob], psp_dkps_j[ijob],
psp_xiza_j[ijob] );
} else {
/* make cutoff table i */
ofmo_cutoff_make_table( maxlqn, mlcs[ifrag],
mshel_tem[ifrag], mshel_atm[ifrag],
mshel_add[ifrag],
matom_x[ifrag], matom_y[ifrag],
matom_z[ifrag],
mprim_exp[ifrag], mprim_coe[ifrag],
lcs_pair_i[ijob],
csp_schwarz_i[ijob], csp_ics_i[ijob],
csp_jcs_i[ijob], csp_lps_pair_i[ijob],
psp_zeta_i[ijob], psp_dkps_i[ijob],
psp_xiza_i[ijob] );
}
#pragma omp critical
{
mycut = icut;
icut++;
}
} // while ( mycut < ncut );
// ---- At this point, the cutoff-table calculation and the
// ---- density-matrix retrieval have completed
ofmo_acc_thread_timer( cid_init, mythread );
#pragma omp barrier
ofmo_start_thread_timer( cid_integ, mythread );
// All threads except the counter-master thread perform the calculation
if ( workerid != 0 ) {
// First, compute the two-center Coulomb interactions with static load balancing
for ( ijob=0; ijob<njob; ijob++ ) {
ifrag = joblist[ijob*2+0];
jfrag = joblist[ijob*2+1];
ni = nfao[ifrag];
nj = nfao[jfrag];
ni2 = (ni*ni+ni)>>1;
nj2 = (nj*nj+nj)>>1;
/* UjDi */
for ( k=0; k<nfatom[jfrag]; k++ )
charge[k] = (double)matomic_number[jfrag][k];
memset( Utmp, '\0', sizeof(double)*ni2 );
ofmo_integ_ifc2c_sorted_partial( vnworkers, vworkerid,
maxlqn, mlcs[ifrag], mshel_tem[ifrag],
mshel_atm[ifrag], mshel_add[ifrag],
mshel_ini[ifrag],
matom_x[ifrag], matom_y[ifrag], matom_z[ifrag],
mprim_exp[ifrag], mprim_coe[ifrag],
nfatom[jfrag],
matom_x[jfrag], matom_y[jfrag], matom_z[jfrag],
charge, Utmp );
ofmo_scale_diag( ni, 0.5e0, Utmp );
dE_tmp = 4.e0 * ofmo_dot_product( ni2, Di[ijob], Utmp );
#ifdef FJ_COMP
#pragma omp critical
UjDi[ijob] += dE_tmp;
#else
#pragma omp atomic
UjDi[ijob] += dE_tmp;
#endif
/* UiDj */
for ( k=0; k<nfatom[ifrag]; k++ )
charge[k] = (double)matomic_number[ifrag][k];
memset( Utmp, '\0', sizeof(double)*nj2 );
ofmo_integ_ifc2c_sorted_partial( vnworkers, vworkerid,
maxlqn, mlcs[jfrag], mshel_tem[jfrag],
mshel_atm[jfrag], mshel_add[jfrag],
mshel_ini[jfrag],
matom_x[jfrag], matom_y[jfrag], matom_z[jfrag],
mprim_exp[jfrag], mprim_coe[jfrag],
nfatom[ifrag],
matom_x[ifrag], matom_y[ifrag], matom_z[ifrag],
charge, Utmp );
ofmo_scale_diag( nj, 0.5e0, Utmp );
dE_tmp = 4.e0 * ofmo_dot_product( nj2, Dj[ijob], Utmp );
#ifdef FJ_COMP
#pragma omp critical
UiDj[ijob] += dE_tmp;
#else
#pragma omp atomic
UiDj[ijob] += dE_tmp;
#endif
} // for ( ijob=0; ; )
} else { // if ( workerid != 0 )
// The counter-master thread computes the internuclear repulsion energies
for ( ijob=0; ijob<njob; ijob++ ) {
ifrag = joblist[ijob*2+0];
jfrag = joblist[ijob*2+1];
enucij[ijob] = ofmo_calc_enucij(
nfatom[ifrag], matomic_number[ifrag],
matom_x[ifrag], matom_y[ifrag],
matom_z[ifrag],
nfatom[jfrag], matomic_number[jfrag],
matom_x[jfrag], matom_y[jfrag],
matom_z[jfrag] );
}
} // if ( workerid != 0 )
// Next, compute the four-center Coulomb interaction terms with static load balancing
// for control load-balancing
ofmo_integ_set_loop_offset( mythread, 0 );
#ifdef PARA_SUB
#pragma omp master
{
newjoblist=(int*)malloc(njob*sizeof(int));
wjob=(double *)malloc(njob*sizeof(double)+njob*2*sizeof(int));
}
#pragma omp barrier
#pragma omp for
for ( ijob=0; ijob<njob; ijob++ ) {
int Lab = maxlqn*(maxlqn+1)/2 + maxlqn;
int ncspair_i = (lcs_pair_i[ijob])[Lab+1];
int ncspair_j = (lcs_pair_j[ijob])[Lab+1];
wjob[ijob]=(double)ncspair_i*ncspair_j;
#if 1
double eps_ps4 = ofmo_twoint_eps_ps4(0);
size_t nps4=0;
for (int ii=0;ii<ncspair_i;ii++) {
for (int jj=0;jj<ncspair_j;jj++)
if ((csp_schwarz_i[ijob])[ii]*(csp_schwarz_j[ijob])[jj]>=eps_ps4) nps4++;
}
wjob[ijob]=(double)nps4;
#endif
}
#pragma omp master
{
int *atask=(int *)(wjob+njob);
int *res=atask+njob;
int natask=ofmo_aggregateTask(nprocs, njob, wjob, atask);
//if (myrank==0) printf("%d %d -> %d %d\n",nprocs, njob, nprocs, natask);
ofmo_assignRes(nprocs, natask, wjob);
for (int j=0; j<natask; j++ ) res[j]=(int)wjob[j];
color=ofmo_getMyColorAndRankFromAssignedRes(myrank,res,&amyrank);
anprocs = res[color];
nnewjob=0;
for (int j=0; j<njob; j++) if (atask[j]==color) newjoblist[nnewjob++]=j;
free(wjob);
}
#ifdef USE_CUDA
cuda_Reconfig(amyrank, anprocs, comm);
#endif
#pragma omp barrier
ofmo_integ_set_loop_offset( mythread, 0 );
#endif /* PARA_SUB */
#ifdef USE_CUDA
float *csp_schwarz_f;
{
int nat_i=0, ncs_i=0, nao_i=0, ncspair_i=0, npspair_i=0;
int nat_j=0, ncs_j=0, nao_j=0, ncspair_j=0, npspair_j=0;
int Lab = maxlqn*(maxlqn+1)/2 + maxlqn;
int max_num_klcs = 0;
#ifndef PARA_SUB
for ( ijob=0; ijob<njob; ijob++ ) {
#else
for (int aj=0; aj<nnewjob; aj++ ) {
ijob = newjoblist[aj];
#endif
ifrag = joblist[ijob*2+0];
jfrag = joblist[ijob*2+1];
nat_i = MAX2(nat_i, nfatom[ifrag]);
nat_j = MAX2(nat_j, nfatom[jfrag]);
ncs_i = MAX2(ncs_i, nfcs[ifrag]);
ncs_j = MAX2(ncs_j, nfcs[jfrag]);
nao_i = MAX2(nao_i, nfao[ifrag]);
nao_j = MAX2(nao_j, nfao[jfrag]);
int ncspair_t;
ncspair_t = (lcs_pair_i[ijob])[Lab+1];
ncspair_i = MAX2(ncspair_i, ncspair_t);
npspair_i = MAX2(npspair_i, (csp_lps_pair_i[ijob])[ncspair_t]);
ncspair_t = (lcs_pair_j[ijob])[Lab+1];
ncspair_j = MAX2(ncspair_j, ncspair_t);
npspair_j = MAX2(npspair_j, (csp_lps_pair_j[ijob])[ncspair_t]);
int max_num_klcs_t = cuda_max_num_klcs(maxlqn, lcs_pair_j[ijob]);
max_num_klcs = MAX2(max_num_klcs, max_num_klcs_t);
}
cuda_ifc4c_Init(maxlqn, max_num_klcs,
nat_i, ncs_i, nao_i, ncspair_i, npspair_i,
nat_j, ncs_j, nao_j, ncspair_j, npspair_j);
csp_schwarz_f = (float*)malloc(sizeof(float)*MAX2(ncspair_i,ncspair_j));
}
#endif
//while ( (k=ofmo_gc_nxtval(2)) < njob_ifc4c ) {
#ifndef PARA_SUB
for ( ijob=0; ijob<njob; ijob++ ) {
#else
for (int aj=0; aj<nnewjob; aj++ ) {
ijob = newjoblist[aj];
#endif
ifrag = joblist[ijob*2+0];
jfrag = joblist[ijob*2+1];
ni = nfao[ifrag];
nj = nfao[jfrag];
ni2 = ni*(ni+1)/2;
nj2 = nj*(nj+1)/2;
memset( Utmp, '\0', sizeof(double)*ni2 );
#ifdef USE_CUDA
int Lab = maxlqn*(maxlqn+1)/2 + maxlqn;
int nat_i = nfatom[ifrag];
int ncs_i = nfcs[ifrag];
int nao_i = nfao[ifrag];
int ncspair_i = (lcs_pair_i[ijob])[Lab+1];
int npspair_i = (csp_lps_pair_i[ijob])[ncspair_i];
for (int ii=0; ii<ncspair_i; ii++)
csp_schwarz_f[ii]=(float)(csp_schwarz_i[ijob])[ii];
cuda_ifc4c_SetData(0,
maxlqn, nat_i, ncs_i, nao_i,
ncspair_i, npspair_i,
mshel_atm[ifrag], mshel_ini[ifrag],
matom_x[ifrag], matom_y[ifrag], matom_z[ifrag],
lcs_pair_i[ijob], csp_lps_pair_i[ijob],
csp_ics_i[ijob], csp_jcs_i[ijob],
psp_zeta_i[ijob], psp_dkps_i[ijob], psp_xiza_i[ijob],
csp_schwarz_f, NULL);
int nat_j = nfatom[jfrag];
int ncs_j = nfcs[jfrag];
int nao_j = nfao[jfrag];
int ncspair_j = (lcs_pair_j[ijob])[Lab+1];
int npspair_j = (csp_lps_pair_j[ijob])[ncspair_j];
for (int ii=0; ii<ncspair_j; ii++)
csp_schwarz_f[ii]=(float)(csp_schwarz_j[ijob])[ii];
cuda_ifc4c_SetData(1,
maxlqn, nat_j, ncs_j, nao_j,
ncspair_j, npspair_j,
mshel_atm[jfrag], mshel_ini[jfrag],
matom_x[jfrag], matom_y[jfrag], matom_z[jfrag],
lcs_pair_j[ijob], csp_lps_pair_j[ijob],
csp_ics_j[ijob], csp_jcs_j[ijob],
psp_zeta_j[ijob], psp_dkps_j[ijob], psp_xiza_j[ijob],
csp_schwarz_f, Dj[ijob]);
#endif
#ifndef PARA_SUB
ofmo_integ_ifc4c_sorted_partial( nworkers, workerid,
#else
int anworkers = anprocs * omp_get_num_threads();
int aworkerid = amyrank * omp_get_num_threads() + omp_get_thread_num();
ofmo_integ_ifc4c_sorted_partial( anworkers, aworkerid,
#endif
maxlqn,
mshel_atm[ifrag], mshel_ini[ifrag],
matom_x[ifrag], matom_y[ifrag], matom_z[ifrag],
lcs_pair_i[ijob],
csp_schwarz_i[ijob], csp_ics_i[ijob],
csp_jcs_i[ijob], csp_lps_pair_i[ijob],
psp_zeta_i[ijob], psp_dkps_i[ijob],
psp_xiza_i[ijob],
mshel_atm[jfrag], mshel_ini[jfrag],
matom_x[jfrag], matom_y[jfrag], matom_z[jfrag],
lcs_pair_j[ijob],
csp_schwarz_j[ijob], csp_ics_j[ijob],
csp_jcs_j[ijob], csp_lps_pair_j[ijob],
psp_zeta_j[ijob], psp_dkps_j[ijob],
psp_xiza_j[ijob],
mlcs[jfrag],
nj, Dj[ijob], Utmp );
#ifdef USE_CUDA
cuda_ifc4c_GetVfrg(ni, Utmp);
#endif
ofmo_scale_diag( ni, 0.5e0, Utmp );
dE_tmp = 8.e0 * ofmo_dot_product( ni2, Di[ijob], Utmp );
#ifdef FJ_COMP
#pragma omp critical
dE[ijob] += dE_tmp;
#else
#pragma omp atomic
dE[ijob] += dE_tmp;
#endif
} // for ( ijob=0 );
#ifdef USE_CUDA
Free(csp_schwarz_f);
cuda_ifc4c_Finalize();
#endif
#ifdef PARA_SUB
#pragma omp barrier
#pragma omp master
free(newjoblist);
#ifdef USE_CUDA
cuda_Reconfig(myrank, nprocs, comm);
#endif
ofmo_integ_set_loop_offset( mythread, 0 );
#endif /* PARA_SUB */
ofmo_acc_thread_timer( cid_integ, mythread );
} /* pragma omp parallel */
ofmo_acc_proc_timer( tid_integ );
MPI_Reduce( t0, t, MAXNJOB*3, MPI_DOUBLE, MPI_SUM, root, comm );
memcpy( t0, t, sizeof(double)*MAXNJOB*3 );
ofmo_acc_proc_timer( tid_intred );
if ( is_root ) {
for ( int i=0; i<njob; i++ ) {
fprintf(fp_prof, "#ES %3d - %3d dE, UjDi, UiDj, enucij ="
" %16.10f %16.10f %16.10f %16.10f\n", joblist[i*2+0],
joblist[i*2+1], dE[i], UjDi[i], UiDj[i], enucij[i] );
}
fflush( fp_prof );
int jn=0;
double pEtot=0.0e0;
for ( int i=0; i<njob; i++ ) {
Etot[i] = dE[i]+ enucij[i] + UjDi[i] + UiDj[i];
//energy_es_dimer += Etot[i];
//
//*energy0 += Etot[i];
pEtot += Etot[i];
if (++jn>NPARTIAL) {
energy_es_dimer += pEtot;
*energy0 += pEtot;
pEtot=0.0e0;
jn=0;
}
}
energy_es_dimer += pEtot;
*energy0 += pEtot;
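/* Added note: the per-dimer energies are accumulated in blocks of
 * NPARTIAL and the block sums flushed into the running totals, which
 * keeps the partial sums small, presumably to reduce floating-point
 * accumulation error. */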
}
ofmo_acc_proc_timer( tid_all );
ofmo_show_thread_timer_all();
ofmo_show_proc_timer_all();
return 0;
}
|
coordinate_common.h | /*!
* Copyright 2018 by Contributors
* \author Rory Mitchell
*/
#pragma once
#include <algorithm>
#include <string>
#include <utility>
#include <vector>
#include <limits>
#include "../common/random.h"
namespace xgboost {
namespace linear {
/**
* \brief Calculate change in weight for a given feature. Applies l1/l2 penalty normalised by the
* number of training instances.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
* \param w The weight.
* \param reg_alpha Unnormalised L1 penalty.
* \param reg_lambda Unnormalised L2 penalty.
*
* \return The weight update.
*/
inline double CoordinateDelta(double sum_grad, double sum_hess, double w,
double reg_alpha, double reg_lambda) {
if (sum_hess < 1e-5f) return 0.0f;
const double sum_grad_l2 = sum_grad + reg_lambda * w;
const double sum_hess_l2 = sum_hess + reg_lambda;
const double tmp = w - sum_grad_l2 / sum_hess_l2;
if (tmp >= 0) {
return std::max(-(sum_grad_l2 + reg_alpha) / sum_hess_l2, -w);
} else {
return std::min(-(sum_grad_l2 - reg_alpha) / sum_hess_l2, -w);
}
}
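/*
 * Worked example (added sketch, not part of the upstream header): with
 * sum_grad = 2, sum_hess = 4, w = 0, reg_alpha = 0.5, reg_lambda = 1 we get
 * sum_grad_l2 = 2, sum_hess_l2 = 5 and tmp = -0.4 < 0, so the function
 * returns std::min(-(2 - 0.5) / 5, -0.0) = -0.3:
 *
 *   double dw = CoordinateDelta(2.0, 4.0, 0.0, 0.5, 1.0);  // == -0.3
 *
 * The L1 term soft-thresholds the unpenalised Newton step -0.4 toward zero,
 * and the clamp against -w makes a step that would overshoot zero land
 * exactly on it instead.
 */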
/**
* \brief Calculate update to bias.
*
* \param sum_grad The sum gradient.
* \param sum_hess The sum hess.
*
* \return The weight update.
*/
inline double CoordinateDeltaBias(double sum_grad, double sum_hess) {
return -sum_grad / sum_hess;
}
/**
* \brief Get the gradient with respect to a single feature.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
inline std::pair<double, double> GetGradient(int group_idx, int num_group, int fidx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
double sum_grad = 0.0, sum_hess = 0.0;
dmlc::DataIter<ColBatch> *iter = p_fmat->ColIterator({static_cast<bst_uint>(fidx)});
while (iter->Next()) {
const ColBatch &batch = iter->Value();
ColBatch::Inst col = batch[0];
const auto ndata = static_cast<bst_omp_uint>(col.length);
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * num_group + group_idx];
if (p.GetHess() < 0.0f) continue;
sum_grad += p.GetGrad() * v;
sum_hess += p.GetHess() * v * v;
}
}
return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Get the gradient with respect to a single feature. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param fidx The target feature.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for a given feature.
*/
inline std::pair<double, double> GetGradientParallel(int group_idx, int num_group, int fidx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
double sum_grad = 0.0, sum_hess = 0.0;
dmlc::DataIter<ColBatch> *iter = p_fmat->ColIterator({static_cast<bst_uint>(fidx)});
while (iter->Next()) {
const ColBatch &batch = iter->Value();
ColBatch::Inst col = batch[0];
const auto ndata = static_cast<bst_omp_uint>(col.length);
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
for (bst_omp_uint j = 0; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * num_group + group_idx];
if (p.GetHess() < 0.0f) continue;
sum_grad += p.GetGrad() * v;
sum_hess += p.GetHess() * v * v;
}
}
return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Get the gradient with respect to the bias. Row-wise multithreaded.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param gpair Gradients.
* \param p_fmat The feature matrix.
*
* \return The gradient and diagonal Hessian entry for the bias.
*/
inline std::pair<double, double> GetBiasGradientParallel(int group_idx, int num_group,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat) {
const RowSet &rowset = p_fmat->BufferedRowset();
double sum_grad = 0.0, sum_hess = 0.0;
const auto ndata = static_cast<bst_omp_uint>(rowset.Size());
#pragma omp parallel for schedule(static) reduction(+ : sum_grad, sum_hess)
for (bst_omp_uint i = 0; i < ndata; ++i) {
auto &p = gpair[rowset[i] * num_group + group_idx];
if (p.GetHess() >= 0.0f) {
sum_grad += p.GetGrad();
sum_hess += p.GetHess();
}
}
return std::make_pair(sum_grad, sum_hess);
}
/**
* \brief Updates the gradient vector with respect to a change in weight.
*
* \param fidx The feature index.
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dw The change in weight.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
inline void UpdateResidualParallel(int fidx, int group_idx, int num_group,
float dw, std::vector<GradientPair> *in_gpair,
DMatrix *p_fmat) {
if (dw == 0.0f) return;
dmlc::DataIter<ColBatch> *iter = p_fmat->ColIterator({static_cast<bst_uint>(fidx)});
while (iter->Next()) {
const ColBatch &batch = iter->Value();
ColBatch::Inst col = batch[0];
// update grad value
const auto num_row = static_cast<bst_omp_uint>(col.length);
#pragma omp parallel for schedule(static)
for (bst_omp_uint j = 0; j < num_row; ++j) {
GradientPair &p = (*in_gpair)[col[j].index * num_group + group_idx];
if (p.GetHess() < 0.0f) continue;
p += GradientPair(p.GetHess() * col[j].fvalue * dw, 0);
}
}
}
/**
* \brief Updates the gradient vector based on a change in the bias.
*
* \param group_idx Zero-based index of the group.
* \param num_group Number of groups.
* \param dbias The change in bias.
* \param in_gpair The gradient vector to be updated.
* \param p_fmat The input feature matrix.
*/
inline void UpdateBiasResidualParallel(int group_idx, int num_group, float dbias,
std::vector<GradientPair> *in_gpair,
DMatrix *p_fmat) {
if (dbias == 0.0f) return;
const RowSet &rowset = p_fmat->BufferedRowset();
const auto ndata = static_cast<bst_omp_uint>(p_fmat->Info().num_row_);
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < ndata; ++i) {
GradientPair &g = (*in_gpair)[rowset[i] * num_group + group_idx];
if (g.GetHess() < 0.0f) continue;
g += GradientPair(g.GetHess() * dbias, 0);
}
}
/**
* \brief Abstract class for stateful feature selection or ordering
* in coordinate descent algorithms.
*/
class FeatureSelector {
public:
/*! \brief factory method */
static FeatureSelector *Create(int choice);
/*! \brief virtual destructor */
virtual ~FeatureSelector() = default;
/**
* \brief Setting up the selector state prior to looping through features.
*
* \param model The model.
* \param gpair The gpair.
* \param p_fmat The feature matrix.
* \param alpha Regularisation alpha.
* \param lambda Regularisation lambda.
* \param param A parameter with algorithm-dependent use.
*/
virtual void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat,
float alpha, float lambda, int param) {}
/**
* \brief Select next coordinate to update.
*
* \param iteration The iteration in a loop through features
* \param model The model.
* \param group_idx Zero-based index of the group.
* \param gpair The gpair.
* \param p_fmat The feature matrix.
* \param alpha Regularisation alpha.
* \param lambda Regularisation lambda.
*
* \return The index of the selected feature. -1 indicates none selected.
*/
virtual int NextFeature(int iteration,
const gbm::GBLinearModel &model,
int group_idx,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) = 0;
};
/**
* \brief Deterministic selection by cycling through features one at a time.
*/
class CyclicFeatureSelector : public FeatureSelector {
public:
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
return iteration % model.param.num_feature;
}
};
/**
 * \brief Similar to Cyclic but with random feature shuffling prior to each update.
* \note Its randomness is controllable by setting a random seed.
*/
class ShuffleFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda, int param) override {
if (feat_index_.size() == 0) {
feat_index_.resize(model.param.num_feature);
std::iota(feat_index_.begin(), feat_index_.end(), 0);
}
std::shuffle(feat_index_.begin(), feat_index_.end(), common::GlobalRandom());
}
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
return feat_index_[iteration % model.param.num_feature];
}
protected:
std::vector<bst_uint> feat_index_;
};
/**
* \brief A random (with replacement) coordinate selector.
* \note Its randomness is controllable by setting a random seed.
*/
class RandomFeatureSelector : public FeatureSelector {
public:
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
return common::GlobalRandom()() % model.param.num_feature;
}
};
/**
* \brief Select coordinate with the greatest gradient magnitude.
* \note It has O(num_feature^2) complexity. It is fully deterministic.
*
* \note It allows restricting the selection to top_k features per group with
* the largest magnitude of univariate weight change, by passing the top_k value
* through the `param` argument of Setup(). That would reduce the complexity to
* O(num_feature*top_k).
*/
class GreedyFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda, int param) override {
top_k_ = static_cast<bst_uint>(param);
const bst_uint ngroup = model.param.num_output_group;
if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
if (counter_.size() == 0) {
counter_.resize(ngroup);
gpair_sums_.resize(model.param.num_feature * ngroup);
}
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
counter_[gid] = 0u;
}
}
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
// k-th selected feature for a group
auto k = counter_[group_idx]++;
// stop after either reaching top-K or going through all the features in a group
if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
const int ngroup = model.param.num_output_group;
const bst_omp_uint nfeat = model.param.num_feature;
// Calculate univariate gradient sums
std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
dmlc::DataIter<ColBatch> *iter = p_fmat->ColIterator();
while (iter->Next()) {
const ColBatch &batch = iter->Value();
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < nfeat; ++i) {
const ColBatch::Inst col = batch[i];
const bst_uint ndata = col.length;
auto &sums = gpair_sums_[group_idx * nfeat + i];
for (bst_uint j = 0u; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * ngroup + group_idx];
if (p.GetHess() < 0.f) continue;
sums.first += p.GetGrad() * v;
sums.second += p.GetHess() * v * v;
}
}
}
// Find a feature with the largest magnitude of weight change
int best_fidx = 0;
double best_weight_update = 0.0f;
for (bst_omp_uint fidx = 0; fidx < nfeat; ++fidx) {
auto &s = gpair_sums_[group_idx * nfeat + fidx];
float dw = std::abs(static_cast<bst_float>(
CoordinateDelta(s.first, s.second, model[fidx][group_idx], alpha, lambda)));
if (dw > best_weight_update) {
best_weight_update = dw;
best_fidx = fidx;
}
}
return best_fidx;
}
protected:
bst_uint top_k_;
std::vector<bst_uint> counter_;
std::vector<std::pair<double, double>> gpair_sums_;
};
/**
* \brief Thrifty, approximately-greedy feature selector.
*
* \note Prior to cyclic updates, reorders features in descending magnitude of
* their univariate weight changes. This operation is multithreaded and is a
* linear complexity approximation of the quadratic greedy selection.
*
* \note It allows restricting the selection to top_k features per group with
* the largest magnitude of univariate weight change, by passing the top_k value
* through the `param` argument of Setup().
*/
class ThriftyFeatureSelector : public FeatureSelector {
public:
void Setup(const gbm::GBLinearModel &model,
const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda, int param) override {
top_k_ = static_cast<bst_uint>(param);
if (param <= 0) top_k_ = std::numeric_limits<bst_uint>::max();
const bst_uint ngroup = model.param.num_output_group;
const bst_omp_uint nfeat = model.param.num_feature;
if (deltaw_.size() == 0) {
deltaw_.resize(nfeat * ngroup);
sorted_idx_.resize(nfeat * ngroup);
counter_.resize(ngroup);
gpair_sums_.resize(nfeat * ngroup);
}
// Calculate univariate gradient sums
std::fill(gpair_sums_.begin(), gpair_sums_.end(), std::make_pair(0., 0.));
dmlc::DataIter<ColBatch> *iter = p_fmat->ColIterator();
while (iter->Next()) {
const ColBatch &batch = iter->Value();
// column-parallel is usually faster than row-parallel
#pragma omp parallel for schedule(static)
for (bst_omp_uint i = 0; i < nfeat; ++i) {
const ColBatch::Inst col = batch[i];
const bst_uint ndata = col.length;
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
auto &sums = gpair_sums_[gid * nfeat + i];
for (bst_uint j = 0u; j < ndata; ++j) {
const bst_float v = col[j].fvalue;
auto &p = gpair[col[j].index * ngroup + gid];
if (p.GetHess() < 0.f) continue;
sums.first += p.GetGrad() * v;
sums.second += p.GetHess() * v * v;
}
}
}
}
// rank by descending weight magnitude within the groups
std::fill(deltaw_.begin(), deltaw_.end(), 0.f);
std::iota(sorted_idx_.begin(), sorted_idx_.end(), 0);
bst_float *pdeltaw = &deltaw_[0];
for (bst_uint gid = 0u; gid < ngroup; ++gid) {
// Calculate univariate weight changes
for (bst_omp_uint i = 0; i < nfeat; ++i) {
auto ii = gid * nfeat + i;
auto &s = gpair_sums_[ii];
deltaw_[ii] = static_cast<bst_float>(CoordinateDelta(
s.first, s.second, model[i][gid], alpha, lambda));
}
// sort in descending order of deltaw abs values
auto start = sorted_idx_.begin() + gid * nfeat;
std::sort(start, start + nfeat,
[pdeltaw](size_t i, size_t j) {
return std::abs(*(pdeltaw + i)) > std::abs(*(pdeltaw + j));
});
counter_[gid] = 0u;
}
}
int NextFeature(int iteration, const gbm::GBLinearModel &model,
int group_idx, const std::vector<GradientPair> &gpair,
DMatrix *p_fmat, float alpha, float lambda) override {
// k-th selected feature for a group
auto k = counter_[group_idx]++;
// stop after either reaching top-N or going through all the features in a group
if (k >= top_k_ || counter_[group_idx] == model.param.num_feature) return -1;
// note that sorted_idx stores the "long" indices
const size_t grp_offset = group_idx * model.param.num_feature;
return static_cast<int>(sorted_idx_[grp_offset + k] - grp_offset);
}
protected:
bst_uint top_k_;
std::vector<bst_float> deltaw_;
std::vector<size_t> sorted_idx_;
std::vector<bst_uint> counter_;
std::vector<std::pair<double, double>> gpair_sums_;
};
/**
* \brief A set of available FeatureSelector's
*/
enum FeatureSelectorEnum {
kCyclic = 0,
kShuffle,
kThrifty,
kGreedy,
kRandom
};
inline FeatureSelector *FeatureSelector::Create(int choice) {
switch (choice) {
case kCyclic:
return new CyclicFeatureSelector();
case kShuffle:
return new ShuffleFeatureSelector();
case kThrifty:
return new ThriftyFeatureSelector();
case kGreedy:
return new GreedyFeatureSelector();
case kRandom:
return new RandomFeatureSelector();
default:
LOG(FATAL) << "unknown coordinate selector: " << choice;
}
return nullptr;
}
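/*
 * Illustrative driver (added sketch; a hypothetical caller, not upstream
 * code). An updater is expected to call Setup() once per round and then
 * poll NextFeature() until it returns -1:
 *
 *   std::unique_ptr<FeatureSelector> sel(FeatureSelector::Create(kThrifty));
 *   sel->Setup(model, gpair, p_fmat, alpha, lambda, top_k);
 *   for (int it = 0;; ++it) {
 *     int fidx = sel->NextFeature(it, model, group_idx, gpair, p_fmat,
 *                                 alpha, lambda);
 *     if (fidx < 0) break;  // selector exhausted for this group
 *     // ... compute CoordinateDelta for fidx, then UpdateResidualParallel ...
 *   }
 */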
} // namespace linear
} // namespace xgboost
|
utils.h | #define MIN(a,b) (((a)<(b))?(a):(b))
static inline unsigned long long cycles()
{
unsigned long long u;
asm volatile ("rdtscp;shlq $32,%%rdx;orq %%rdx,%%rax;movq %%rax,%0":"=q"(u)::"%rax", "%rdx", "rcx");
return u;
}
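/*
 * Added note (sketch): rdtscp is used instead of rdtsc because it waits for
 * all prior instructions to retire before reading the time-stamp counter;
 * it also writes the core id into ecx, hence the rcx clobber above.
 * Hypothetical usage:
 *
 *   unsigned long long c0 = cycles();
 *   // ... kernel under test ...
 *   unsigned long long elapsed = cycles() - c0;
 */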
//
double myseconds()
{
struct timeval tp;
struct timezone tzp;
    gettimeofday(&tp,&tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
//
void init(double* p, int n, int m)
{
#pragma omp parallel for
    for (int j = 0; j < m; ++j) // rows run over m, matching the j == m-1 boundary test below
{
for (int i = 0; i < n; i++)
{
if ( ((i == 0) || (j == 0)) || (i == n-1) || (j == m-1) )
p[j*n + i] = 1.;
else
p[j*n + i] = 0.;
}
}
}
double maxNorm(double* v1, double* v2, int size)
{
double mymax = 0.;
    #pragma omp parallel for reduction(max: mymax)
    for (int ii = 0; ii < size; ++ii)
    {
        // index directly instead of incrementing shared pointers:
        // the ++v1/++v2 of the original loop raced under OpenMP
        double d = fabs(v1[ii] - v2[ii]);
        if (d > mymax)
        {
            mymax = d;
        }
    }
return mymax;
}
double l2Norm(double* v1, double* v2, int size)
{
double myl2 = 0.;
#pragma omp parallel for reduction(+: myl2)
for (int ii = 0; ii < size; ++ii)
{
myl2 += fabs(v1[ii]-v2[ii])*(v1[ii] - v2[ii]);
}
return sqrt(myl2)/size;
}
void print(double* p, int m, int n)
{
for (int i=0; i < MIN(n, 15); ++i)
{
for (int j=MIN(m, 15); j > 0; --j)
{
printf("%e ", *p);
++p;
}
p += m - MIN(m, 15);
printf("\n");
}
}
|
graph.h | // Copyright 2020 The Google Research Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef GRAPH_HPP
#define GRAPH_HPP
#include <chrono> // NOLINT
#include <type_traits>
#include <vector>
#include "common.h" // NOLINT
#include "mmapped_vector.h" // NOLINT
#include "pkxsort.h" // NOLINT
enum class GraphFormat { kNDE, kBinary, kBinaryStreamed };
enum class GraphPermutation { kNone, kDegreeOrder, kDegeneracyOrder };
GraphFormat ParseGraphFormat(const std::string &fmt) {
if (fmt == "nde") return GraphFormat::kNDE;
if (fmt == "bin") return GraphFormat::kBinary;
if (fmt == "sbin") return GraphFormat::kBinaryStreamed;
fprintf(stderr, "Invalid graph format.\n");
abort();
}
template <typename node_t, typename edge_t>
struct GraphT {
static_assert(std::is_unsigned<node_t>::value,
"node_t must be an unsigned type");
static_assert(std::is_unsigned<edge_t>::value,
"edge_t must be an unsigned type");
// Graph fingerprints encode the size of edge and node types, which are
// integral types of up to 8 bytes each.
static constexpr size_t kFingerprint = (sizeof(edge_t) << 4) | sizeof(node_t);
static constexpr size_t kFingerprintStreamed = kFingerprint | 1;
mmapped_vector<edge_t> adj_start;
mmapped_vector<node_t> adj;
size_t N() const { return adj_start.size() - 1; }
// Reads a graph from `filename` in the format specified by `format`. If
// `permutation` is not `kNone`, the graph is permuted accordingly. If
// `forward_only` is true, only edges {a, b} with a < b will be returned.
// Otherwise, both pairs {a, b} and {b, a} will be present in the graph. Note
// that `kDegeneracyOrder` is only valid if `forward_only == false`.
void Read(
const std::string &filename, GraphFormat format,
GraphPermutation permutation, bool forward_only,
std::chrono::high_resolution_clock::time_point *reading_done = nullptr,
std::chrono::high_resolution_clock::time_point *permutation_computed =
nullptr) {
FILE *f = filename.empty() ? stdin : fopen(filename.c_str(), "r");
mmapped_vector<std::pair<node_t, node_t>> adj_pairs;
// Temporarily deallocate adj.
std::string backing_file = adj.BackingFile();
adj.clear();
// Read the graph.
size_t N;
switch (format) {
case GraphFormat::kNDE:
N = ReadNDEGraph(f, forward_only, backing_file, &adj_pairs);
break;
case GraphFormat::kBinary:
N = ReadBinaryGraph(f, forward_only, backing_file, &adj_pairs);
break;
case GraphFormat::kBinaryStreamed:
N = ReadBinaryStreamedGraph(f, forward_only, backing_file, &adj_pairs);
break;
default:
fprintf(stderr, "Invalid graph format enum.\n");
abort();
}
if (!filename.empty()) fclose(f);
// Clean up duplicate edges.
// TODO: external memory version
fprintf(stderr, "Sorting...\n");
kx::radix_sort(adj_pairs.data(), adj_pairs.data() + adj_pairs.size(),
EdgeRadixTraits());
fprintf(stderr, "Cleaning...\n");
adj_pairs.resize(
std::unique(adj_pairs.data(), adj_pairs.data() + adj_pairs.size()) -
adj_pairs.data());
fprintf(stderr, "Reading done\n");
if (reading_done) *reading_done = std::chrono::high_resolution_clock::now();
// Compute permutation and permute the graph.
if (permutation != GraphPermutation::kNone) {
CHECK(!forward_only || permutation == GraphPermutation::kDegreeOrder);
std::vector<node_t> perm = permutation == GraphPermutation::kDegreeOrder
? ComputeDegreeOrder(N, &adj_pairs)
: ComputeDegeneracyOrder(N, &adj_pairs);
fprintf(stderr, "Permutation computed\n");
if (permutation_computed) {
*permutation_computed = std::chrono::high_resolution_clock::now();
}
std::vector<node_t> reverse_permutation(N);
#pragma omp parallel for
for (node_t i = 0; i < N; i++) {
reverse_permutation[perm[i]] = i;
}
perm.clear();
#pragma omp parallel for
      for (size_t i = 0; i < adj_pairs.size(); i++) {  // adj was cleared above; iterate the edge pairs
std::pair<node_t, node_t> &e = adj_pairs[i];
e.first = reverse_permutation[e.first];
e.second = reverse_permutation[e.second];
if (forward_only && e.first > e.second) std::swap(e.first, e.second);
}
fprintf(stderr, "Sorting again...\n");
kx::radix_sort(adj_pairs.begin(), adj_pairs.end(), EdgeRadixTraits());
fprintf(stderr, "Permuting done\n");
}
// Compute degrees, final adjacency lists and their start position.
adj_start.resize(N + 1);
std::fill(adj_start.begin(), adj_start.end(), 0);
for (size_t i = 0; i < adj_pairs.size(); i++) {
adj_start[adj_pairs[i].first + 1]++;
}
for (size_t i = 0; i < N; i++) {
adj_start[i + 1] += adj_start[i];
}
adj.reinterpret(std::move(adj_pairs));
// TODO: parallel
for (edge_t i = 0; 2 * i + 1 < adj.size(); i += 1) {
adj[i] = adj[2 * i + 1];
}
adj.resize(adj.size() / 2);
adj.shrink();
}
private:
struct EdgeRadixTraits {
static const int nBytes = 2 * sizeof(node_t);
size_t Value(const std::pair<node_t, node_t> &x) {
return ((size_t)x.first << 32) | x.second;
}
int kth_byte(const std::pair<node_t, node_t> &x, int k) {
return (Value(x) >> (8 * k)) & 0xff;
}
bool compare(const std::pair<node_t, node_t> &x,
const std::pair<node_t, node_t> &y) {
return Value(x) < Value(y);
}
};
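  /*
   * Added note (sketch): kx::radix_sort drives these traits as follows --
   * nBytes is the number of byte passes, kth_byte() extracts the k-th least
   * significant byte of the sort key, and compare() is the comparator used
   * as a fallback on small ranges. Packing (first, second) into one 64-bit
   * key assumes node_t is at most 32 bits wide, which the `<< 32` in
   * Value() already relies on.
   */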
size_t ReadNDEGraph(FILE *f, bool forward_only,
const std::string &backing_file,
mmapped_vector<std::pair<node_t, node_t>> *adj_pairs) {
// Number of nodes (first line).
size_t N = ReadBase10Fast<node_t>(f);
// Degrees (N lines).
node_t a, b;
size_t expected_edges = 0;
for (node_t i = 0; i < N; i++) {
a = ReadBase10Fast<node_t>(f);
b = ReadBase10Fast<node_t>(f);
expected_edges += b;
}
adj_pairs->init(backing_file, expected_edges,
/*reserve_only = */ true);
// Edges (all other lines).
while (true) {
a = ReadBase10Fast<node_t>(f);
b = ReadBase10Fast<node_t>(f);
if (a == (node_t)EOF || b == (node_t)EOF) break;
if (a == b) continue;
if (forward_only && b < a) std::swap(a, b);
adj_pairs->push_back({a, b});
if (!forward_only) adj_pairs->push_back({b, a});
}
return N;
}
size_t ReadBinaryGraph(FILE *f, bool forward_only,
const std::string &backing_file,
mmapped_vector<std::pair<node_t, node_t>> *adj_pairs) {
// Fingerprint.
    unsigned long long fingerprint = ReadBinaryOrDie<unsigned long long>(f);
CHECK(fingerprint == kFingerprint);
// Number of nodes.
size_t N = ReadBinaryOrDie<node_t>(f);
// Offsets of each adjacency list.
adj_start.resize(N + 1);
ReadBinaryOrDie(f, adj_start.data(), N + 1);
std::vector<node_t> current_adj;
adj_pairs->init(backing_file, adj_start.back(),
/*reserve_only = */ true);
// Edges.
for (node_t i = 0; i < N; i++) {
size_t degree = adj_start[i + 1] - adj_start[i];
      current_adj.resize(degree);
      ReadBinaryOrDie(f, current_adj.data(), degree);
      for (node_t j = 0; j < degree; j++) {
node_t a = i;
node_t b = current_adj[j];
if (a == b) continue;
if (forward_only && b < a) std::swap(a, b);
adj_pairs->push_back({a, b});
if (!forward_only) adj_pairs->push_back({b, a});
}
}
return N;
}
size_t ReadBinaryStreamedGraph(
FILE *f, bool forward_only, const std::string &backing_file,
mmapped_vector<std::pair<node_t, node_t>> *adj_pairs) {
// Fingerprint.
    unsigned long long fingerprint = ReadBinaryOrDie<unsigned long long>(f);
CHECK(fingerprint == kFingerprintStreamed);
// Number of nodes.
size_t N = ReadBinaryOrDie<node_t>(f);
adj_start.resize(N + 1);
std::vector<node_t> current_adj;
// Size is unknown at this point - we guess at least N.
adj_pairs->init(backing_file, N, /*reserve_only = */ true);
// Edges.
for (node_t i = 0; i < N; i++) {
// Degree.
size_t degree = ReadBinaryOrDie<node_t>(f);
      current_adj.resize(degree);
      ReadBinaryOrDie(f, current_adj.data(), degree);
      for (node_t j = 0; j < degree; j++) {
node_t a = i;
node_t b = current_adj[j];
if (a == b) continue;
if (forward_only && b < a) std::swap(a, b);
adj_pairs->push_back({a, b});
if (!forward_only) adj_pairs->push_back({b, a});
}
}
return N;
}
struct DegreeRadixTraits {
static const int nBytes = sizeof(node_t);
int kth_byte(const node_t &x, int k) {
return (degree[x] >> (8 * k)) & 0xff;
}
bool compare(const node_t &x, const node_t &y) {
return degree[x] < degree[y];
}
    const std::vector<node_t> &degree;
};
  // Sort nodes by increasing degree.
std::vector<node_t> ComputeDegreeOrder(
size_t N, mmapped_vector<std::pair<node_t, node_t>> *adj_pairs) {
std::vector<node_t> permutation(N);
std::vector<node_t> degree(N);
#pragma omp parallel for
for (node_t i = 0; i < N; i++) permutation[i] = i;
    #pragma omp parallel for
    for (size_t i = 0; i < adj_pairs->size(); i++) {
      // atomics: several edges may touch the same endpoint concurrently
      #pragma omp atomic
      degree[(*adj_pairs)[i].first]++;
      #pragma omp atomic
      degree[(*adj_pairs)[i].second]++;
    }
// TODO: external memory version
kx::radix_sort(permutation.begin(), permutation.end(),
DegreeRadixTraits{degree});
return permutation;
}
// Sort nodes in degeneracy order
// (https://en.wikipedia.org/wiki/Degeneracy_(graph_theory)) by iteratively
// removing lowest-degree nodes.
// Here, we assume that the graph is sufficiently small that
// O(number_of_edges) fits in main memory.
// TODO: avoid the extra copy of the edges.
std::vector<node_t> ComputeDegeneracyOrder(
size_t N, mmapped_vector<std::pair<node_t, node_t>> *adj_pairs) {
std::vector<std::vector<node_t>> graph(N);
for (auto edg : *adj_pairs) {
graph[edg.first].push_back(edg.second);
}
std::vector<node_t> permutation;
std::vector<std::vector<node_t>> nodes_by_degree(N);
std::vector<node_t> degrees(N);
std::vector<node_t> positions(N);
std::vector<bool> used(N);
std::vector<edge_t> adj_starts;
for (node_t i = 0; i < N; i++) {
nodes_by_degree[graph[i].size()].push_back(i);
degrees[i] = graph[i].size();
positions[i] = nodes_by_degree[degrees[i]].size() - 1;
}
node_t j = 0;
for (node_t i = 0; i < N; i++) {
while (nodes_by_degree[j].empty()) j++;
node_t v = nodes_by_degree[j].back();
nodes_by_degree[j].pop_back();
permutation.push_back(v);
used[v] = true;
for (auto g : graph[v]) {
if (used[g]) continue;
node_t &to_swap = nodes_by_degree[degrees[g]][positions[g]];
std::swap(to_swap, nodes_by_degree[degrees[g]].back());
positions[to_swap] = positions[g];
nodes_by_degree[degrees[g]].pop_back();
degrees[g]--;
nodes_by_degree[degrees[g]].push_back(g);
positions[g] = nodes_by_degree[degrees[g]].size() - 1;
}
if (j > 0) j--;
}
return permutation;
}
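  /*
   * Added note (sketch): the degree-bucket structure above is the classic
   * Matula-Beck smallest-last ordering. Every node moves between buckets at
   * most once per removed neighbour, so the whole ordering costs
   * O(N + number_of_edges), consistent with the in-memory assumption stated
   * in the comment before the function.
   */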
};
#endif
|
Scene.h | #pragma once
#include <vector>
#include <random>
#include <cmath>
#include "Vec3.h"
#include "Vertex.h"
#include "Camera.h"
#include "Ray.h"
#include "Triangle.h"
#include "Mesh.h"
#include "LightSource.h"
using namespace std;
void printProgressBar(float prop) {
int progress = round(50.0f * prop);
string progressBar = "";
for (int i=0; i<progress; i++) {
progressBar += "\u2588";
}
std::cout << "Raytracing... [" << progressBar << string(50 - progress, ' ') << "] " << progress * 2 << "%\r" << flush;
}
Vec3f sample_along(Vec3f v) {
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<> dis(0.0, 1.0);
v.normalize();
Vec3f v2, v3;
v.getTwoOrthogonals(v2, v3);
v2.normalize();
v3.normalize();
float theta = asin(dis(gen));
float phi = 2 * M_PI * dis(gen);
Vec3f dir = v2 * cos(phi) + v3 * sin(phi);
dir.normalize();
return normalize(v * cos(theta) + dir * sin(theta));
}
Vec3f jit_sample(int sample_idx, int n_samples) {
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<> dis(0.0, 1.0);
int d = int(sqrt(float(n_samples)));
int j2 = sample_idx / d;
int i2 = sample_idx % d;
float x = (float(i2) + dis(gen)) / float(d);
float y = (float(j2) + dis(gen)) / float(d);
return {x, y, 42};
}
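// Added note (sketch): jit_sample is stratified ("jittered") sampling. The
// pixel is split into a d x d grid with d = sqrt(n_samples) and sample_idx
// picks one cell, sampled uniformly inside. E.g. with n_samples = 4,
// sample_idx = 3 gives i2 = 1, j2 = 1, so x and y are both drawn from
// [0.5, 1) -- the top-right cell. This is why m_n_samples should be a
// perfect square (see the comment on Scene::m_n_samples below).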
class Scene {
public:
vector<Mesh> m_meshes;
Camera m_cam;
vector<LightSource> m_lights;
int m_n_samples; // Should be a square of an integer, so that jittered sampling works properly
Scene() {
m_meshes = vector<Mesh>();
m_cam = Camera();
m_lights = vector<LightSource>();
m_n_samples = 4;
}
void rayTrace(Image &im) {
// #pragma omp parallel for
for (int i = 0; i < im.m_width; i++) {
printProgressBar((float)(i + 1) / (float)im.m_width);
#pragma omp parallel for
for (int j = 0; j < im.m_height; j++) {
im.m_data[j * im.m_width + i] = {0, 0, 0};
                // no pragma here: every sample accumulates into the same
                // pixel, so parallelising this loop would race on im.m_data
for (int sample_idx = 0; sample_idx < m_n_samples; sample_idx++) {
// Vec3f noise = jit_sample(sample_idx, m_n_samples);
Vec3f noise = {0.5, 0.5, 0}; // Remove the noise since we are taking only one sample
float x = (float(i) + noise[0]) / float(im.m_width);
float y = (float(j) + noise[1]) / float(im.m_height);
Ray rij = m_cam.launch_ray(x, y);
// im.m_data[j * im.m_width + i] += recurse_ray(rij, 0);
im.m_data[j * im.m_width + i] += ray_normal_only(rij);
}
im.m_data[j * im.m_width + i] *= 1 / float(m_n_samples);
}
}
}
/*
* Checks if ls is visible from v
*
*/
bool is_visible(Vec3f v, Vec3f ls) {
Ray rvl = Ray(v, normalize(ls - v));
for (Mesh const &m : this->m_meshes) {
Vec3i t;
vector<float> intersection = m.m_bvh.intersection(rvl, t, m.m_vertices, m.m_triangles);
if (intersection.size() > 0) {
                float t_hit = intersection[3]; // renamed: `t` shadowed the Vec3i t above
                if (t_hit <= (ls - v).length())
return false;
}
}
return true;
}
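    // Added note (sketch): callers below offset the shadow-ray origin by
    // 2 * __FLT_EPSILON__ along the surface normal before calling
    // is_visible; without that bias the ray can re-intersect the triangle
    // it starts on and produce "shadow acne".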
Vec3f colorize(const vector<float> &intersection, const Vec3i &t, const Mesh &m, Vec3f rayDir) {
// rng from https://en.cppreference.com/w/cpp/numeric/random/uniform_real_distribution
std::random_device rd; //Will be used to obtain a seed for the random number engine
std::mt19937 gen(rd()); //Standard mersenne_twister_engine seeded with rd()
std::uniform_real_distribution<> dis(0.0, 1.0);
Vec3f normal_at_point = intersection[0] * m.m_vertices[t[0]].m_normal
+ intersection[1] * m.m_vertices[t[1]].m_normal
+ intersection[2] * m.m_vertices[t[2]].m_normal;
normal_at_point.normalize();
Vec3f intersection_position = intersection[0] * m.m_vertices[t[0]].m_point
+ intersection[1] * m.m_vertices[t[1]].m_point
+ intersection[2] * m.m_vertices[t[2]].m_point;
Vec3f overall_color;
Vec3f color;
for (LightSource light : this->m_lights) {
Vec3f random_source;
switch (light.m_type)
{
case L_AMBIENT:
color = light.m_intensity * light.m_color * max(0.0f, dot(normal_at_point, -rayDir));
color *= m.m_material.m_diffuse_coef * m.m_material.diffuse_response(intersection_position);
break;
case L_POINT:
if (is_visible(intersection_position + 2 * __FLT_EPSILON__ * normal_at_point, light.m_position)) {
color = light.m_intensity * light.m_color * max(0.0f, dot(normal_at_point, -rayDir));
color *= m.m_material.evaluateColorResponse(normal_at_point,
light.m_position - intersection_position,
-rayDir,
intersection_position);
}
else {
color = {0, 0, 0};
}
break;
case L_RECTANGLE:
random_source = light.m_position + float(dis(gen)) * light.m_vec1 + float(dis(gen)) * light.m_vec2;
if (is_visible(intersection_position + 2 * __FLT_EPSILON__ * normal_at_point, random_source)) {
color = light.m_intensity * light.m_color * max(0.0f, dot(normal_at_point, -rayDir));
color *= m.m_material.evaluateColorResponse(normal_at_point,
random_source - intersection_position,
-rayDir,
intersection_position);
}
else {
color = {0, 0, 0};
}
break;
default:
color = {0, 0, 0};
break;
}
overall_color += color;
}
return overall_color;
}
Vec3f recurse_ray(Ray r, int depth) {
if (depth > 3)
return {0, 0, 0};
Vec3f this_ray_color = {0, 0, 0};
vector<float> nearest_intersection = {};
Vec3i nearest_t;
        const Mesh *nearest_m = nullptr;
for (Mesh const &m : m_meshes) {
Vec3i t;
vector<float> intersection = m.m_bvh.intersection(r, t, m.m_vertices, m.m_triangles);
if (intersection.size() > 0) {
if (nearest_intersection.size() == 0 || nearest_intersection[3] > intersection[3]) {
nearest_intersection = intersection;
nearest_t = t;
nearest_m = &m;
}
}
}
if (nearest_intersection.size() > 0) {
this_ray_color = this->colorize(nearest_intersection, nearest_t, *nearest_m, r.m_direction);
Vec3f normal_at_point = nearest_intersection[0] * nearest_m->m_vertices[nearest_t[0]].m_normal
+ nearest_intersection[1] * nearest_m->m_vertices[nearest_t[1]].m_normal
+ nearest_intersection[2] * nearest_m->m_vertices[nearest_t[2]].m_normal;
normal_at_point.normalize();
Vec3f intersection_position = nearest_intersection[0] * nearest_m->m_vertices[nearest_t[0]].m_point
+ nearest_intersection[1] * nearest_m->m_vertices[nearest_t[1]].m_point
+ nearest_intersection[2] * nearest_m->m_vertices[nearest_t[2]].m_point;
// Sample along normal
Vec3f random_vector = sample_along(normal_at_point);
// random_vector = -r.m_direction + 2.0f * (r.m_direction - dot(r.m_direction, normal_at_point) * normal_at_point); // Perfect reflection
Vec3f recursed_color = recurse_ray(Ray(intersection_position, random_vector), depth + 1)
* nearest_m->m_material.evaluateColorResponse(normal_at_point,
random_vector,
-r.m_direction,
intersection_position);
return this_ray_color + recursed_color;
}
else {
return {0, 0, 0};
}
}
Vec3f ray_normal_only(Ray r) {
Vec3f this_ray_color = {0, 0, 0};
vector<float> nearest_intersection = {};
Vec3i nearest_t;
        const Mesh *nearest_m = nullptr;
int mesh_id = 0;
int nearest_id = -1;
for (Mesh const &m : m_meshes) {
Vec3i t;
vector<float> intersection = m.m_bvh.intersection(r, t, m.m_vertices, m.m_triangles);
if (intersection.size() > 0) {
if (nearest_intersection.size() == 0 || nearest_intersection[3] > intersection[3]) {
nearest_intersection = intersection;
nearest_t = t;
nearest_id = mesh_id;
nearest_m = &m;
}
}
mesh_id++;
}
if (nearest_intersection.size() > 0) {
Vec3f normal_at_point = nearest_intersection[0] * nearest_m->m_vertices[nearest_t[0]].m_normal
+ nearest_intersection[1] * nearest_m->m_vertices[nearest_t[1]].m_normal
+ nearest_intersection[2] * nearest_m->m_vertices[nearest_t[2]].m_normal;
normal_at_point.normalize();
return {dot(-m_cam.reference_frame()[1], normal_at_point),
dot(-m_cam.reference_frame()[2], normal_at_point),
float(nearest_id)}; // We do not need the third channel to know where we are on the picture, so we use it to know which mesh we are on
}
else {
return {0, 0, 0};
}
}
};
|
DilationFilter.h | /*
 * DilationFilter.h
*
* Created on: 13.06.2016
* Author: Darius Malysiak
*/
#ifndef IMAGEPROCESSING_DILATIONFILTER_H_
#define IMAGEPROCESSING_DILATIONFILTER_H_
#include "../BaseObject.h"
#include "../DataStructures/Matrix.h"
#include "../DataStructures/Image.h"
namespace Lazarus {
template<typename T>
class DilationFilter: public Lazarus::BaseObject {
public:
static const Lazarus::Matrix2<double>* get_DILATION3x3_KERNEL(double val)
{
		static Lazarus::Matrix2<double> _DILATION_KERNEL;
		_DILATION_KERNEL.initMatrix(3,3);
		_DILATION_KERNEL.setData(0,0,val);
		_DILATION_KERNEL.setData(0,1,val);
		_DILATION_KERNEL.setData(0,2,val);
		_DILATION_KERNEL.setData(1,0,val);
		_DILATION_KERNEL.setData(1,1,val);
		_DILATION_KERNEL.setData(1,2,val);
		_DILATION_KERNEL.setData(2,0,val);
		_DILATION_KERNEL.setData(2,1,val);
		_DILATION_KERNEL.setData(2,2,val);
		return &_DILATION_KERNEL;
}
static const Lazarus::Matrix2<double>* get_DILATION_KERNEL(double val, unsigned int size)
{
		Lazarus::Matrix2<double>* _DILATION_KERNEL = new Lazarus::Matrix2<double>();
		_DILATION_KERNEL->initMatrix(size,size);
		_DILATION_KERNEL->globalSetMatrixVal(val);
		return _DILATION_KERNEL;
}
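	/*
	 * Illustrative usage (added sketch; a hypothetical caller):
	 *
	 *   DilationFilter<unsigned char> filter;
	 *   filter.setDilationKernel(
	 *       DilationFilter<unsigned char>::get_DILATION3x3_KERNEL(1.0));
	 *   Lazarus::Image<unsigned char>* dilated = filter.filterImage(input);
	 *
	 * Note that get_DILATION_KERNEL() returns a heap-allocated matrix owned
	 * by the caller, whereas get_DILATION3x3_KERNEL() returns a pointer to a
	 * function-local static that is re-initialised on every call.
	 */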
DilationFilter()
{
mp_filter_mask = NULL;
}
virtual ~DilationFilter(){}
void setDilationKernel(const Lazarus::Matrix2<double>* filter)
{
this->mp_filter_mask = filter;
}
/**
	 * We assume a kernel with odd dimensions. The dilation will be computed on an extended image with black borders
* such that the kernel can be positioned onto the first image pixel.
* Returns the filtered image in case of success otherwise NULL.
**/
Lazarus::Image<T>* filterImage( Lazarus::Image<T>* image, double clamping_val=255.0 )
{
unsigned int offset_x = (mp_filter_mask->getColumnCount()-1)/2;
unsigned int offset_y = (mp_filter_mask->getRowCount()-1)/2;
unsigned int image_width = image->getm_width();
unsigned int image_heigth = image->getm_height();
unsigned int channel_count = image->getm_channel_count();
unsigned int filter_width = mp_filter_mask->getColumnCount();
unsigned int filter_height = mp_filter_mask->getRowCount();
if(filter_width % 2 != 1)
{
printf("filter width %d is not odd\n",filter_width);
return NULL;
}
if(filter_height % 2 != 1)
{
printf("filter height %d is not odd\n",filter_height);
return NULL;
}
Lazarus::Image<T>* output = new Lazarus::Image<T>( image_width, image_heigth, image->getm_data_alignment() );
Lazarus::Image<T>* temporary = new Lazarus::Image<T>( image_width + 2*offset_x,
image_heigth + 2*offset_y, image->getm_data_alignment() );
//fill the output and temp image with black
Lazarus::FastKTuple<T> color(channel_count);
for(unsigned int i=0; i< channel_count; i++)
{
color.setElement(i,0);
}
output->fillImageFast( &color );
temporary->fillImageFast( &color );
//copy the input image into the temp buffer;
for(unsigned int i=0; i<image_width; i++)
{
for(unsigned int j=0; j<image_heigth; j++)
{
image->getPixelFast( &color,i,j );
temporary->setPixelFast(&color,offset_x + i,offset_y + j);
}
}
//start the convolution process
//over every pixel
unsigned int c_limit = 0;
if(channel_count > 3)
c_limit = 3;
else
c_limit = channel_count;
		double dmin = std::numeric_limits<double>::lowest(); // min() is the smallest positive double, not the most negative
#pragma omp parallel for
for(unsigned int i=offset_x; i<image_width+(offset_x); i++)
{
double temp_value = dmin;
double filter_value = 0;
Lazarus::FastKTuple<T> new_color(channel_count);
Lazarus::FastKTuple<T> color_(channel_count);
for(unsigned int j=offset_y; j<image_heigth+(offset_y); j++)
{
//over every color channel
for(unsigned int c=0; c<c_limit; c++)
{
					//dilation
					for(int k=-(int)offset_x; k<=(int)offset_x; ++k)
{
						for(int l=-(int)offset_y; l<=(int)offset_y; ++l)
{
temporary->getPixelFast(&color_, (unsigned int)((int)i+k),
(unsigned int)((int)j+l));
filter_value = mp_filter_mask->getData((unsigned int)((int)offset_x+k),
(unsigned int)((int)offset_y+l) );
if( (double)(color_.getElement(c)) + filter_value > temp_value )
{
								temp_value = (double)(color_.getElement(c)) + filter_value; // dilation adds the kernel value, matching the comparison above
}
}
}
new_color.setElement(c,(T)std::min(std::max(temp_value,(double)std::numeric_limits<T>::min()),clamping_val));
					temp_value=std::numeric_limits<double>::lowest();//reset
}
//set the alpha value to the image value
if(channel_count>3)
{
new_color.setElement(3,color_.getElement(3));
}
output->setPixelFast(&new_color,i-(offset_x),j-(offset_y));
}
}
//delete the temporary image
delete temporary;
return output;
}
/**
	 * We assume a kernel with odd dimensions. The dilation will be computed on an extended image with black borders
* such that the kernel can be positioned onto the first image pixel.
* Returns the filtered image in case of success otherwise NULL.
**/
Lazarus::Image<T>* filterImageBW( Lazarus::Image<T>* image, double white=255.0 )
{
unsigned int offset_x = (mp_filter_mask->getColumnCount()-1)/2;
unsigned int offset_y = (mp_filter_mask->getRowCount()-1)/2;
unsigned int image_width = image->getm_width();
unsigned int image_heigth = image->getm_height();
unsigned int channel_count = image->getm_channel_count();
unsigned int filter_width = mp_filter_mask->getColumnCount();
unsigned int filter_height = mp_filter_mask->getRowCount();
if(filter_width % 2 != 1)
{
printf("filter width %d is not odd\n",filter_width);
return NULL;
}
if(filter_height % 2 != 1)
{
printf("filter height %d is not odd\n",filter_height);
return NULL;
}
Lazarus::Image<T>* output = new Lazarus::Image<T>( image_width, image_heigth, image->getm_data_alignment() );
Lazarus::Image<T>* temporary = new Lazarus::Image<T>( image_width + 2*offset_x,
image_heigth + 2*offset_y, image->getm_data_alignment() );
//fill the output and temp image with black
Lazarus::FastKTuple<T> color(channel_count);
for(unsigned int i=0; i< channel_count; i++)
{
color.setElement(i,0);
}
output->fillImageFast( &color );
temporary->fillImageFast( &color );
//copy the input image into the temp buffer;
for(unsigned int i=0; i<image_width; i++)
{
for(unsigned int j=0; j<image_heigth; j++)
{
image->getPixelFast( &color,i,j );
temporary->setPixelFast(&color,offset_x + i,offset_y + j);
}
}
//start the convolution process
//over every pixel
#pragma omp parallel for
for(unsigned int i=offset_x; i<image_width+(offset_x); i++)
{
bool match = false;
double filter_value = 0;
Lazarus::FastKTuple<T> new_color(channel_count);
Lazarus::FastKTuple<T> color_(channel_count);
			unsigned int c_limit = std::min(channel_count,(unsigned int)3); // cap at 3 colour channels, as in filterImage()
for(unsigned int j=offset_y; j<image_heigth+(offset_y); j++)
{
//over every color channel
for(unsigned int c=0; c<c_limit; c++)
{
					//dilation
					for(int k=-(int)offset_x; k<=(int)offset_x; ++k)
{
						for(int l=-(int)offset_y; l<=(int)offset_y; ++l)
{
temporary->getPixelFast(&color_, (unsigned int)((int)i+k),
(unsigned int)((int)j+l));
filter_value = mp_filter_mask->getData((unsigned int)((int)offset_x+k),
(unsigned int)((int)offset_y+l) );
if( color_.getElement(c) == filter_value )
{
match = true;
break;
}
}
if(match == true)//early break out of outer loop
{
break;
}
}
if(match == true)
{
new_color.setElement(c,(T)white);
}
match=false;//reset
}
//set the alpha value to the image value
if(channel_count>3)
{
new_color.setElement(3,color_.getElement(3));
}
output->setPixelFast(&new_color,i-(offset_x),j-(offset_y));
}
}
//delete the temporary image
delete temporary;
return output;
}
private:
const Lazarus::Matrix2<double>* mp_filter_mask;
};
}
#endif /* IMAGEPROCESSING_DILATIONFILTER_H_ */
|
def.h | /**
* @author : Zhao Chonyyao (cyzhao@zju.edu.cn)
* @date : 2021-04-30
* @description: functional and constraint definition
* @version : 1.0
*/
#ifndef NUMERIC_DEF_H
#define NUMERIC_DEF_H
#include <memory>
#include <iostream>
#include <Eigen/Sparse>
#include "data_str_core.h"
#include "error.h"
#include "config.h"
namespace PhysIKA {
template <typename T, size_t dim>
using data_ptr = std::shared_ptr<dat_str_core<T, dim>>;
/**
* Functional interface.
*
* sample usage:
* Functional->Val(x, data); // to get value.
* Functional->Gra(x, data); // to get gradient.
* Functional->Hes(x, data); // to get hessian.
*
*/
template <typename T, size_t dim>
class Functional
{
public:
virtual ~Functional() {}
virtual size_t Nx() const = 0;
virtual int Val(const T* x, data_ptr<T, dim>& data) const = 0;
virtual int Gra(const T* x, data_ptr<T, dim>& data) const = 0;
virtual int Hes(const T* x, data_ptr<T, dim>& data) const = 0;
virtual int Val_Gra_Hes(const T* x, data_ptr<T, dim>& data) const
{
IF_ERR(return, Val(x, data));
IF_ERR(return, Gra(x, data));
IF_ERR(return, Hes(x, data));
return 0;
}
};
/**
* Constraint
*
* sample usage:
* Functional->Val(x, data); // to get value.
* Functional->Jac(x, data); // to get Jacobian
* Functional->Hes(x, data); // to get hessian.
*
*/
template <typename T>
class Constraint
{
public:
virtual ~Constraint() {}
virtual size_t Nx() const = 0;
virtual size_t Nf() const = 0;
virtual int Val(const T* x, T* val) const = 0;
virtual int Jac(const T* x, const size_t off, std::vector<Eigen::Triplet<T>>* jac) const = 0;
virtual int Hes(const T* x, const size_t off, std::vector<std::vector<Eigen::Triplet<T>>>* hes) const
{
return __LINE__;
    }
    /* added default so that constraint_t::update() below compiles; concrete
       constraints can override this to refresh any cached state */
    virtual int update(const T* x)
    {
        return 0;
    }
};
/**
* null input exception, if no input, then throw this exception.
*
*/
class null_input_exception : public std::exception
{
public:
const char* what() const throw()
{
return "null input exception";
}
};
/**
* compatibility exception, if not compatible, then throw this exception.
*
*/
class compatibility_exception : public std::exception
{
public:
const char* what() const throw()
{
return "compatibility exception";
}
};
template <typename T, size_t dim>
class energy_t;
template <typename T, size_t dim>
std::shared_ptr<energy_t<T, dim>> build_energy_t(const std::vector<std::shared_ptr<Functional<T, dim>>>& buffer)
{
size_t total_dim = -1;
for (auto& e : buffer)
{
if (e.get())
{
total_dim = e->Nx();
break;
}
}
if (total_dim == -1)
{
throw null_input_exception();
}
for (auto& e : buffer)
{
if (e.get() && e->Nx() != total_dim)
{
throw compatibility_exception();
}
}
return std::make_shared<energy_t<T, dim>>(buffer, total_dim);
}
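/**
 * Illustrative usage (added sketch; `elastic` and `gravity` stand for
 * hypothetical Functional implementations):
 *
 *   std::vector<std::shared_ptr<Functional<double, 3>>> parts{elastic, gravity};
 *   auto total = build_energy_t<double, 3>(parts);
 *   total->Val_Gra_Hes(x, data);  // accumulates every term into `data`
 *
 * Null entries are skipped and build_energy_t throws if the entries disagree
 * on Nx(). Because energy_t stores a reference to the vector, `parts` must
 * outlive `total`.
 */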
/**
* energy class. the collection for some functionals.
*
*/
template <typename T, size_t dim>
class energy_t : public Functional<T, dim>
{
public:
energy_t(const std::vector<std::shared_ptr<Functional<T, dim>>>& buffer, const size_t total_dim)
: buffer_(buffer), dim_(total_dim) {}
public:
size_t Nx() const override
{
return dim_;
}
int Val(const T* x, std::shared_ptr<dat_str_core<T, dim>>& data) const
{
assert(x);
for (auto& e : buffer_)
{
if (e.get())
{
IF_ERR(return, e->Val(x, data));
}
}
return 0;
}
int Gra(const T* x, std::shared_ptr<dat_str_core<T, dim>>& data) const
{
assert(x);
for (auto& e : buffer_)
{
if (e.get())
{
IF_ERR(return, e->Gra(x, data));
}
}
return 0;
}
int Hes(const T* x, std::shared_ptr<dat_str_core<T, dim>>& data) const
{
assert(x);
for (auto& e : buffer_)
{
if (e.get())
{
IF_ERR(return, e->Hes(x, data));
}
}
return 0;
}
public:
const std::vector<std::shared_ptr<Functional<T, dim>>>& buffer_;
size_t dim_;
};
template <typename T>
class constraint_t;
template <typename T>
std::shared_ptr<constraint_t<T>> build_constraint_t(const std::vector<std::shared_ptr<Constraint<T>>>& buffer)
{
size_t xdim = -1;
for (auto& e : buffer)
{
if (e.get())
{
xdim = e->Nx();
break;
}
}
if (xdim == -1)
throw null_input_exception();
bool compatible = true;
for (auto& c : buffer)
{
if (c.get())
{
if (c->Nx() != xdim)
compatible = false;
}
}
if (!compatible)
throw compatibility_exception();
return std::make_shared<constraint_t<T>>(buffer, xdim);
}
/**
* constraint type class, collection of some constraint.
*
*/
template <typename T>
class constraint_t : public Constraint<T>
{
public:
template <typename T2>
friend std::shared_ptr<constraint_t<T2>> build_constraint_t(const std::vector<std::shared_ptr<Constraint<T2>>>& buffer);
constraint_t(const std::vector<std::shared_ptr<Constraint<T>>>& buffer, const size_t xdim)
: buffer_(buffer), xdim_(xdim) {}
public:
size_t Nx() const
{
return xdim_;
}
size_t Nf() const
{
size_t fdim = 0;
for (auto& c : buffer_)
{
if (c.get())
fdim += c->Nf();
}
return fdim;
}
int Val(const T* x, T* val) const
{
assert(x && val);
Eigen::Map<Eigen::Matrix<T, -1, 1>> v(val, Nf());
size_t offset = 0;
for (auto& c : buffer_)
{
if (c.get())
{
const size_t nf = c->Nf();
Eigen::Matrix<T, -1, 1> value(nf);
value.setZero();
IF_ERR(return, c->Val(x, value.data()));
v.segment(offset, nf) += value;
offset += nf;
}
}
return 0;
}
int Jac(const T* x, const size_t off, std::vector<Eigen::Triplet<T>>* jac) const
{
assert(x && jac);
size_t offset = off;
for (auto& c : buffer_)
{
if (c.get())
{
IF_ERR(return, c->Jac(x, offset, jac));
offset += c->Nf();
}
}
return 0;
}
int Hes(const T* x, const size_t off, std::vector<std::vector<Eigen::Triplet<T>>>* hes) const
{
assert(x && hes);
const size_t fdim = Nf();
if (hes->size() != fdim)
hes->resize(fdim);
size_t offset = 0;
for (auto& c : buffer_)
{
if (c.get())
{
IF_ERR(return, c->Hes(x, offset, hes));
offset += c->Nf();
}
}
return 0;
}
int update(const T* x)
{
assert(x);
for (auto& c : buffer_)
{
if (c.get())
{
IF_ERR(return, c->update(x));
}
}
return 0;
}
protected:
const std::vector<std::shared_ptr<Constraint<T>>>& buffer_;
size_t xdim_;
};
template <typename T, size_t field>
int compute_hes_pattern(const std::shared_ptr<Functional<T, field>>& energy,
std::shared_ptr<dat_str_core<T, field>>& dat_str)
{
const size_t total_dim = energy->Nx();
dat_str->set_zero();
Eigen::Matrix<T, -1, 1> random_x(total_dim);
{
#pragma omp parallel for
for (size_t i = 0; i < total_dim; ++i)
{
random_x(i) = i * 4.5 + i * i;
}
dat_str->set_zero();
__TIME_BEGIN__;
IF_ERR(return, energy->Hes(random_x.data(), dat_str));
dat_str->setFromTriplets();
const auto sm1 = dat_str->get_hes();
std::cout << "the number of nonzeros with comparison: \n"
<< (Eigen::Map<const Eigen::Matrix<T, -1, 1>>(sm1.valuePtr(), sm1.nonZeros()).array() != 0).count()
<< std::endl;
std::cout << "sparcity: " << T(sm1.nonZeros()) / T((sm1.rows() * sm1.cols())) << std::endl;
dat_str->set_hes_zero_after_pre_compute();
__TIME_END__("[INFO] Pre_compute_hes");
return 0;
}
}
} // namespace PhysIKA
#endif // NUMERIC_DEF_H
|
ark_heat1D_adapt_ompdev.c | /*---------------------------------------------------------------
* Programmer(s): Shelby Lockhart @ LLNL
*---------------------------------------------------------------
* Based on the serial code ark_heat1D_adapt.c developed
* by David R. Reynolds and parallelized with OpenMP 4.5
*---------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2019, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
*---------------------------------------------------------------
* Example problem:
*
* The following test simulates a simple 1D heat equation,
* u_t = k*u_xx + f
* for t in [0, 10], x in [0, 1], with initial conditions
* u(0,x) = 0
* Dirichlet boundary conditions, i.e.
* u_t(t,0) = u_t(t,1) = 0,
* and a heating term of the form
* f = 2*exp(-200*(x-0.25)*(x-0.25))
* - exp(-400*(x-0.7)*(x-0.7))
* + exp(-500*(x-0.4)*(x-0.4))
* - 2*exp(-600*(x-0.55)*(x-0.55));
*
* The spatial derivatives are computed using a three-point
* centered stencil (second order for a uniform mesh). The data
* is initially uniformly distributed over N points in the interval
* [0, 1], but as the simulation proceeds the mesh is adapted.
*
* This program solves the problem with a DIRK method, solved with
* a Newton iteration and SUNPCG linear solver, with a user-supplied
* Jacobian-vector product routine.
*---------------------------------------------------------------*/
/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts */
#include <nvector/nvector_openmpdev.h> /* OpenMPDEV N_Vector types, fcts., macros */
#include <sunlinsol/sunlinsol_pcg.h> /* access to PCG SUNLinearSolver */
#include <sundials/sundials_types.h> /* defs. of realtype, sunindextype, etc */
#include <sundials/sundials_math.h> /* def. of SUNRsqrt, etc. */
#ifdef _OPENMP
#include <omp.h> /* OpenMP functions */
#endif
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif
/* user data structure */
typedef struct {
sunindextype N; /* current number of intervals */
realtype *x_host; /* current mesh on host */
realtype *x_dev; /* current mesh on device */
realtype k; /* diffusion coefficient */
realtype refine_tol; /* adaptivity tolerance */
} *UserData;
/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
N_Vector fy, void *user_data, N_Vector tmp);
/* Private helper functions for mesh adaptivity */
realtype * adapt_mesh(N_Vector y, sunindextype *Nnew, UserData udata);
static int project(sunindextype Nold, realtype *xold, N_Vector yold,
                   sunindextype Nnew, realtype *xnew, N_Vector ynew);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/* Main Program */
int main() {
/* general problem parameters */
realtype T0 = RCONST(0.0); /* initial time */
realtype Tf = RCONST(1.0); /* final time */
realtype rtol = 1.e-3; /* relative tolerance */
realtype atol = 1.e-10; /* absolute tolerance */
realtype hscale = 1.0; /* time step change factor on resizes */
UserData udata = NULL;
realtype *data;
sunindextype N = 21; /* initial spatial mesh size */
realtype refine = 3.e-3; /* adaptivity refinement tolerance */
realtype k = 0.5; /* heat conductivity */
sunindextype i;
long int nni, nni_cur=0, nni_tot=0, nli, nli_tot=0;
int iout=0;
/* general problem variables */
int flag; /* reusable error-checking flag */
N_Vector y = NULL; /* empty vector for storing solution */
N_Vector y2 = NULL; /* empty vector for storing solution */
N_Vector yt = NULL; /* empty vector for swapping */
SUNLinearSolver LS = NULL; /* empty linear solver object */
void *arkode_mem = NULL; /* empty ARKStep memory structure */
FILE *XFID, *UFID;
realtype t, olddt, newdt;
realtype *xnew = NULL;
realtype *x_dev_temp = NULL;
sunindextype Nnew;
int dev, host;
/* get host and offloading device */
dev = omp_get_default_device();
host = omp_get_initial_device();
/* allocate and fill initial udata structure */
udata = (UserData) malloc(sizeof(*udata));
udata->N = N;
udata->k = k;
udata->refine_tol = refine;
udata->x_host = malloc(N * sizeof(realtype));
udata->x_dev = omp_target_alloc(N * sizeof(realtype), dev);
for (i=0; i<N; i++) udata->x_host[i] = 1.0*i/(N-1);
/* copy mesh to device */
omp_target_memcpy(udata->x_dev, udata->x_host, N * sizeof(realtype), 0, 0, dev, host);
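  /* Added note: omp_target_memcpy's argument order is (dst, src, length,
     dst_offset, src_offset, dst_device_num, src_device_num), so this copies
     N*sizeof(realtype) bytes host -> device with zero offsets on both
     sides. */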
/* Initial problem output */
printf("\n1D adaptive Heat PDE test problem:\n");
printf(" diffusion coefficient: k = %"GSYM"\n", udata->k);
printf(" initial N = %li\n", (long int) udata->N);
/* Initialize data structures */
y = N_VNew_OpenMPDEV(N); /* Create initial OpenMPDEV vector for solution */
if (check_flag((void *) y, "N_VNew_OpenMPDEV", 0)) return 1;
N_VConst(0.0, y); /* Set initial conditions */
/* output mesh to disk */
XFID=fopen("heat_mesh.txt","w");
/* output initial mesh to disk */
for (i=0; i<udata->N; i++) fprintf(XFID," %.16"ESYM, udata->x_host[i]);
fprintf(XFID,"\n");
/* Open output stream for results, access data array */
UFID=fopen("heat1D.txt","w");
/* output initial condition to disk */
N_VCopyFromDevice_OpenMPDEV(y);
data = N_VGetHostArrayPointer_OpenMPDEV(y);
for (i=0; i<udata->N; i++) fprintf(UFID," %.16"ESYM, data[i]);
fprintf(UFID,"\n");
/* Create the solver memory */
arkode_mem = ARKStepCreate(NULL, f, T0, y);
if (check_flag((void *) arkode_mem, "ARKStepCreate", 0)) return 1;
/* Set routines */
flag = ARKStepSetUserData(arkode_mem, (void *) udata); /* Pass udata to user functions */
if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
flag = ARKStepSetMaxNumSteps(arkode_mem, 10000); /* Increase max num steps */
if (check_flag(&flag, "ARKStepSetMaxNumSteps", 1)) return 1;
flag = ARKStepSStolerances(arkode_mem, rtol, atol); /* Specify tolerances */
if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;
flag = ARKStepSetAdaptivityMethod(arkode_mem, 2, 1, 0, NULL); /* Set adaptivity method */
if (check_flag(&flag, "ARKStepSetAdaptivityMethod", 1)) return 1;
flag = ARKStepSetPredictorMethod(arkode_mem, 0); /* Set predictor method */
if (check_flag(&flag, "ARKStepSetPredictorMethod", 1)) return 1;
/* Specify linearly implicit RHS, with time-dependent Jacobian */
flag = ARKStepSetLinear(arkode_mem, 1);
if (check_flag(&flag, "ARKStepSetLinear", 1)) return 1;
/* Initialize PCG solver -- no preconditioning, with up to N iterations */
LS = SUNLinSol_PCG(y, 0, N);
if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1;
/* Linear solver interface -- set user-supplied J*v routine (no 'jtsetup' required) */
flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL); /* Attach linear solver to ARKStep */
if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac); /* Set the Jacobian routine */
if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1;
/* Main time-stepping loop: calls ARKStep to perform the integration, then
prints results. Stops when the final time has been reached */
t = T0;
olddt = 0.0;
newdt = 0.0;
printf(" iout dt_old dt_new ||u||_rms N NNI NLI\n");
printf(" ----------------------------------------------------------------------------------------\n");
printf(" %4i %19.15"ESYM" %19.15"ESYM" %19.15"ESYM" %li %2i %3i\n",
iout, olddt, newdt, SUNRsqrt(N_VDotProd(y,y)/udata->N),
(long int) udata->N, 0, 0);
while (t < Tf) {
/* "set" routines */
flag = ARKStepSetStopTime(arkode_mem, Tf);
if (check_flag(&flag, "ARKStepSetStopTime", 1)) return 1;
flag = ARKStepSetInitStep(arkode_mem, newdt);
if (check_flag(&flag, "ARKStepSetInitStep", 1)) return 1;
/* call integrator */
flag = ARKStepEvolve(arkode_mem, Tf, y, &t, ARK_ONE_STEP);
if (check_flag(&flag, "ARKStep", 1)) return 1;
/* "get" routines */
flag = ARKStepGetLastStep(arkode_mem, &olddt);
if (check_flag(&flag, "ARKStepGetLastStep", 1)) return 1;
flag = ARKStepGetCurrentStep(arkode_mem, &newdt);
if (check_flag(&flag, "ARKStepGetCurrentStep", 1)) return 1;
flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
if (check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1)) return 1;
flag = ARKStepGetNumLinIters(arkode_mem, &nli);
if (check_flag(&flag, "ARKStepGetNumLinIters", 1)) return 1;
/* print current solution stats */
iout++;
printf(" %4i %19.15"ESYM" %19.15"ESYM" %19.15"ESYM" %li %2li %3li\n",
iout, olddt, newdt, SUNRsqrt(N_VDotProd(y,y)/udata->N),
(long int) udata->N, nni-nni_cur, nli);
nni_cur = nni;
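  /* nni reported by ARKStep accumulates over the whole integration, so plain
     assignment suffices; nli restarts whenever the linear solver is re-created
     after mesh adaptation below, so it must be summed */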
nni_tot = nni;
nli_tot += nli;
/* output results and current mesh to disk */
N_VCopyFromDevice_OpenMPDEV(y);
data = N_VGetHostArrayPointer_OpenMPDEV(y);
for (i=0; i<udata->N; i++) fprintf(UFID," %.16"ESYM, data[i]);
fprintf(UFID,"\n");
for (i=0; i<udata->N; i++) fprintf(XFID," %.16"ESYM, udata->x_host[i]);
fprintf(XFID,"\n");
/* adapt the spatial mesh */
xnew = adapt_mesh(y, &Nnew, udata);
  if (check_flag(xnew, "adapt_mesh", 0)) return 1;
/* create N_Vector of new length */
y2 = N_VNew_OpenMPDEV(Nnew);
if (check_flag((void *) y2, "N_VNew_OpenMPDEV", 0)) return 1;
x_dev_temp = omp_target_alloc(Nnew * sizeof(realtype), dev);
omp_target_memcpy(x_dev_temp, xnew, Nnew*sizeof(realtype), 0, 0, dev, host);
/* project solution onto new mesh */
flag = project(udata->N, udata->x_dev, y, Nnew, x_dev_temp, y2);
if (check_flag(&flag, "project", 1)) return 1;
/* delete old vector, old mesh */
N_VDestroy(y);
free(udata->x_host);
omp_target_free(udata->x_dev, dev);
/* swap x and xnew so that new mesh is stored in udata structure */
udata->x_host = xnew;
xnew = NULL;
udata->N = Nnew; /* store size of new mesh */
udata->x_dev = x_dev_temp;
x_dev_temp = NULL;
/* swap y and y2 so that y holds new solution */
yt = y;
y = y2;
y2 = yt;
/* call ARKStepResize to notify integrator of change in mesh */
flag = ARKStepResize(arkode_mem, y, hscale, t, NULL, NULL);
if (check_flag(&flag, "ARKStepResize", 1)) return 1;
/* destroy and re-allocate linear solver memory; reattach to ARKStep interface */
SUNLinSolFree(LS);
    LS = SUNLinSol_PCG(y, 0, (int) udata->N);  /* use the current (resized) mesh size for the iteration limit */
if (check_flag((void *)LS, "SUNLinSol_PCG", 0)) return 1;
flag = ARKStepSetLinearSolver(arkode_mem, LS, NULL); /* Attach linear solver to ARKStep */
if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
flag = ARKStepSetJacTimes(arkode_mem, NULL, Jac); /* Set the Jacobian routine */
if (check_flag(&flag, "ARKStepSetJacTimes", 1)) return 1;
}
printf(" ----------------------------------------------------------------------------------------\n");
/* print some final statistics */
printf(" Final solver statistics:\n");
printf(" Total number of time steps = %i\n", iout);
printf(" Total nonlinear iterations = %li\n", nni_tot);
printf(" Total linear iterations = %li\n\n", nli_tot);
/* Clean up and return with successful completion */
fclose(UFID);
fclose(XFID);
N_VDestroy(y); /* Free vectors */
free(udata->x_host); /* Free user data */
omp_target_free(udata->x_dev, dev);
free(udata);
ARKStepFree(&arkode_mem); /* Free integrator memory */
SUNLinSolFree(LS); /* Free linear solver */
return 0;
}
/*--------------------------------
* Functions called by the solver
*--------------------------------*/
/* f routine to compute the ODE RHS function f(t,y). */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
UserData udata = (UserData) user_data; /* access problem data */
sunindextype N = udata->N; /* set variable shortcuts */
realtype k = udata->k;
realtype *x = udata->x_dev;
realtype *Y=NULL, *Ydot=NULL;
realtype dxL, dxR;
sunindextype i;
int dev;
dev = omp_get_default_device();
Y = N_VGetDeviceArrayPointer_OpenMPDEV(y); /* access data arrays */
if (check_flag((void *) Y, "N_VGetDeviceArrayPointer", 0)) return 1;
Ydot = N_VGetDeviceArrayPointer_OpenMPDEV(ydot);
if (check_flag((void *) Ydot, "N_VGetDeviceArrayPointer", 0)) return 1;
N_VConst(0.0, ydot); /* Initialize ydot to zero - also handles boundary conditions */
  /* iterate over the interior of the domain, computing all equations;
     dxL and dxR are per-iteration temporaries, so they must be private */
#pragma omp target map(to:N) is_device_ptr(x, Ydot, Y) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1) private(dxL,dxR)
  for (i=1; i<N-1; i++) {
    dxL = x[i]-x[i-1];
    dxR = x[i+1]-x[i];
    Ydot[i] = Y[i-1]*k*2.0/(dxL*(dxL+dxR))
            - Y[i]*k*2.0/(dxL*dxR)
            + Y[i+1]*k*2.0/(dxR*(dxL+dxR))
            + 2.0*SUNRexp(-200.0*(x[i]-0.25)*(x[i]-0.25))   /* source term */
            - SUNRexp(-400.0*(x[i]-0.7)*(x[i]-0.7))
            + SUNRexp(-500.0*(x[i]-0.4)*(x[i]-0.4))
            - 2.0*SUNRexp(-600.0*(x[i]-0.55)*(x[i]-0.55));
  }
  /* the interior loop above skips node 0; apply the source term at the left boundary node as well */
#pragma omp target is_device_ptr(Ydot,x) device(dev)
{
Ydot[0] = 2.0*SUNRexp(-200.0*(x[0]-0.25)*(x[0]-0.25))
- SUNRexp(-400.0*(x[0]-0.7)*(x[0]-0.7))
+ SUNRexp(-500.0*(x[0]-0.4)*(x[0]-0.4))
- 2.0*SUNRexp(-600.0*(x[0]-0.55)*(x[0]-0.55));
}
return 0; /* Return with success */
}
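/* Note on the interior stencil above: on a non-uniform mesh with
   dxL = x[i]-x[i-1] and dxR = x[i+1]-x[i], the standard three-point
   approximation of the second derivative is
     u''(x_i) ~= 2*u[i-1]/(dxL*(dxL+dxR)) - 2*u[i]/(dxL*dxR)
               + 2*u[i+1]/(dxR*(dxL+dxR)),
   which reduces to (u[i-1] - 2*u[i] + u[i+1])/dx^2 when dxL == dxR == dx */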
/* Jacobian routine to compute J(t,y) = df/dy. */
static int Jac(N_Vector v, N_Vector Jv, realtype t, N_Vector y,
N_Vector fy, void *user_data, N_Vector tmp)
{
UserData udata = (UserData) user_data; /* variable shortcuts */
sunindextype N = udata->N;
realtype k = udata->k;
realtype *x = udata->x_dev;
realtype *V=NULL, *JV=NULL;
realtype dxL, dxR;
sunindextype i;
int dev;
dev = omp_get_default_device();
V = N_VGetDeviceArrayPointer_OpenMPDEV(v); /* access data arrays */
if (check_flag((void *) V, "N_VGetDeviceArrayPointer", 0)) return 1;
JV = N_VGetDeviceArrayPointer_OpenMPDEV(Jv);
if (check_flag((void *) JV, "N_VGetDeviceArrayPointer", 0)) return 1;
N_VConst(0.0, Jv); /* initialize Jv product to zero - also handles boundary conditions */
/* iterate over domain, computing all Jacobian-vector products */
#pragma omp target map(to:N) is_device_ptr(x, JV, V) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1) private(dxL,dxR)
for (i=1; i<N-1; i++) {
dxL = x[i]-x[i-1];
dxR = x[i+1]-x[i];
JV[i] = V[i-1]*k*2.0/(dxL*(dxL+dxR))
- V[i]*k*2.0/(dxL*dxR)
+ V[i+1]*k*2.0/(dxR*(dxL+dxR));
}
return 0; /* Return with success */
}
/*-------------------------------
* Private helper functions
*-------------------------------*/
/* Adapts the current mesh, using a simple adaptivity strategy of
refining when an approximation of the scaled second-derivative is
too large. We only do this in one sweep, so no attempt is made to
ensure the resulting mesh meets these same criteria after adaptivity:
y [input] -- the current solution vector
Nnew [output] -- the size of the new mesh
udata [input] -- the current system information
The return for this function is a pointer to the new mesh. */
realtype * adapt_mesh(N_Vector y, sunindextype *Nnew, UserData udata)
{
sunindextype i, j;
int *marks=NULL, *marks_dev=NULL;
realtype ydd, refine_tol, *xold=NULL, *xnew=NULL, *Y_dev=NULL;
sunindextype num_refine, N_new, N;
int dev, host;
dev = omp_get_default_device();
host = omp_get_initial_device();
/* Access current solution and mesh arrays */
xold = udata->x_host;
Y_dev = N_VGetDeviceArrayPointer_OpenMPDEV(y);
if (check_flag((void *) Y_dev, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return NULL;
/*N_VCopyFromDevice_OpenMPDEV(y);*/
  /* create marking array (host copy is zero-initialized by calloc) */
  marks = calloc(udata->N-1, sizeof(int));
  marks_dev = omp_target_alloc((udata->N-1) * sizeof(int), dev);
  /* omp_target_alloc does not zero device memory, so copy the zeroed host
     array over before marking */
  omp_target_memcpy(marks_dev, marks, (udata->N-1)*sizeof(int), 0, 0, dev, host);
N = udata->N;
refine_tol = udata->refine_tol;
/* perform marking:
0 -> leave alone
1 -> refine */
#pragma omp target map(to:N,refine_tol) is_device_ptr(marks_dev,Y_dev) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1) private(ydd)
  for (i=1; i<N-1; i++) {
    /* approximate scaled second-derivative */
    ydd = Y_dev[i-1] - 2.0*Y_dev[i] + Y_dev[i+1];
    /* check for refinement; mark both intervals adjacent to node i */
    if (fabs(ydd) > refine_tol) {
      marks_dev[i-1] = 1;
      marks_dev[i] = 1;
    }
  }
omp_target_memcpy(marks, marks_dev, (N-1)*sizeof(int), 0, 0, host, dev);
/* allocate new mesh */
num_refine = 0;
for (i=0; i<udata->N-1; i++)
if (marks[i] == 1) num_refine++;
N_new = udata->N + num_refine;
*Nnew = N_new; /* Store new array length */
xnew = malloc((N_new) * sizeof(realtype));
/* fill new mesh */
xnew[0] = xold[0]; /* store endpoints */
xnew[N_new-1] = xold[N-1];
j=1;
  /* iterate over old intervals */
  for (i=0; i<N-1; i++) {
    if (marks[i] == 1) {
      /* refine marked interval: insert the midpoint, then the right endpoint */
      xnew[j++] = 0.5*(xold[i]+xold[i+1]);
      xnew[j++] = xold[i+1];
    } else {
      /* reuse unmarked interval */
      xnew[j++] = xold[i+1];
    }
  }
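  /* e.g. with old mesh {0.0, 0.5, 1.0} and marks {1, 0}, the loop above
     produces the new mesh {0.0, 0.25, 0.5, 1.0} */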
/* verify that new mesh is legal */
for (i=0; i<N_new-1; i++) {
if (xnew[i+1] <= xnew[i]) {
fprintf(stderr,"adapt_mesh error: illegal mesh created\n");
free(xnew);
return NULL;
}
}
free(marks); /* Delete marking array */
omp_target_free(marks_dev, dev);
return xnew; /* Return with success */
}
/* Projects one vector onto another:
Nold [input] -- the size of the old mesh
xold [input] -- the old mesh
yold [input] -- the vector defined over the old mesh
Nnew [input] -- the size of the new mesh
xnew [input] -- the new mesh
ynew [output] -- the vector defined over the new mesh
(allocated prior to calling project) */
static int project(sunindextype Nold, realtype *xold, N_Vector yold,
sunindextype Nnew, realtype *xnew, N_Vector ynew)
{
sunindextype iv, i, j;
realtype *Yold=NULL, *Ynew=NULL;
int dev = omp_get_default_device();
/* Access data arrays */
  Yold = N_VGetDeviceArrayPointer_OpenMPDEV(yold);   /* access data arrays */
  if (check_flag((void *) Yold, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
  Ynew = N_VGetDeviceArrayPointer_OpenMPDEV(ynew);
  if (check_flag((void *) Ynew, "N_VGetDeviceArrayPointer_OpenMPDEV", 0)) return 1;
  /* loop over the new mesh, find the enclosing interval within the old mesh,
     and perform piecewise linear interpolation from yold to ynew; each
     iteration searches independently, so the loop is safe to parallelize */
#pragma omp target is_device_ptr(Yold,Ynew,xnew,xold) device(dev)
#pragma omp teams distribute parallel for schedule(static, 1) private(iv,j)
  for (i=0; i<Nnew; i++) {
    iv = Nold-2;                     /* fallback: last valid interval */
    for (j=0; j<Nold-1; j++) {
      if (xnew[i] >= xold[j] && xnew[i] <= xold[j+1]) {
        iv = j;
        break;
      }
    }
    /* linear interpolation between xold[iv] and xold[iv+1] (Lagrange form) */
    Ynew[i] = Yold[iv]*(xnew[i]-xold[iv+1])/(xold[iv]-xold[iv+1])
            + Yold[iv+1]*(xnew[i]-xold[iv])/(xold[iv+1]-xold[iv]);
  }
return 0; /* Return with success */
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer
*/
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
int *errflag;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && flagvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
/* Check if flag < 0 */
else if (opt == 1) {
errflag = (int *) flagvalue;
if (*errflag < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
funcname, *errflag);
return 1; }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && flagvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
return 0;
}
/*---- end of file ----*/
|
operator_tune-inl.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
#ifndef MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
#define MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
#include <dmlc/base.h>
#include <dmlc/logging.h>
#include <mshadow/base.h>
#include <atomic>
#include <cstdint>
#include <chrono>
#include <thread>
#include <string>
#include <vector>
#include <algorithm>
#include <list>
#include <random>
#include <unordered_set>
#include "./mxnet_op.h"
#include "./operator_tune.h"
#if (__GNUC__ >= 4 || (__GNUC__ >= 3 && __GNUC_MINOR__ >= 4)) && !defined(__mips__)
# define HAS_CXA_DEMANGLE 1
#else
# define HAS_CXA_DEMANGLE 0
#endif
#if HAS_CXA_DEMANGLE
#include <cxxabi.h>
#endif
namespace mxnet {
namespace op {
#ifndef MXNET_NO_INLINE
#ifdef _MSC_VER
#define MXNET_NO_INLINE __declspec(noinline)
#else
#define MXNET_NO_INLINE __attribute__((noinline))
#endif
#endif // MXNET_NO_INLINE
#define OUTSIDE_COUNT_SHIFT 9
namespace tune {
/*!
* \brief Convert TuningMode value to a string representation
* \param tm Scalar TuningMode value
* \return Character pointer to a string representing the TuningMode value
*/
inline const char *TuningModeToString(const TuningMode tm) {
switch (tm) {
case kAuto:
return "Auto";
case kNeverOMP:
return "NeverOMP";
case kAlwaysOMP:
return "AlwaysOMP";
default:
CHECK(false) << "Unknown TuningMode type: " << static_cast<int>(tm);
return "<unknown>";
}
}
} // namespace tune
/*!
* \brief Engine to tune kernel operations
* \tparam DType Data type to be used when tuning the kernel operations
* \remarks The basic concept here is that we time how long a trivial loop takes with and without
* OMP, subtracting the non-OMP run from the OMP run, which gives us the time
* that the OMP overhead takes. Times were found to be relatively invariant with
 *          regard to the number of threads/cores on a given machine.
* Secondly, supplied operators are run and timed (for each data type) in order to determine
* their individual time cost.
*
* Knowing the following items, we can determine how long the OMP and non-OMP run
* is expected to take:
* 1) OMP overhead time
* 2) Number of iterations required
* 3) Number of threads to be used if we choose the OMP method
* 4) The data type
*
* Therefore, at Kernel::Launch() time, we can estimate whether it is faster to use OMP or not
* for the given kernel operator.
*
 * Results and efficiency of the tuning are tested in the gtest OMP_TUNING test suite
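 *
 * As a rough sketch of that decision (illustrative pseudocode, not the exact
 * implementation):
 * \code
 * // prefer OMP when overhead plus parallel work beats serial work
 * const uint64_t serial_ns   = N * workload_ns;
 * const uint64_t parallel_ns = omp_overhead_ns + serial_ns / thread_count;
 * const bool use_omp = parallel_ns < serial_ns;
 * \endcode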
*/
template<typename DType>
class OperatorTune : public OperatorTuneByType<DType> {
public:
using Tick = OperatorTuneBase::Tick;
using duration_t = OperatorTuneBase::duration_t;
using OperatorTuneByType<DType>::tuning_mode_;
/*!
* \brief Constructor
*/
OperatorTune() {
TuneAll();
}
/*!
* \brief Initialize the OperatorTune object
* \return Whether the OperatorTune object was successfully initialized
*/
static bool Initialize() {
if (!initialized_) {
initialized_ = true;
// Generate some random data for calling the operator kernels
data_set_.reserve(0x100);
std::random_device rd;
std::mt19937 gen(rd());
if (!std::is_integral<DType>::value) {
std::uniform_real_distribution<> dis(-1, 1);
for (int n = 0; n < 0x100; ++n) {
const auto val = static_cast<DType>(dis(gen));
// If too close to zero, try again
if (std::fabs(static_cast<double>(val)) < 1e-5) {
--n;
continue;
}
data_set_.emplace_back(val);
}
} else {
std::uniform_int_distribution<> dis(-128, 127);
for (int n = 0; n < 0x100; ++n) {
const auto val = static_cast<DType>(dis(gen));
// If zero, try again
if (!val) {
--n;
continue;
}
data_set_.emplace_back(val);
}
}
// Use this environment variable to generate new tuning statistics
// In order to avoid printing too many copies, only the float32 object prints
output_tuning_data_ = mshadow::DataType<DType>::kFlag == mshadow::kFloat32
&& dmlc::GetEnv("MXNET_OUTPUT_TUNING_DATA", false);
// If outputting tuning data, then also output verbose logging info
OperatorTuneBase::verbose_tuning_info_ = dmlc::GetEnv("MXNET_VERBOSE_TUNING_INFO", false);
OperatorTuneBase::tuning_weight_scale_ = dmlc::GetEnv("MXNET_TUNING_WEIGHT_SCALE", 0.0);
// This isn't actually supposed to be multithreaded init, but just to be sure the change is
// seen everywhere, using atomic bool.
if (!OperatorTuneBase::calculated_.load()) {
      // Not especially concerned with a race condition, since this should
// run when only one thread is active (static init), just don't cache this variable
OperatorTuneBase::calculated_.store(true);
OperatorTuneBase::omp_overhead_ns_ = GetOMPLoopOverhead();
std::string config = dmlc::GetEnv("MXNET_USE_OPERATOR_TUNING", std::string());
ParseEnablerConfig(config);
}
if (OperatorTuneBase::verbose_tuning_info_) {
LOG(INFO) << "OMP overhead: " << OperatorTuneBase::omp_overhead_ns_ << " nanoseconds";
}
}
return true;
}
/*!
* \brief Schedule a tuning run
* \tparam OP Operator to tune
* \param tune_func Function to call which tunes the operator
* \return true if the tune operation was scheduled
*/
template<typename OP>
static bool ScheduleTune(void (*tune_func)()) {
#ifdef MXNET_USE_OPERATOR_TUNING
if (tune_func) {
GetTuningList()->push_back(tune_func);
operator_names_.insert(demangle(typeid(OP).name()));
return true;
}
return false;
#else
return true;
#endif
}
/*!
* \brief Is the template parameter type a tuned kernel?
* \tparam OP kernel operator type
* \return true if the operator/kernel is tuned
*/
template<typename OP>
static bool IsTuned() {
return operator_names_.find(demangle(typeid(OP).name())) != operator_names_.end();
}
  /*!
* \brief Tune all registered kernel operators that haven't already been tuned
*/
static bool TuneAll() {
Initialize();
std::list<void (*)()> *tl = GetTuningList();
const size_t size_save = tl->size(); // For checking if anything asynchronous is
// adding or removing items, which is forbidden
if (output_tuning_data_ && !tl->empty()) {
// Only emit this once, use the most common case, 'float32'
if (mshadow::DataType<DType>::kFlag == mshadow::kFloat32) {
std::cout << "OperatorTuneBase::duration_t "
<< "OperatorTuneBase::omp_overhead_ns_ = " << OperatorTuneBase::omp_overhead_ns_
<< ";" << std::endl << std::flush;
}
}
const Tick start = std::chrono::high_resolution_clock::now();
for (auto i : *tl) {
(*i)();
}
if (OperatorTuneBase::verbose_tuning_info_) {
const duration_t duration = OperatorTune::GetDurationInNanoseconds(start);
LOG(INFO) << "Op Tuning for " << type_name<DType>()
<< " took " << (duration / 1000000) << " ms";
}
CHECK_EQ(size_save, tl->size()) << "Tuning list size should not have changed while tuning";
tl->clear();
return true;
}
/*!
* \brief Return set of operator names that were registered to be tuned. Does not imply
* that the operator has been tuned.
* \return Set of operator/kernel names that were registered for tuning
*/
static const std::unordered_set<std::string>& TunedOperatorNames() {
return operator_names_;
}
protected:
/*!
* \brief Get the list of tuning function calls for the operators
* \return Pointer to list of tuning function calls
*/
static std::list<void (*)()> *GetTuningList();
/*!
* \brief Demangle typeid::name() in order to generate source macros
* \param name C++ Mangled name
* \return Demangled name as string
*/
static inline std::string demangle(const char *name) {
#if HAS_CXA_DEMANGLE
int status = -4; // some arbitrary value to eliminate the compiler warning
std::unique_ptr<char, void (*)(void *)> res{
abi::__cxa_demangle(name, nullptr, nullptr, &status),
&std::free
};
return status ? name : res.get();
#else
return name;
#endif
}
/*!
* \brief Type name as string
* \tparam T Type
* \return std::string representing the human-readable demangled type name
*/
template<typename T> static inline std::string type_name() {
return demangle(typeid(T).name());
}
/*! \brief Measure OMP overhead for a trivial OMP loop using all cores
* \param omp_thread_count - Number of OMP threads to use in the timing test
* \returns Duration in nanoseconds for the OMP overhead (time to initiate and close the
* OMP session)
*/
static duration_t GetOMPLoopOverhead(const size_t omp_thread_count) {
CHECK_GT(omp_thread_count, 1); // Don't try to use OMP for one thread
int wl_count = OperatorTuneBase::WORKLOAD_COUNT;
Tick start = std::chrono::high_resolution_clock::now();
// Use two loops in order to simulate OMP outside timing
for (size_t i = 0; i < OUTSIDE_COUNT; ++i) {
for (int x = 0; x < wl_count; ++x) {
// trivial operation
volatile_int_ += x;
}
}
const OperatorTuneBase::duration_t no_omp_duration =
OperatorTuneBase::GetDurationInNanoseconds(start);
// Scale OMP iterations by type calculation complexity
double factor;
// if tuning_weight_scale_ is a number that looks valid, use it as the factor
if (OperatorTuneBase::tuning_weight_scale_ > 0.01) {
factor = OperatorTuneBase::tuning_weight_scale_;
} else {
      // These are empirically-determined constants found by balancing between
      // a desktop (8 & 12 CPUs) and large cloud instances (32 & 64 CPUs)
switch (mshadow::DataType<DType>::kFlag) {
case mshadow::kUint8:
case mshadow::kInt8:
factor = 8.5;
break;
case mshadow::kInt32:
factor = 4.5;
break;
case mshadow::kInt64:
factor = 2;
break;
case mshadow::kFloat64:
factor = 1.25;
break;
case mshadow::kFloat32:
default:
factor = 1.0;
break;
}
}
wl_count = static_cast<int>(factor * OperatorTuneBase::WORKLOAD_COUNT * omp_thread_count);
start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < OUTSIDE_COUNT; ++i) {
#pragma omp parallel for num_threads(omp_thread_count)
for (int x = 0; x < wl_count; ++x) {
// trivial operation
volatile_int_ += x;
}
}
const duration_t omp_duration = OperatorTuneBase::GetDurationInNanoseconds(start)
- no_omp_duration;
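    // right-shifting by OUTSIDE_COUNT_SHIFT divides by the OUTSIDE_COUNT passes,
    // yielding the average per-pass overhead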
return omp_duration >> OUTSIDE_COUNT_SHIFT;
}
  /*! \brief Measure typical OMP overhead for a trivial OMP loop, taking the median across thread counts
   * \returns Time in nanoseconds to initialize/cleanup when executing an OMP block
*/
static duration_t GetOMPLoopOverhead() {
    // It was found empirically that OMP overhead was not heavily tied to the number of cores,
    // so take the median across all tested core counts
const auto max_cores = static_cast<size_t>(omp_get_num_procs()) >> 1;
if (max_cores >= 2) {
std::vector<duration_t> core_times;
// Take care of any OMP lazy-init with a throwaway call
for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) {
GetOMPLoopOverhead(omp_threads);
}
std::vector<duration_t> durations;
durations.reserve(max_cores - 1);
for (size_t omp_threads = 2; omp_threads <= max_cores; ++omp_threads) {
const duration_t duration = GetOMPLoopOverhead(omp_threads);
if (OperatorTuneBase::verbose_tuning_info_) {
LOG(INFO) << "OMP Thread Count: " << omp_threads << ", overhead: " << duration << " ns";
}
durations.emplace_back(duration);
}
// return median
std::sort(durations.begin(), durations.end());
return durations[durations.size() >> 1];
}
return INT_MAX; // If only one core, then never use OMP (say the overhead is huge)
}
/*!
* \brief Some string utility functions that aren't specific to tuning
*/
struct StringUtil {
/*!
     * \brief Trim whitespace from the beginning and end of a string
     * \param s String to trim
* \return reference to the modified string. This is the same std::string object as what was
* supplied in the parameters
*/
static std::string &trim(std::string *s) {
s->erase(s->begin(), std::find_if(s->begin(), s->end(), [](int ch) {
return !std::isspace(ch);
}));
s->erase(std::find_if(s->rbegin(), s->rend(), [](int ch) {
return !std::isspace(ch);
}).base(), s->end());
return *s;
}
/*!
* \brief Tokenize a string into a list of tokens
* \param s String to tokenize
* \return std::list of tokens
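     *         e.g. string2list("float32, int8") yields {"float32", "int8"}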
*/
static std::list<std::string> string2list(const std::string &s) {
std::list<std::string> res;
std::istringstream iss(s);
std::string token;
while (std::getline(iss, token, ',')) {
trim(&token);
if (!token.empty()) {
res.push_back(token);
}
}
return res;
}
};
/*!
* \brief Get data type from string representation
* \warning Do not call from a performance-sensitive area
*/
static int type_from_string(const std::string& type_string) {
if (type_string == "float32")
return mshadow::kFloat32;
if (type_string == "float64")
return mshadow::kFloat64;
if (type_string == "float16")
return mshadow::kFloat16;
if (type_string == "int8")
return mshadow::kInt8;
if (type_string == "uint8")
return mshadow::kUint8;
if (type_string == "int32")
return mshadow::kInt32;
if (type_string == "int64")
return mshadow::kInt64;
return -1; // invalid
}
/*!
   * \brief Parse the MXNET_USE_OPERATOR_TUNING environment variable
   * \param config String value of the MXNET_USE_OPERATOR_TUNING environment variable
* Values:
* 0=disable all
* 1=enable all
   *          float32,float16,... = comma-separated list of types to enable; all types not listed are disabled
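   *          e.g. MXNET_USE_OPERATOR_TUNING=float32,float16 enables tuning only for those two types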
*/
static void ParseEnablerConfig(std::string config) {
StringUtil::trim(&config);
if (!config.empty()) {
// First disable all
OperatorTuneByType<float>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<double>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAlwaysOMP);
OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAlwaysOMP);
// See if it's a non-number (ie type or list of types)
if (!::isdigit(config[0])) {
OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
std::list<std::string> tokens = StringUtil::string2list(config);
for (const std::string& stype : tokens) {
        // We don't have an enum for half_t
const int typ = type_from_string(stype);
if (typ >= 0) {
switch (typ) {
case mshadow::kFloat32:
OperatorTuneByType<float>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kFloat64:
OperatorTuneByType<double>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kFloat16:
OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kInt8:
OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kUint8:
OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kInt32:
OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto);
break;
case mshadow::kInt64:
OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto);
break;
default:
CHECK(false) << "Unsupported tuning data type: " << stype;
break;
}
} else {
// -1 is error
LOG(WARNING) << "Unknown data type to be tuned: " << stype;
}
}
} else {
if (std::atoi(config.c_str()) > 0) {
OperatorTuneByType<float>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<double>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<int8_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<uint8_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<int32_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<int64_t>::set_tuning_mode(tune::kAuto);
OperatorTuneByType<mshadow::half::half_t>::set_tuning_mode(tune::kAuto);
}
}
}
}
/*! \brief Whether this object has been initialized */
static bool initialized_;
/*! \brief Number of passes to obtain an average */
static constexpr duration_t OUTSIDE_COUNT = (1 << OUTSIDE_COUNT_SHIFT);
/*! \brief Random data for timing operator calls */
static std::vector<DType> data_set_;
/*! \brief Operators tuned */
static std::unordered_set<std::string> operator_names_;
  /*! \brief Arbitrary object to modify in OMP loop */
static volatile int volatile_int_;
/*! \brief Output insertable (into code) instantiation+default-value macros */
static bool output_tuning_data_;
};
/*!
* \brief Class that tunes unary operators
* \tparam DType Data type to be used when tuning the kernel operations
*/
template<typename DType>
class UnaryOpTune : public OperatorTune<DType> {
protected:
typedef OperatorTune<DType> Super;
using duration_t = typename Super::duration_t;
using Tick = typename Super::Tick;
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take no arguments (ie set_zero)
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetBlankWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
      *res += OP::Map();
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take one argument (ie sqrt())
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetUnaryWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res = OP::Map(Super::data_set_[i & 0xFF]);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take two arguments (ie elemwise_add())
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static inline duration_t GetBinaryWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res = OP::Map(Super::data_set_[i & 0xFF], Super::data_set_[(i + 1) & 0xFF]);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for kernels that take three arguments (ie backwards_grad<elemwise_add>())
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetTertiaryWorkload() {
DType tmp;
volatile DType *res = &tmp;
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
// Use a logical AND instead of mod to avoid affecting the timing result with a slow divide
*res = OP::Map(Super::data_set_[i & 0xFF],
Super::data_set_[(i + 1) & 0xFF],
Super::data_set_[i & 0xFF]);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
/*!
* \brief Determine the time it takes a kernel operator to execute WORKLOAD_COUNT iterations
* Used for mxnet-like kernels that take no arguments)
* \tparam OP Kernel operator
* \return Duration in nanoseconds for the 'WORKLOAD_COUNT' operations
*/
template<typename OP>
static duration_t GetBlankWorkloadEx() {
std::unique_ptr<DType[]> tmp(new DType[Super::WORKLOAD_COUNT]);
DType *tmp_ptr = tmp.get();
const Tick start = std::chrono::high_resolution_clock::now();
for (size_t i = 0; i < Super::WORKLOAD_COUNT; ++i) {
OP::Map(i, tmp_ptr);
}
const duration_t omp_duration = Super::GetDurationInNanoseconds(start);
return omp_duration ? omp_duration : 1;
}
public:
/*!
* \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes an operator which takes no arguments
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneBlankOperator() {
mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkload<OP>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes an operator which takes one argument
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneUnaryOperator() {
mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetUnaryWorkload<OP>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_UNARY_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune the specified kernel operator. Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes a backward operator which takes one argument
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneUnaryBackwardOperator() {
mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] =
GetBinaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_UNARY_WORKLOAD_BWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune the specified "mxnet_op-type" kernel operator.
* Optionally print out C++ macro that defines the
* tuning data variable and the default tuned value
* This function tunes an operator which takes no arguments
* \tparam OP The kernel operator to be tuned
*/
template<typename OP>
static void TuneBlankOperatorEx() {
mxnet::op::mxnet_op::tuned_op<OP, DType>::workload_[0] = GetBlankWorkloadEx<OP>();
if (Super::output_tuning_data_) {
std::cout << "IMPLEMENT_BLANK_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Determine whether to use OMP based upon both timing and configuration using the
* given (templated) operator's workload
* \tparam OP Operator whose workload to use (tuned_op::workload_[0])
* \param N Number of iterations desired
* \param thread_count Number of OMP threads available to perform the iterations
* \returns Whether it's faster to use OMP for these iterations
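   *
   * Typical call site (illustrative sketch; assumes the tuned_op alias from
   * mxnet_op.h and an OMP thread count from omp_get_max_threads()):
   * \code
   * using TOp = mxnet::op::mxnet_op::tuned_op<OP, DType>;
   * const bool omp = UseOMP<TOp>(N, static_cast<size_t>(omp_get_max_threads()));
   * \endcode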
*/
template<typename OP>
inline static bool UseOMP(size_t N, size_t thread_count) {
return OperatorTune<DType>::UseOMP(N,
thread_count,
static_cast<uint64_t>(N) * OP::workload_[0]);
}
};
/*!
* \brief Class that tunes binary and unary operators
* \tparam DType Data type to be used when tuning the kernel operations
*/
template<typename DType>
class BinaryOpTune : public UnaryOpTune<DType> {
protected:
typedef UnaryOpTune<DType> Super;
public:
/*!
* \brief Tune a generic binary operator
* @tparam OP - Operator type
*/
template<typename OP>
static void TuneBinaryOperator() {
mxnet_op::tuned_op<OP, DType>::workload_[0] = Super::template GetBinaryWorkload<OP>();
if (Super::Super::output_tuning_data_) {
std::cout << "IMPLEMENT_BINARY_WORKLOAD_FWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
/*!
* \brief Tune binary backward operator
* \tparam OP - operator
*/
template<typename OP>
static void TuneBinaryBackwardOperator() {
mxnet::op::mxnet_op::tuned_op<mxnet_op::backward_grad_tuned<OP>, DType>::workload_[0] =
Super::template GetTertiaryWorkload<mxnet::op::mxnet_op::backward_grad_tuned<OP>>();
if (Super::Super::output_tuning_data_) {
std::cout << "IMPLEMENT_BINARY_WORKLOAD_BWD("
<< Super::template type_name<OP>()
<< "); // NOLINT()" << std::endl << std::flush; // For long lines
}
}
};
#undef OUTSIDE_COUNT_SHIFT
#undef WORKLOAD_COUNT_SHIFT
} // namespace op
} // namespace mxnet
#endif // MXNET_OPERATOR_OPERATOR_TUNE_INL_H_
|
truncate.c | /* Generated by Cython 0.29.21 */
/* BEGIN: Cython Metadata
{
"distutils": {
"depends": [
"/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h",
"/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/core/include/numpy/ufuncobject.h",
"draco/util/truncate.hpp"
],
"extra_compile_args": [
"-fopenmp"
],
"extra_link_args": [
"-fopenmp"
],
"include_dirs": [
"draco/util",
"/cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/core/include"
],
"name": "draco.util.truncate",
"sources": [
"draco/util/truncate.pyx"
]
},
"module_name": "draco.util.truncate"
}
END: Cython Metadata */
#define PY_SSIZE_T_CLEAN
#include "Python.h"
#ifndef Py_PYTHON_H
#error Python headers needed to compile C extensions, please install development version of Python.
#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000)
#error Cython requires Python 2.6+ or Python 3.3+.
#else
#define CYTHON_ABI "0_29_21"
#define CYTHON_HEX_VERSION 0x001D15F0
#define CYTHON_FUTURE_DIVISION 0
#include <stddef.h>
#ifndef offsetof
#define offsetof(type, member) ( (size_t) & ((type*)0) -> member )
#endif
#if !defined(WIN32) && !defined(MS_WINDOWS)
#ifndef __stdcall
#define __stdcall
#endif
#ifndef __cdecl
#define __cdecl
#endif
#ifndef __fastcall
#define __fastcall
#endif
#endif
#ifndef DL_IMPORT
#define DL_IMPORT(t) t
#endif
#ifndef DL_EXPORT
#define DL_EXPORT(t) t
#endif
#define __PYX_COMMA ,
#ifndef HAVE_LONG_LONG
#if PY_VERSION_HEX >= 0x02070000
#define HAVE_LONG_LONG
#endif
#endif
#ifndef PY_LONG_LONG
#define PY_LONG_LONG LONG_LONG
#endif
#ifndef Py_HUGE_VAL
#define Py_HUGE_VAL HUGE_VAL
#endif
#ifdef PYPY_VERSION
#define CYTHON_COMPILING_IN_PYPY 1
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 0
#undef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 0
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#if PY_VERSION_HEX < 0x03050000
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#undef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#undef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 1
#undef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 0
#undef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 0
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#elif defined(PYSTON_VERSION)
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 1
#define CYTHON_COMPILING_IN_CPYTHON 0
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#undef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 0
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#undef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 0
#undef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 0
#undef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT 0
#undef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE 0
#undef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS 0
#undef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK 0
#else
#define CYTHON_COMPILING_IN_PYPY 0
#define CYTHON_COMPILING_IN_PYSTON 0
#define CYTHON_COMPILING_IN_CPYTHON 1
#ifndef CYTHON_USE_TYPE_SLOTS
#define CYTHON_USE_TYPE_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYTYPE_LOOKUP
#define CYTHON_USE_PYTYPE_LOOKUP 0
#elif !defined(CYTHON_USE_PYTYPE_LOOKUP)
#define CYTHON_USE_PYTYPE_LOOKUP 1
#endif
#if PY_MAJOR_VERSION < 3
#undef CYTHON_USE_ASYNC_SLOTS
#define CYTHON_USE_ASYNC_SLOTS 0
#elif !defined(CYTHON_USE_ASYNC_SLOTS)
#define CYTHON_USE_ASYNC_SLOTS 1
#endif
#if PY_VERSION_HEX < 0x02070000
#undef CYTHON_USE_PYLONG_INTERNALS
#define CYTHON_USE_PYLONG_INTERNALS 0
#elif !defined(CYTHON_USE_PYLONG_INTERNALS)
#define CYTHON_USE_PYLONG_INTERNALS 1
#endif
#ifndef CYTHON_USE_PYLIST_INTERNALS
#define CYTHON_USE_PYLIST_INTERNALS 1
#endif
#ifndef CYTHON_USE_UNICODE_INTERNALS
#define CYTHON_USE_UNICODE_INTERNALS 1
#endif
#if PY_VERSION_HEX < 0x030300F0
#undef CYTHON_USE_UNICODE_WRITER
#define CYTHON_USE_UNICODE_WRITER 0
#elif !defined(CYTHON_USE_UNICODE_WRITER)
#define CYTHON_USE_UNICODE_WRITER 1
#endif
#ifndef CYTHON_AVOID_BORROWED_REFS
#define CYTHON_AVOID_BORROWED_REFS 0
#endif
#ifndef CYTHON_ASSUME_SAFE_MACROS
#define CYTHON_ASSUME_SAFE_MACROS 1
#endif
#ifndef CYTHON_UNPACK_METHODS
#define CYTHON_UNPACK_METHODS 1
#endif
#ifndef CYTHON_FAST_THREAD_STATE
#define CYTHON_FAST_THREAD_STATE 1
#endif
#ifndef CYTHON_FAST_PYCALL
#define CYTHON_FAST_PYCALL 1
#endif
#ifndef CYTHON_PEP489_MULTI_PHASE_INIT
#define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000)
#endif
#ifndef CYTHON_USE_TP_FINALIZE
#define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1)
#endif
#ifndef CYTHON_USE_DICT_VERSIONS
#define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1)
#endif
#ifndef CYTHON_USE_EXC_INFO_STACK
#define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3)
#endif
#endif
#if !defined(CYTHON_FAST_PYCCALL)
#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1)
#endif
#if CYTHON_USE_PYLONG_INTERNALS
#include "longintrepr.h"
#undef SHIFT
#undef BASE
#undef MASK
#ifdef SIZEOF_VOID_P
enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) };
#endif
#endif
#ifndef __has_attribute
#define __has_attribute(x) 0
#endif
#ifndef __has_cpp_attribute
#define __has_cpp_attribute(x) 0
#endif
#ifndef CYTHON_RESTRICT
#if defined(__GNUC__)
#define CYTHON_RESTRICT __restrict__
#elif defined(_MSC_VER) && _MSC_VER >= 1400
#define CYTHON_RESTRICT __restrict
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_RESTRICT restrict
#else
#define CYTHON_RESTRICT
#endif
#endif
#ifndef CYTHON_UNUSED
# if defined(__GNUC__)
# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER))
# define CYTHON_UNUSED __attribute__ ((__unused__))
# else
# define CYTHON_UNUSED
# endif
#endif
#ifndef CYTHON_MAYBE_UNUSED_VAR
# if defined(__cplusplus)
template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { }
# else
# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x)
# endif
#endif
#ifndef CYTHON_NCP_UNUSED
# if CYTHON_COMPILING_IN_CPYTHON
# define CYTHON_NCP_UNUSED
# else
# define CYTHON_NCP_UNUSED CYTHON_UNUSED
# endif
#endif
#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None)
#ifdef _MSC_VER
#ifndef _MSC_STDINT_H_
#if _MSC_VER < 1300
typedef unsigned char uint8_t;
typedef unsigned int uint32_t;
#else
typedef unsigned __int8 uint8_t;
typedef unsigned __int32 uint32_t;
#endif
#endif
#else
#include <stdint.h>
#endif
#ifndef CYTHON_FALLTHROUGH
#if defined(__cplusplus) && __cplusplus >= 201103L
#if __has_cpp_attribute(fallthrough)
#define CYTHON_FALLTHROUGH [[fallthrough]]
#elif __has_cpp_attribute(clang::fallthrough)
#define CYTHON_FALLTHROUGH [[clang::fallthrough]]
#elif __has_cpp_attribute(gnu::fallthrough)
#define CYTHON_FALLTHROUGH [[gnu::fallthrough]]
#endif
#endif
#ifndef CYTHON_FALLTHROUGH
#if __has_attribute(fallthrough)
#define CYTHON_FALLTHROUGH __attribute__((fallthrough))
#else
#define CYTHON_FALLTHROUGH
#endif
#endif
#if defined(__clang__ ) && defined(__apple_build_version__)
#if __apple_build_version__ < 7000000
#undef CYTHON_FALLTHROUGH
#define CYTHON_FALLTHROUGH
#endif
#endif
#endif
#ifndef CYTHON_INLINE
#if defined(__clang__)
#define CYTHON_INLINE __inline__ __attribute__ ((__unused__))
#elif defined(__GNUC__)
#define CYTHON_INLINE __inline__
#elif defined(_MSC_VER)
#define CYTHON_INLINE __inline
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define CYTHON_INLINE inline
#else
#define CYTHON_INLINE
#endif
#endif
#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag)
#define Py_OptimizeFlag 0
#endif
#define __PYX_BUILD_PY_SSIZE_T "n"
#define CYTHON_FORMAT_SSIZE_T "z"
#if PY_MAJOR_VERSION < 3
#define __Pyx_BUILTIN_MODULE_NAME "__builtin__"
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#define __Pyx_DefaultClassType PyClass_Type
#else
#define __Pyx_BUILTIN_MODULE_NAME "builtins"
#if PY_VERSION_HEX >= 0x030800A4 && PY_VERSION_HEX < 0x030800B2
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, 0, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#else
#define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\
PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)
#endif
#define __Pyx_DefaultClassType PyType_Type
#endif
#ifndef Py_TPFLAGS_CHECKTYPES
#define Py_TPFLAGS_CHECKTYPES 0
#endif
#ifndef Py_TPFLAGS_HAVE_INDEX
#define Py_TPFLAGS_HAVE_INDEX 0
#endif
#ifndef Py_TPFLAGS_HAVE_NEWBUFFER
#define Py_TPFLAGS_HAVE_NEWBUFFER 0
#endif
#ifndef Py_TPFLAGS_HAVE_FINALIZE
#define Py_TPFLAGS_HAVE_FINALIZE 0
#endif
#ifndef METH_STACKLESS
#define METH_STACKLESS 0
#endif
#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL)
#ifndef METH_FASTCALL
#define METH_FASTCALL 0x80
#endif
typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs);
typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args,
Py_ssize_t nargs, PyObject *kwnames);
#else
#define __Pyx_PyCFunctionFast _PyCFunctionFast
#define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords
#endif
#if CYTHON_FAST_PYCCALL
#define __Pyx_PyFastCFunction_Check(func)\
((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)))))
#else
#define __Pyx_PyFastCFunction_Check(func) 0
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc)
#define PyObject_Malloc(s) PyMem_Malloc(s)
#define PyObject_Free(p) PyMem_Free(p)
#define PyObject_Realloc(p) PyMem_Realloc(p)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1
#define PyMem_RawMalloc(n) PyMem_Malloc(n)
#define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n)
#define PyMem_RawFree(p) PyMem_Free(p)
#endif
#if CYTHON_COMPILING_IN_PYSTON
#define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno)
#else
#define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0)
#define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno)
#endif
#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#elif PY_VERSION_HEX >= 0x03060000
#define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet()
#elif PY_VERSION_HEX >= 0x03000000
#define __Pyx_PyThreadState_Current PyThreadState_GET()
#else
#define __Pyx_PyThreadState_Current _PyThreadState_Current
#endif
#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT)
#include "pythread.h"
#define Py_tss_NEEDS_INIT 0
typedef int Py_tss_t;
static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) {
*key = PyThread_create_key();
return 0;
}
static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) {
Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t));
*key = Py_tss_NEEDS_INIT;
return key;
}
static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) {
PyObject_Free(key);
}
static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) {
return *key != Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) {
PyThread_delete_key(*key);
*key = Py_tss_NEEDS_INIT;
}
static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) {
return PyThread_set_key_value(*key, value);
}
static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) {
return PyThread_get_key_value(*key);
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized)
#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n))
#else
#define __Pyx_PyDict_NewPresized(n) PyDict_New()
#endif
#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION
#define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y)
#else
#define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y)
#define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y)
#endif
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS
#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash)
#else
#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name)
#endif
#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND)
#define CYTHON_PEP393_ENABLED 1
#define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\
0 : _PyUnicode_Ready((PyObject *)(op)))
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i)
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u)
#define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u)
#define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u)
#define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i)
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch)
#if defined(PyUnicode_IS_READY) && defined(PyUnicode_GET_SIZE)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u)))
#else
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u))
#endif
#else
#define CYTHON_PEP393_ENABLED 0
#define PyUnicode_1BYTE_KIND 1
#define PyUnicode_2BYTE_KIND 2
#define PyUnicode_4BYTE_KIND 4
#define __Pyx_PyUnicode_READY(op) (0)
#define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u)
#define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i]))
#define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535 : 1114111)
#define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE))
#define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u))
#define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i]))
#define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch)
#define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u))
#endif
#if CYTHON_COMPILING_IN_PYPY
#define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b)
#else
#define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b)
#define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\
PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b))
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains)
#define PyUnicode_Contains(u, s) PySequence_Contains(u, s)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check)
#define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type)
#endif
#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format)
#define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt)
#endif
#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b))
#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b)
#else
#define __Pyx_PyString_Format(a, b) PyString_Format(a, b)
#endif
#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII)
#define PyObject_ASCII(o) PyObject_Repr(o)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBaseString_Type PyUnicode_Type
#define PyStringObject PyUnicodeObject
#define PyString_Type PyUnicode_Type
#define PyString_Check PyUnicode_Check
#define PyString_CheckExact PyUnicode_CheckExact
#ifndef PyObject_Unicode
#define PyObject_Unicode PyObject_Str
#endif
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj)
#define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj)
#else
#define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj))
#define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj))
#endif
#ifndef PySet_CheckExact
#define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type)
#endif
#if PY_VERSION_HEX >= 0x030900A4
#define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size)
#else
#define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt)
#define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size)
#endif
#if CYTHON_ASSUME_SAFE_MACROS
#define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq)
#else
#define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq)
#endif
#if PY_MAJOR_VERSION >= 3
#define PyIntObject PyLongObject
#define PyInt_Type PyLong_Type
#define PyInt_Check(op) PyLong_Check(op)
#define PyInt_CheckExact(op) PyLong_CheckExact(op)
#define PyInt_FromString PyLong_FromString
#define PyInt_FromUnicode PyLong_FromUnicode
#define PyInt_FromLong PyLong_FromLong
#define PyInt_FromSize_t PyLong_FromSize_t
#define PyInt_FromSsize_t PyLong_FromSsize_t
#define PyInt_AsLong PyLong_AsLong
#define PyInt_AS_LONG PyLong_AS_LONG
#define PyInt_AsSsize_t PyLong_AsSsize_t
#define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask
#define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask
#define PyNumber_Int PyNumber_Long
#endif
#if PY_MAJOR_VERSION >= 3
#define PyBoolObject PyLongObject
#endif
#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY
#ifndef PyUnicode_InternFromString
#define PyUnicode_InternFromString(s) PyUnicode_FromString(s)
#endif
#endif
#if PY_VERSION_HEX < 0x030200A4
typedef long Py_hash_t;
#define __Pyx_PyInt_FromHash_t PyInt_FromLong
#define __Pyx_PyInt_AsHash_t PyInt_AsLong
#else
#define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t
#define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t
#endif
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func))
#else
#define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass)
#endif
#if CYTHON_USE_ASYNC_SLOTS
#if PY_VERSION_HEX >= 0x030500B1
#define __Pyx_PyAsyncMethodsStruct PyAsyncMethods
#define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async)
#else
#define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved))
#endif
#else
#define __Pyx_PyType_AsAsync(obj) NULL
#endif
#ifndef __Pyx_PyAsyncMethodsStruct
typedef struct {
unaryfunc am_await;
unaryfunc am_aiter;
unaryfunc am_anext;
} __Pyx_PyAsyncMethodsStruct;
#endif
#if defined(WIN32) || defined(MS_WINDOWS)
#define _USE_MATH_DEFINES
#endif
#include <math.h>
#ifdef NAN
#define __PYX_NAN() ((float) NAN)
#else
static CYTHON_INLINE float __PYX_NAN() {
float value;
memset(&value, 0xFF, sizeof(value));
return value;
}
#endif
#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL)
#define __Pyx_truncl trunc
#else
#define __Pyx_truncl truncl
#endif
#define __PYX_MARK_ERR_POS(f_index, lineno) \
{ __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; }
#define __PYX_ERR(f_index, lineno, Ln_error) \
{ __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; }
#ifndef __PYX_EXTERN_C
#ifdef __cplusplus
#define __PYX_EXTERN_C extern "C"
#else
#define __PYX_EXTERN_C extern
#endif
#endif
#define __PYX_HAVE__draco__util__truncate
#define __PYX_HAVE_API__draco__util__truncate
/* Early includes */
#include <string.h>
#include <stdio.h>
#include "numpy/arrayobject.h"
#include "numpy/ufuncobject.h"
/* NumPy API declarations from "numpy/__init__.pxd" */
#include "truncate.hpp"
#include "pythread.h"
#include <stdlib.h>
#include "pystate.h"
#ifdef _OPENMP
#include <omp.h>
#endif /* _OPENMP */
#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS)
#define CYTHON_WITHOUT_ASSERTIONS
#endif
typedef struct {
    PyObject **p;
    const char *s;
    const Py_ssize_t n;
    const char *encoding;
    const char is_unicode;
    const char is_str;
    const char intern;
} __Pyx_StringTabEntry;
#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0
#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8)
#define __PYX_DEFAULT_STRING_ENCODING ""
#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#define __Pyx_uchar_cast(c) ((unsigned char)c)
#define __Pyx_long_cast(x) ((long)x)
#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\
(sizeof(type) < sizeof(Py_ssize_t)) ||\
(sizeof(type) > sizeof(Py_ssize_t) &&\
likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX) &&\
(!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\
v == (type)PY_SSIZE_T_MIN))) ||\
(sizeof(type) == sizeof(Py_ssize_t) &&\
(is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\
v == (type)PY_SSIZE_T_MAX))) )
static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) {
return (size_t) i < (size_t) limit;
}
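/* The size_t casts fold the two checks 0 <= i and i < limit into a
   single unsigned comparison: a negative index wraps around to a value
   larger than any valid limit. */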
#if defined (__cplusplus) && __cplusplus >= 201103L
#include <cstdlib>
#define __Pyx_sst_abs(value) std::abs(value)
#elif SIZEOF_INT >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) abs(value)
#elif SIZEOF_LONG >= SIZEOF_SIZE_T
#define __Pyx_sst_abs(value) labs(value)
#elif defined (_MSC_VER)
#define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value))
#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
#define __Pyx_sst_abs(value) llabs(value)
#elif defined (__GNUC__)
#define __Pyx_sst_abs(value) __builtin_llabs(value)
#else
#define __Pyx_sst_abs(value) ((value<0) ? -value : value)
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*);
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length);
#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s))
#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l)
#define __Pyx_PyBytes_FromString PyBytes_FromString
#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*);
#if PY_MAJOR_VERSION < 3
#define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize
#else
#define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString
#define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize
#endif
#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s))
#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s))
#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s))
#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s)
#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s)
#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s)
#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s)
#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s)
static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) {
const Py_UNICODE *u_end = u;
while (*u_end++) ;
return (size_t)(u_end - u - 1);
}
#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u))
#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode
#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode
#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj)
#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None)
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b);
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*);
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*);
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x);
#define __Pyx_PySequence_Tuple(obj)\
(likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj))
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*);
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t);
#if CYTHON_ASSUME_SAFE_MACROS
#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x))
#else
#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x)
#endif
#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x))
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x))
#else
#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x))
#endif
#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x))
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
static int __Pyx_sys_getdefaultencoding_not_ascii;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
PyObject* ascii_chars_u = NULL;
PyObject* ascii_chars_b = NULL;
const char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
if (strcmp(default_encoding_c, "ascii") == 0) {
__Pyx_sys_getdefaultencoding_not_ascii = 0;
} else {
char ascii_chars[128];
int c;
for (c = 0; c < 128; c++) {
ascii_chars[c] = c;
}
__Pyx_sys_getdefaultencoding_not_ascii = 1;
ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL);
if (!ascii_chars_u) goto bad;
ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL);
if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) {
PyErr_Format(
PyExc_ValueError,
"This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.",
default_encoding_c);
goto bad;
}
Py_DECREF(ascii_chars_u);
Py_DECREF(ascii_chars_b);
}
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
Py_XDECREF(ascii_chars_u);
Py_XDECREF(ascii_chars_b);
return -1;
}
#endif
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL)
#else
#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL)
#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
static char* __PYX_DEFAULT_STRING_ENCODING;
static int __Pyx_init_sys_getdefaultencoding_params(void) {
PyObject* sys;
PyObject* default_encoding = NULL;
char* default_encoding_c;
sys = PyImport_ImportModule("sys");
if (!sys) goto bad;
default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL);
Py_DECREF(sys);
if (!default_encoding) goto bad;
default_encoding_c = PyBytes_AsString(default_encoding);
if (!default_encoding_c) goto bad;
__PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1);
if (!__PYX_DEFAULT_STRING_ENCODING) goto bad;
strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c);
Py_DECREF(default_encoding);
return 0;
bad:
Py_XDECREF(default_encoding);
return -1;
}
#endif
#endif
/* Test for GCC > 2.95 */
#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#else /* !__GNUC__ or GCC < 2.95 */
#define likely(x) (x)
#define unlikely(x) (x)
#endif /* __GNUC__ */
static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; }
static PyObject *__pyx_m = NULL;
static PyObject *__pyx_d;
static PyObject *__pyx_b;
static PyObject *__pyx_cython_runtime = NULL;
static PyObject *__pyx_empty_tuple;
static PyObject *__pyx_empty_bytes;
static PyObject *__pyx_empty_unicode;
static int __pyx_lineno;
static int __pyx_clineno = 0;
static const char *__pyx_cfilenm = __FILE__;
static const char *__pyx_filename;
/* Header.proto */
#if !defined(CYTHON_CCOMPLEX)
#if defined(__cplusplus)
#define CYTHON_CCOMPLEX 1
#elif defined(_Complex_I)
#define CYTHON_CCOMPLEX 1
#else
#define CYTHON_CCOMPLEX 0
#endif
#endif
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#include <complex>
#else
#include <complex.h>
#endif
#endif
#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
#undef _Complex_I
#define _Complex_I 1.0fj
#endif
static const char *__pyx_f[] = {
"draco/util/truncate.pyx",
"__init__.pxd",
"stringsource",
"type.pxd",
};
/* NoFastGil.proto */
#define __Pyx_PyGILState_Ensure PyGILState_Ensure
#define __Pyx_PyGILState_Release PyGILState_Release
#define __Pyx_FastGIL_Remember()
#define __Pyx_FastGIL_Forget()
#define __Pyx_FastGilFuncInit()
/* ForceInitThreads.proto */
#ifndef __PYX_FORCE_INIT_THREADS
#define __PYX_FORCE_INIT_THREADS 0
#endif
/* MemviewSliceStruct.proto */
struct __pyx_memoryview_obj;
typedef struct {
struct __pyx_memoryview_obj *memview;
char *data;
Py_ssize_t shape[8];
Py_ssize_t strides[8];
Py_ssize_t suboffsets[8];
} __Pyx_memviewslice;
#define __Pyx_MemoryView_Len(m) (m.shape[0])
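/* A memoryview slice keeps a pointer to its owning memoryview plus
   per-dimension shape/strides/suboffsets; 8 is the buffer dimension
   limit used by this Cython version. */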
/* Atomics.proto */
#include <pythread.h>
#ifndef CYTHON_ATOMICS
#define CYTHON_ATOMICS 1
#endif
#define __pyx_atomic_int_type int
#if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\
(__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\
!defined(__i386__)
#define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1)
#define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using GNU atomics"
#endif
#elif CYTHON_ATOMICS && defined(_MSC_VER) && 0
#include <Windows.h>
#undef __pyx_atomic_int_type
#define __pyx_atomic_int_type LONG
#define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#pragma message ("Using MSVC atomics")
#endif
#elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0
#define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value)
#define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value)
#ifdef __PYX_DEBUG_ATOMICS
#warning "Using Intel atomics"
#endif
#else
#undef CYTHON_ATOMICS
#define CYTHON_ATOMICS 0
#ifdef __PYX_DEBUG_ATOMICS
#warning "Not using atomics"
#endif
#endif
typedef volatile __pyx_atomic_int_type __pyx_atomic_int;
#if CYTHON_ATOMICS
#define __pyx_add_acquisition_count(memview)\
__pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock)
#else
#define __pyx_add_acquisition_count(memview)\
__pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#define __pyx_sub_acquisition_count(memview)\
__pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
#endif
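/* Slice acquisition counts are adjusted with GCC __sync builtins when
   available; the MSVC and Intel branches above are deliberately
   disabled with "&& 0", and without atomics each update falls back to
   the per-memoryview PyThread lock. */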
/* BufferFormatStructs.proto */
#define IS_UNSIGNED(type) (((type) -1) > 0)
struct __Pyx_StructField_;
#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
typedef struct {
const char* name;
struct __Pyx_StructField_* fields;
size_t size;
size_t arraysize[8];
int ndim;
char typegroup;
char is_unsigned;
int flags;
} __Pyx_TypeInfo;
typedef struct __Pyx_StructField_ {
__Pyx_TypeInfo* type;
const char* name;
size_t offset;
} __Pyx_StructField;
typedef struct {
__Pyx_StructField* field;
size_t parent_offset;
} __Pyx_BufFmt_StackElem;
typedef struct {
__Pyx_StructField root;
__Pyx_BufFmt_StackElem* head;
size_t fmt_offset;
size_t new_count, enc_count;
size_t struct_alignment;
int is_complex;
char enc_type;
char new_packmode;
char enc_packmode;
char is_valid_array;
} __Pyx_BufFmt_Context;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":689
* # in Cython to enable them only on the right systems.
*
* ctypedef npy_int8 int8_t # <<<<<<<<<<<<<<
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
*/
typedef npy_int8 __pyx_t_5numpy_int8_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":690
*
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t # <<<<<<<<<<<<<<
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t
*/
typedef npy_int16 __pyx_t_5numpy_int16_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":691
* ctypedef npy_int8 int8_t
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t # <<<<<<<<<<<<<<
* ctypedef npy_int64 int64_t
* #ctypedef npy_int96 int96_t
*/
typedef npy_int32 __pyx_t_5numpy_int32_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":692
* ctypedef npy_int16 int16_t
* ctypedef npy_int32 int32_t
* ctypedef npy_int64 int64_t # <<<<<<<<<<<<<<
* #ctypedef npy_int96 int96_t
* #ctypedef npy_int128 int128_t
*/
typedef npy_int64 __pyx_t_5numpy_int64_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":696
* #ctypedef npy_int128 int128_t
*
* ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<<
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
*/
typedef npy_uint8 __pyx_t_5numpy_uint8_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":697
*
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<<
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t
*/
typedef npy_uint16 __pyx_t_5numpy_uint16_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":698
* ctypedef npy_uint8 uint8_t
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<<
* ctypedef npy_uint64 uint64_t
* #ctypedef npy_uint96 uint96_t
*/
typedef npy_uint32 __pyx_t_5numpy_uint32_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":699
* ctypedef npy_uint16 uint16_t
* ctypedef npy_uint32 uint32_t
* ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<<
* #ctypedef npy_uint96 uint96_t
* #ctypedef npy_uint128 uint128_t
*/
typedef npy_uint64 __pyx_t_5numpy_uint64_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":703
* #ctypedef npy_uint128 uint128_t
*
* ctypedef npy_float32 float32_t # <<<<<<<<<<<<<<
* ctypedef npy_float64 float64_t
* #ctypedef npy_float80 float80_t
*/
typedef npy_float32 __pyx_t_5numpy_float32_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":704
*
* ctypedef npy_float32 float32_t
* ctypedef npy_float64 float64_t # <<<<<<<<<<<<<<
* #ctypedef npy_float80 float80_t
* #ctypedef npy_float128 float128_t
*/
typedef npy_float64 __pyx_t_5numpy_float64_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":713
* # The int types are mapped a bit surprising --
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t
*/
typedef npy_long __pyx_t_5numpy_int_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":714
* # numpy.int corresponds to 'l' and numpy.long to 'q'
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t # <<<<<<<<<<<<<<
* ctypedef npy_longlong longlong_t
*
*/
typedef npy_longlong __pyx_t_5numpy_long_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":715
* ctypedef npy_long int_t
* ctypedef npy_longlong long_t
* ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_ulong uint_t
*/
typedef npy_longlong __pyx_t_5numpy_longlong_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":717
* ctypedef npy_longlong longlong_t
*
* ctypedef npy_ulong uint_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t
*/
typedef npy_ulong __pyx_t_5numpy_uint_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":718
*
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<<
* ctypedef npy_ulonglong ulonglong_t
*
*/
typedef npy_ulonglong __pyx_t_5numpy_ulong_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":719
* ctypedef npy_ulong uint_t
* ctypedef npy_ulonglong ulong_t
* ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<<
*
* ctypedef npy_intp intp_t
*/
typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":721
* ctypedef npy_ulonglong ulonglong_t
*
* ctypedef npy_intp intp_t # <<<<<<<<<<<<<<
* ctypedef npy_uintp uintp_t
*
*/
typedef npy_intp __pyx_t_5numpy_intp_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":722
*
* ctypedef npy_intp intp_t
* ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<<
*
* ctypedef npy_double float_t
*/
typedef npy_uintp __pyx_t_5numpy_uintp_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":724
* ctypedef npy_uintp uintp_t
*
* ctypedef npy_double float_t # <<<<<<<<<<<<<<
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t
*/
typedef npy_double __pyx_t_5numpy_float_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":725
*
* ctypedef npy_double float_t
* ctypedef npy_double double_t # <<<<<<<<<<<<<<
* ctypedef npy_longdouble longdouble_t
*
*/
typedef npy_double __pyx_t_5numpy_double_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":726
* ctypedef npy_double float_t
* ctypedef npy_double double_t
* ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cfloat cfloat_t
*/
typedef npy_longdouble __pyx_t_5numpy_longdouble_t;
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< float > __pyx_t_float_complex;
#else
typedef float _Complex __pyx_t_float_complex;
#endif
#else
typedef struct { float real, imag; } __pyx_t_float_complex;
#endif
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float);
/* Declarations.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
typedef ::std::complex< double > __pyx_t_double_complex;
#else
typedef double _Complex __pyx_t_double_complex;
#endif
#else
typedef struct { double real, imag; } __pyx_t_double_complex;
#endif
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double);
/*--- Type declarations ---*/
struct __pyx_array_obj;
struct __pyx_MemviewEnum_obj;
struct __pyx_memoryview_obj;
struct __pyx_memoryviewslice_obj;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":728
* ctypedef npy_longdouble longdouble_t
*
* ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<<
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t
*/
typedef npy_cfloat __pyx_t_5numpy_cfloat_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":729
*
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<<
* ctypedef npy_clongdouble clongdouble_t
*
*/
typedef npy_cdouble __pyx_t_5numpy_cdouble_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":730
* ctypedef npy_cfloat cfloat_t
* ctypedef npy_cdouble cdouble_t
* ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<<
*
* ctypedef npy_cdouble complex_t
*/
typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":732
* ctypedef npy_clongdouble clongdouble_t
*
* ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew1(a):
*/
typedef npy_cdouble __pyx_t_5numpy_complex_t;
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_array_obj {
PyObject_HEAD
struct __pyx_vtabstruct_array *__pyx_vtab;
char *data;
Py_ssize_t len;
char *format;
int ndim;
Py_ssize_t *_shape;
Py_ssize_t *_strides;
Py_ssize_t itemsize;
PyObject *mode;
PyObject *_format;
void (*callback_free_data)(void *);
int free_data;
int dtype_is_object;
};
/* "View.MemoryView":279
*
* @cname('__pyx_MemviewEnum')
* cdef class Enum(object): # <<<<<<<<<<<<<<
* cdef object name
* def __init__(self, name):
*/
struct __pyx_MemviewEnum_obj {
PyObject_HEAD
PyObject *name;
};
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_memoryview_obj {
PyObject_HEAD
struct __pyx_vtabstruct_memoryview *__pyx_vtab;
PyObject *obj;
PyObject *_size;
PyObject *_array_interface;
PyThread_type_lock lock;
__pyx_atomic_int acquisition_count[2];
__pyx_atomic_int *acquisition_count_aligned_p;
Py_buffer view;
int flags;
int dtype_is_object;
__Pyx_TypeInfo *typeinfo;
};
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_memoryviewslice_obj {
struct __pyx_memoryview_obj __pyx_base;
__Pyx_memviewslice from_slice;
PyObject *from_object;
PyObject *(*to_object_func)(char *);
int (*to_dtype_func)(char *, PyObject *);
};
/* "View.MemoryView":105
*
* @cname("__pyx_array")
* cdef class array: # <<<<<<<<<<<<<<
*
* cdef:
*/
struct __pyx_vtabstruct_array {
PyObject *(*get_memview)(struct __pyx_array_obj *);
};
static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
/* "View.MemoryView":330
*
* @cname('__pyx_memoryview')
* cdef class memoryview(object): # <<<<<<<<<<<<<<
*
* cdef object obj
*/
struct __pyx_vtabstruct_memoryview {
char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
};
static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
/* "View.MemoryView":965
*
* @cname('__pyx_memoryviewslice')
* cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
* "Internal class for passing memoryview slices to Python"
*
*/
struct __pyx_vtabstruct__memoryviewslice {
struct __pyx_vtabstruct_memoryview __pyx_base;
};
static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
/* --- Runtime support code (head) --- */
/* Refnanny.proto */
#ifndef CYTHON_REFNANNY
#define CYTHON_REFNANNY 0
#endif
#if CYTHON_REFNANNY
typedef struct {
void (*INCREF)(void*, PyObject*, int);
void (*DECREF)(void*, PyObject*, int);
void (*GOTREF)(void*, PyObject*, int);
void (*GIVEREF)(void*, PyObject*, int);
void* (*SetupContext)(const char*, int, const char*);
void (*FinishContext)(void**);
} __Pyx_RefNannyAPIStruct;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
#define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
#ifdef WITH_THREAD
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
if (acquire_gil) {\
PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
PyGILState_Release(__pyx_gilstate_save);\
} else {\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\
}
#else
#define __Pyx_RefNannySetupContext(name, acquire_gil)\
__pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__)
#endif
#define __Pyx_RefNannyFinishContext()\
__Pyx_RefNanny->FinishContext(&__pyx_refnanny)
#define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__)
#define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0)
#define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0)
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0)
#define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0)
#else
#define __Pyx_RefNannyDeclarations
#define __Pyx_RefNannySetupContext(name, acquire_gil)
#define __Pyx_RefNannyFinishContext()
#define __Pyx_INCREF(r) Py_INCREF(r)
#define __Pyx_DECREF(r) Py_DECREF(r)
#define __Pyx_GOTREF(r)
#define __Pyx_GIVEREF(r)
#define __Pyx_XINCREF(r) Py_XINCREF(r)
#define __Pyx_XDECREF(r) Py_XDECREF(r)
#define __Pyx_XGOTREF(r)
#define __Pyx_XGIVEREF(r)
#endif
#define __Pyx_XDECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_XDECREF(tmp);\
} while (0)
#define __Pyx_DECREF_SET(r, v) do {\
PyObject *tmp = (PyObject *) r;\
r = v; __Pyx_DECREF(tmp);\
} while (0)
#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0)
#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0)
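/* Like CPython's Py_CLEAR, these macros copy the old pointer aside and
   update the slot before decrefing, so destructors triggered by the
   DECREF can never observe a dangling reference. */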
/* PyObjectGetAttrStr.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
#endif
/* GetBuiltinName.proto */
static PyObject *__Pyx_GetBuiltinName(PyObject *name);
/* RaiseArgTupleInvalid.proto */
static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact,
Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found);
/* RaiseDoubleKeywords.proto */
static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name);
/* ParseKeywords.proto */
static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\
PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\
const char* function_name);
/* PyIntCompare.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_NeObjC(PyObject *op1, PyObject *op2, long intval, long inplace);
/* PyObjectCall.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw);
#else
#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw)
#endif
/* PyThreadStateGet.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate;
#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current;
#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type
#else
#define __Pyx_PyThreadState_declare
#define __Pyx_PyThreadState_assign
#define __Pyx_PyErr_Occurred() PyErr_Occurred()
#endif
/* PyErrFetchRestore.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
#else
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#endif
#else
#define __Pyx_PyErr_Clear() PyErr_Clear()
#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
#endif
/* RaiseException.proto */
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause);
/* PyFunctionFastCall.proto */
#if CYTHON_FAST_PYCALL
#define __Pyx_PyFunction_FastCall(func, args, nargs)\
__Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL)
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs);
#else
#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs)
#endif
#define __Pyx_BUILD_ASSERT_EXPR(cond)\
(sizeof(char [1 - 2*!(cond)]) - 1)
#ifndef Py_MEMBER_SIZE
#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member)
#endif
static size_t __pyx_pyframe_localsplus_offset = 0;
#include "frameobject.h"
#define __Pxy_PyFrame_Initialize_Offsets()\
((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\
(void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus)))
#define __Pyx_PyFrame_GetLocalsplus(frame)\
(assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset))
#endif
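/* __Pxy_PyFrame_Initialize_Offsets asserts that f_localsplus is the
   trailing member of PyFrameObject and records its offset, letting
   call arguments be written straight into a recycled frame object; the
   "1 ||" above forces the local fast-call implementation regardless of
   the Python version. */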
/* PyCFunctionFastCall.proto */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs);
#else
#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL)
#endif
/* PyObjectCallMethO.proto */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg);
#endif
/* PyObjectCallOneArg.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg);
/* PyDictVersioning.proto */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1)
#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\
(version_var) = __PYX_GET_DICT_VERSION(dict);\
(cache_var) = (value);
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\
(VAR) = __pyx_dict_cached_value;\
} else {\
(VAR) = __pyx_dict_cached_value = (LOOKUP);\
__pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\
}\
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj);
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj);
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version);
#else
#define __PYX_GET_DICT_VERSION(dict) (0)
#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)
#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP);
#endif
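/* On CPython 3.6+ every dict mutation bumps the private ma_version_tag
   (PEP 509); caching the tag lets repeated global lookups reuse the
   cached value for as long as the dict is unchanged. */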
/* GetModuleGlobalName.proto */
#if CYTHON_USE_DICT_VERSIONS
#define __Pyx_GetModuleGlobalName(var, name) {\
static PY_UINT64_T __pyx_dict_version = 0;\
static PyObject *__pyx_dict_cached_value = NULL;\
(var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\
(likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\
__Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
#define __Pyx_GetModuleGlobalNameUncached(var, name) {\
PY_UINT64_T __pyx_dict_version;\
PyObject *__pyx_dict_cached_value;\
(var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\
}
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value);
#else
#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name)
#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name)
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name);
#endif
/* PyObjectCall2Args.proto */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2);
/* MemviewSliceInit.proto */
#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d
#define __Pyx_MEMVIEW_DIRECT 1
#define __Pyx_MEMVIEW_PTR 2
#define __Pyx_MEMVIEW_FULL 4
#define __Pyx_MEMVIEW_CONTIG 8
#define __Pyx_MEMVIEW_STRIDED 16
#define __Pyx_MEMVIEW_FOLLOW 32
#define __Pyx_IS_C_CONTIG 1
#define __Pyx_IS_F_CONTIG 2
static int __Pyx_init_memviewslice(
struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference);
static CYTHON_INLINE int __pyx_add_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(
__pyx_atomic_int *acquisition_count, PyThread_type_lock lock);
#define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p)
#define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview))
#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__)
#define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__)
static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int);
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int);
/* GetTopmostException.proto */
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate);
#endif
/* SaveResetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
#else
#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb)
#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb)
#endif
/* PyErrExceptionMatches.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err)
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err);
#else
#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err)
#endif
/* GetException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb)
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* ArgTypeTest.proto */
#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\
((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\
__Pyx__ArgTypeTest(obj, type, name, exact))
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact);
/* IncludeStringH.proto */
#include <string.h>
/* BytesEquals.proto */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
/* UnicodeEquals.proto */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
/* StrEquals.proto */
#if PY_MAJOR_VERSION >= 3
#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals
#else
#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals
#endif
/* None.proto */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t);
/* UnaryNegOverflows.proto */
#define UNARY_NEG_WOULD_OVERFLOW(x)\
(((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x)))
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/
/* GetAttr.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *);
/* GetItemInt.proto */
#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\
(is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\
__Pyx_GetItemInt_Generic(o, to_py_func(i))))
#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\
(__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\
__Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\
(PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL))
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
int wraparound, int boundscheck);
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j);
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i,
int is_list, int wraparound, int boundscheck);
/* ObjectGetItem.proto */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key);
#else
#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key)
#endif
/* decode_c_string_utf16.proto */
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 0;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = -1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) {
int byteorder = 1;
return PyUnicode_DecodeUTF16(s, size, errors, &byteorder);
}
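/* byteorder selects the UTF-16 variant for PyUnicode_DecodeUTF16:
   0 detects the order from a BOM (native order by default),
   -1 forces little-endian and 1 forces big-endian. */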
/* decode_c_string.proto */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors));
/* GetAttr3.proto */
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *);
/* RaiseTooManyValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected);
/* RaiseNeedMoreValuesToUnpack.proto */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index);
/* RaiseNoneIterError.proto */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void);
/* ExtTypeTest.proto */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type);
/* SwapException.proto */
#if CYTHON_FAST_THREAD_STATE
#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb)
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb);
#endif
/* Import.proto */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level);
/* FastTypeChecks.proto */
#if CYTHON_COMPILING_IN_CPYTHON
#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type)
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type);
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2);
#else
#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type)
#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type)
#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2))
#endif
#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception)
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
/* ListCompAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len)) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x)
#endif
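/* Fast path: while the list still has spare capacity, the new item is
   stored directly with PyList_SET_ITEM and ob_size is bumped, skipping
   PyList_Append's call overhead and resize check. */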
/* PyIntBinop.proto */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace, int zerodivision_check);
#else
#define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace, zerodivision_check)\
(inplace ? PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2))
#endif
/* ListExtend.proto */
static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) {
#if CYTHON_COMPILING_IN_CPYTHON
PyObject* none = _PyList_Extend((PyListObject*)L, v);
if (unlikely(!none))
return -1;
Py_DECREF(none);
return 0;
#else
return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v);
#endif
}
/* ListAppend.proto */
#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
PyListObject* L = (PyListObject*) list;
Py_ssize_t len = Py_SIZE(list);
if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
Py_INCREF(x);
PyList_SET_ITEM(list, len, x);
__Pyx_SET_SIZE(list, len + 1);
return 0;
}
return PyList_Append(list, x);
}
#else
#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
#endif
/* None.proto */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname);
/* None.proto */
static CYTHON_INLINE long __Pyx_div_long(long, long);
/* ImportFrom.proto */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name);
/* HasAttr.proto */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *);
/* PyObject_GenericGetAttrNoDict.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
#endif
/* PyObject_GenericGetAttr.proto */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
#else
#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
#endif
/* SetVTable.proto */
static int __Pyx_SetVtable(PyObject *dict, void *vtable);
/* PyObjectGetAttrStrNoError.proto */
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
/* SetupReduce.proto */
static int __Pyx_setup_reduce(PyObject* type_obj);
/* TypeImport.proto */
#ifndef __PYX_HAVE_RT_ImportType_proto
#define __PYX_HAVE_RT_ImportType_proto
enum __Pyx_ImportType_CheckSize {
__Pyx_ImportType_CheckSize_Error = 0,
__Pyx_ImportType_CheckSize_Warn = 1,
__Pyx_ImportType_CheckSize_Ignore = 2
};
static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size);
#endif
/* CLineInTraceback.proto */
#ifdef CYTHON_CLINE_IN_TRACEBACK
#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0)
#else
static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line);
#endif
/* CodeObjectCache.proto */
typedef struct {
PyCodeObject* code_object;
int code_line;
} __Pyx_CodeObjectCacheEntry;
struct __Pyx_CodeObjectCache {
int count;
int max_count;
__Pyx_CodeObjectCacheEntry* entries;
};
static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL};
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line);
static PyCodeObject *__pyx_find_code_object(int code_line);
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object);
/* AddTraceback.proto */
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename);
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags);
static void __Pyx_ReleaseBuffer(Py_buffer *view);
#else
#define __Pyx_GetBuffer PyObject_GetBuffer
#define __Pyx_ReleaseBuffer PyBuffer_Release
#endif
/* BufferStructDeclare.proto */
typedef struct {
Py_ssize_t shape, strides, suboffsets;
} __Pyx_Buf_DimInfo;
typedef struct {
size_t refcount;
Py_buffer pybuffer;
} __Pyx_Buffer;
typedef struct {
__Pyx_Buffer *rcbuffer;
char *data;
__Pyx_Buf_DimInfo diminfo[8];
} __Pyx_LocalBuf_ND;
/* MemviewSliceIsContig.proto */
static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim);
/* OverlappingSlices.proto */
static int __pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize);
/* Capsule.proto */
static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig);
/* IsLittleEndian.proto */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void);
/* BufferFormatCheck.proto */
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts);
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type);
/* TypeInfoCompare.proto */
static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b);
/* MemviewSliceValidateAndInit.proto */
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj);
/* ObjectToMemviewSlice.proto */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *, int writable_flag);
/* MemviewDtypeToObject.proto */
static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp);
static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value);
/* RealImag.proto */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
#define __Pyx_CREAL(z) ((z).real())
#define __Pyx_CIMAG(z) ((z).imag())
#else
#define __Pyx_CREAL(z) (__real__(z))
#define __Pyx_CIMAG(z) (__imag__(z))
#endif
#else
#define __Pyx_CREAL(z) ((z).real)
#define __Pyx_CIMAG(z) ((z).imag)
#endif
#if defined(__cplusplus) && CYTHON_CCOMPLEX\
&& (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103)
#define __Pyx_SET_CREAL(z,x) ((z).real(x))
#define __Pyx_SET_CIMAG(z,y) ((z).imag(y))
#else
#define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x)
#define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y)
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_float(a, b) ((a)==(b))
#define __Pyx_c_sum_float(a, b) ((a)+(b))
#define __Pyx_c_diff_float(a, b) ((a)-(b))
#define __Pyx_c_prod_float(a, b) ((a)*(b))
#define __Pyx_c_quot_float(a, b) ((a)/(b))
#define __Pyx_c_neg_float(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_float(z) ((z)==(float)0)
#define __Pyx_c_conj_float(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_float(z) (::std::abs(z))
#define __Pyx_c_pow_float(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_float(z) ((z)==0)
#define __Pyx_c_conj_float(z) (conjf(z))
#if 1
#define __Pyx_c_abs_float(z) (cabsf(z))
#define __Pyx_c_pow_float(a, b) (cpowf(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex);
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex);
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex);
#endif
#endif
/* Arithmetic.proto */
#if CYTHON_CCOMPLEX
#define __Pyx_c_eq_double(a, b) ((a)==(b))
#define __Pyx_c_sum_double(a, b) ((a)+(b))
#define __Pyx_c_diff_double(a, b) ((a)-(b))
#define __Pyx_c_prod_double(a, b) ((a)*(b))
#define __Pyx_c_quot_double(a, b) ((a)/(b))
#define __Pyx_c_neg_double(a) (-(a))
#ifdef __cplusplus
#define __Pyx_c_is_zero_double(z) ((z)==(double)0)
#define __Pyx_c_conj_double(z) (::std::conj(z))
#if 1
#define __Pyx_c_abs_double(z) (::std::abs(z))
#define __Pyx_c_pow_double(a, b) (::std::pow(a, b))
#endif
#else
#define __Pyx_c_is_zero_double(z) ((z)==0)
#define __Pyx_c_conj_double(z) (conj(z))
#if 1
#define __Pyx_c_abs_double(z) (cabs(z))
#define __Pyx_c_pow_double(a, b) (cpow(a, b))
#endif
#endif
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex);
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex);
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex);
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex);
#endif
#endif
/* MemviewSliceCopyTemplate.proto */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object);
/* CIntFromPy.proto */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *);
/* CIntFromPy.proto */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *);
/* CIntToPy.proto */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value);
/* CIntFromPy.proto */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *);
/* CheckBinaryVersion.proto */
static int __Pyx_check_binary_version(void);
/* InitStrings.proto */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t);
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/
/* Module declarations from 'cython.view' */
/* Module declarations from 'cython' */
/* Module declarations from 'cpython.buffer' */
/* Module declarations from 'libc.string' */
/* Module declarations from 'libc.stdio' */
/* Module declarations from '__builtin__' */
/* Module declarations from 'cpython.type' */
static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0;
/* Module declarations from 'cpython' */
/* Module declarations from 'cpython.object' */
/* Module declarations from 'cpython.ref' */
/* Module declarations from 'cpython.mem' */
/* Module declarations from 'numpy' */
static PyTypeObject *__pyx_ptype_5numpy_dtype = 0;
static PyTypeObject *__pyx_ptype_5numpy_flatiter = 0;
static PyTypeObject *__pyx_ptype_5numpy_broadcast = 0;
static PyTypeObject *__pyx_ptype_5numpy_ndarray = 0;
static PyTypeObject *__pyx_ptype_5numpy_ufunc = 0;
/* Module declarations from 'draco.util.truncate' */
static PyTypeObject *__pyx_array_type = 0;
static PyTypeObject *__pyx_MemviewEnum_type = 0;
static PyTypeObject *__pyx_memoryview_type = 0;
static PyTypeObject *__pyx_memoryviewslice_type = 0;
static PyObject *generic = 0;
static PyObject *strided = 0;
static PyObject *indirect = 0;
static PyObject *contiguous = 0;
static PyObject *indirect_contiguous = 0;
static int __pyx_memoryview_thread_locks_used;
static PyThread_type_lock __pyx_memoryview_thread_locks[8];
static CYTHON_INLINE PyObject *__Pyx_carray_to_py_Py_ssize_t(Py_ssize_t *, Py_ssize_t); /*proto*/
static CYTHON_INLINE PyObject *__Pyx_carray_to_tuple_Py_ssize_t(Py_ssize_t *, Py_ssize_t); /*proto*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/
static void *__pyx_align_pointer(void *, size_t); /*proto*/
static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/
static PyObject *_unellipsify(PyObject *, int); /*proto*/
static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/
static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/
static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/
static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/
static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/
static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/
static int __pyx_memoryview_err(PyObject *, char *); /*proto*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/
static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/
static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 };
#define __Pyx_MODULE_NAME "draco.util.truncate"
extern int __pyx_module_is_main_draco__util__truncate;
int __pyx_module_is_main_draco__util__truncate = 0;
/* Implementation of 'draco.util.truncate' */
static PyObject *__pyx_builtin_ValueError;
static PyObject *__pyx_builtin_range;
static PyObject *__pyx_builtin_ImportError;
static PyObject *__pyx_builtin_MemoryError;
static PyObject *__pyx_builtin_enumerate;
static PyObject *__pyx_builtin_TypeError;
static PyObject *__pyx_builtin_Ellipsis;
static PyObject *__pyx_builtin_id;
static PyObject *__pyx_builtin_IndexError;
static const char __pyx_k_O[] = "O";
static const char __pyx_k_c[] = "c";
static const char __pyx_k_i[] = "i";
static const char __pyx_k_n[] = "n";
static const char __pyx_k_id[] = "id";
static const char __pyx_k_np[] = "np";
static const char __pyx_k_err[] = "err";
static const char __pyx_k_new[] = "__new__";
static const char __pyx_k_obj[] = "obj";
static const char __pyx_k_val[] = "val";
static const char __pyx_k_wgt[] = "wgt";
static const char __pyx_k_base[] = "base";
static const char __pyx_k_dict[] = "__dict__";
static const char __pyx_k_main[] = "__main__";
static const char __pyx_k_mode[] = "mode";
static const char __pyx_k_name[] = "name";
static const char __pyx_k_ndim[] = "ndim";
static const char __pyx_k_pack[] = "pack";
static const char __pyx_k_prec[] = "prec";
static const char __pyx_k_size[] = "size";
static const char __pyx_k_step[] = "step";
static const char __pyx_k_stop[] = "stop";
static const char __pyx_k_test[] = "__test__";
static const char __pyx_k_ASCII[] = "ASCII";
static const char __pyx_k_class[] = "__class__";
static const char __pyx_k_error[] = "error";
static const char __pyx_k_flags[] = "flags";
static const char __pyx_k_numpy[] = "numpy";
static const char __pyx_k_range[] = "range";
static const char __pyx_k_shape[] = "shape";
static const char __pyx_k_start[] = "start";
static const char __pyx_k_encode[] = "encode";
static const char __pyx_k_format[] = "format";
static const char __pyx_k_import[] = "__import__";
static const char __pyx_k_name_2[] = "__name__";
static const char __pyx_k_pickle[] = "pickle";
static const char __pyx_k_reduce[] = "__reduce__";
static const char __pyx_k_struct[] = "struct";
static const char __pyx_k_unpack[] = "unpack";
static const char __pyx_k_update[] = "update";
static const char __pyx_k_asarray[] = "asarray";
static const char __pyx_k_fortran[] = "fortran";
static const char __pyx_k_memview[] = "memview";
static const char __pyx_k_Ellipsis[] = "Ellipsis";
static const char __pyx_k_fallback[] = "fallback";
static const char __pyx_k_getstate[] = "__getstate__";
static const char __pyx_k_itemsize[] = "itemsize";
static const char __pyx_k_pyx_type[] = "__pyx_type";
static const char __pyx_k_setstate[] = "__setstate__";
static const char __pyx_k_TypeError[] = "TypeError";
static const char __pyx_k_enumerate[] = "enumerate";
static const char __pyx_k_pyx_state[] = "__pyx_state";
static const char __pyx_k_reduce_ex[] = "__reduce_ex__";
static const char __pyx_k_IndexError[] = "IndexError";
static const char __pyx_k_ValueError[] = "ValueError";
static const char __pyx_k_pyx_result[] = "__pyx_result";
static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__";
static const char __pyx_k_ImportError[] = "ImportError";
static const char __pyx_k_MemoryError[] = "MemoryError";
static const char __pyx_k_PickleError[] = "PickleError";
static const char __pyx_k_bit_truncate[] = "bit_truncate";
static const char __pyx_k_pyx_checksum[] = "__pyx_checksum";
static const char __pyx_k_stringsource[] = "stringsource";
static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer";
static const char __pyx_k_reduce_cython[] = "__reduce_cython__";
static const char __pyx_k_View_MemoryView[] = "View.MemoryView";
static const char __pyx_k_allocate_buffer[] = "allocate_buffer";
static const char __pyx_k_dtype_is_object[] = "dtype_is_object";
static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError";
static const char __pyx_k_setstate_cython[] = "__setstate_cython__";
static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum";
static const char __pyx_k_bit_truncate_fixed[] = "bit_truncate_fixed";
static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback";
static const char __pyx_k_strided_and_direct[] = "<strided and direct>";
static const char __pyx_k_draco_util_truncate[] = "draco.util.truncate";
static const char __pyx_k_bit_truncate_weights[] = "bit_truncate_weights";
static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>";
static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>";
static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>";
static const char __pyx_k_draco_truncation_utils[] = "draco truncation utils";
static const char __pyx_k_Input_array_must_be_1_d[] = "Input array must be 1-d.";
static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>";
static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>";
static const char __pyx_k_draco_util_truncate_pyx[] = "draco/util/truncate.pyx";
static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'";
static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d.";
static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array";
static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data.";
static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>";
static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import";
static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides";
static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory.";
static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview";
static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview";
static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array";
static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))";
static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported";
static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s";
static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)";
static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object";
static const char __pyx_k_Weight_and_value_arrays_must_hav[] = "Weight and value arrays must have same shape ({:d} != {:d})";
static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)";
static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__";
static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import";
static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides.";
static PyObject *__pyx_n_s_ASCII;
static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri;
static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is;
static PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor;
static PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi;
static PyObject *__pyx_kp_s_Cannot_index_with_type_s;
static PyObject *__pyx_n_s_Ellipsis;
static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr;
static PyObject *__pyx_n_s_ImportError;
static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0;
static PyObject *__pyx_n_s_IndexError;
static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte;
static PyObject *__pyx_kp_s_Input_array_must_be_1_d;
static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr;
static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d;
static PyObject *__pyx_n_s_MemoryError;
static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x;
static PyObject *__pyx_kp_s_MemoryView_of_r_object;
static PyObject *__pyx_n_b_O;
static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a;
static PyObject *__pyx_n_s_PickleError;
static PyObject *__pyx_n_s_TypeError;
static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object;
static PyObject *__pyx_n_s_ValueError;
static PyObject *__pyx_n_s_View_MemoryView;
static PyObject *__pyx_kp_s_Weight_and_value_arrays_must_hav;
static PyObject *__pyx_n_s_allocate_buffer;
static PyObject *__pyx_n_s_asarray;
static PyObject *__pyx_n_s_base;
static PyObject *__pyx_n_s_bit_truncate;
static PyObject *__pyx_n_s_bit_truncate_fixed;
static PyObject *__pyx_n_s_bit_truncate_weights;
static PyObject *__pyx_n_s_c;
static PyObject *__pyx_n_u_c;
static PyObject *__pyx_n_s_class;
static PyObject *__pyx_n_s_cline_in_traceback;
static PyObject *__pyx_kp_s_contiguous_and_direct;
static PyObject *__pyx_kp_s_contiguous_and_indirect;
static PyObject *__pyx_n_s_dict;
static PyObject *__pyx_n_s_draco_util_truncate;
static PyObject *__pyx_kp_s_draco_util_truncate_pyx;
static PyObject *__pyx_n_s_dtype_is_object;
static PyObject *__pyx_n_s_encode;
static PyObject *__pyx_n_s_enumerate;
static PyObject *__pyx_n_s_err;
static PyObject *__pyx_n_s_error;
static PyObject *__pyx_n_s_fallback;
static PyObject *__pyx_n_s_flags;
static PyObject *__pyx_n_s_format;
static PyObject *__pyx_n_s_fortran;
static PyObject *__pyx_n_u_fortran;
static PyObject *__pyx_n_s_getstate;
static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi;
static PyObject *__pyx_n_s_i;
static PyObject *__pyx_n_s_id;
static PyObject *__pyx_n_s_import;
static PyObject *__pyx_n_s_itemsize;
static PyObject *__pyx_kp_s_itemsize_0_for_cython_array;
static PyObject *__pyx_n_s_main;
static PyObject *__pyx_n_s_memview;
static PyObject *__pyx_n_s_mode;
static PyObject *__pyx_n_s_n;
static PyObject *__pyx_n_s_name;
static PyObject *__pyx_n_s_name_2;
static PyObject *__pyx_n_s_ndim;
static PyObject *__pyx_n_s_new;
static PyObject *__pyx_kp_s_no_default___reduce___due_to_non;
static PyObject *__pyx_n_s_np;
static PyObject *__pyx_n_s_numpy;
static PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to;
static PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor;
static PyObject *__pyx_n_s_obj;
static PyObject *__pyx_n_s_pack;
static PyObject *__pyx_n_s_pickle;
static PyObject *__pyx_n_s_prec;
static PyObject *__pyx_n_s_pyx_PickleError;
static PyObject *__pyx_n_s_pyx_checksum;
static PyObject *__pyx_n_s_pyx_getbuffer;
static PyObject *__pyx_n_s_pyx_result;
static PyObject *__pyx_n_s_pyx_state;
static PyObject *__pyx_n_s_pyx_type;
static PyObject *__pyx_n_s_pyx_unpickle_Enum;
static PyObject *__pyx_n_s_pyx_vtable;
static PyObject *__pyx_n_s_range;
static PyObject *__pyx_n_s_reduce;
static PyObject *__pyx_n_s_reduce_cython;
static PyObject *__pyx_n_s_reduce_ex;
static PyObject *__pyx_n_s_setstate;
static PyObject *__pyx_n_s_setstate_cython;
static PyObject *__pyx_n_s_shape;
static PyObject *__pyx_n_s_size;
static PyObject *__pyx_n_s_start;
static PyObject *__pyx_n_s_step;
static PyObject *__pyx_n_s_stop;
static PyObject *__pyx_kp_s_strided_and_direct;
static PyObject *__pyx_kp_s_strided_and_direct_or_indirect;
static PyObject *__pyx_kp_s_strided_and_indirect;
static PyObject *__pyx_kp_s_stringsource;
static PyObject *__pyx_n_s_struct;
static PyObject *__pyx_n_s_test;
static PyObject *__pyx_kp_s_unable_to_allocate_array_data;
static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str;
static PyObject *__pyx_n_s_unpack;
static PyObject *__pyx_n_s_update;
static PyObject *__pyx_n_s_val;
static PyObject *__pyx_n_s_wgt;
static PyObject *__pyx_pf_5draco_4util_8truncate_bit_truncate(CYTHON_UNUSED PyObject *__pyx_self, float __pyx_v_val, float __pyx_v_err); /* proto */
static PyObject *__pyx_pf_5draco_4util_8truncate_2bit_truncate_weights(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_val, __Pyx_memviewslice __pyx_v_wgt, float __pyx_v_fallback); /* proto */
static PyObject *__pyx_pf_5draco_4util_8truncate_4bit_truncate_fixed(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_val, float __pyx_v_prec); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/
static PyObject *__pyx_int_0;
static PyObject *__pyx_int_1;
static PyObject *__pyx_int_184977713;
static PyObject *__pyx_int_neg_1;
static PyObject *__pyx_tuple_;
static PyObject *__pyx_tuple__2;
static PyObject *__pyx_tuple__3;
static PyObject *__pyx_tuple__4;
static PyObject *__pyx_tuple__5;
static PyObject *__pyx_tuple__6;
static PyObject *__pyx_tuple__7;
static PyObject *__pyx_tuple__8;
static PyObject *__pyx_tuple__9;
static PyObject *__pyx_slice__18;
static PyObject *__pyx_tuple__10;
static PyObject *__pyx_tuple__11;
static PyObject *__pyx_tuple__12;
static PyObject *__pyx_tuple__13;
static PyObject *__pyx_tuple__14;
static PyObject *__pyx_tuple__15;
static PyObject *__pyx_tuple__16;
static PyObject *__pyx_tuple__17;
static PyObject *__pyx_tuple__19;
static PyObject *__pyx_tuple__20;
static PyObject *__pyx_tuple__21;
static PyObject *__pyx_tuple__22;
static PyObject *__pyx_tuple__24;
static PyObject *__pyx_tuple__26;
static PyObject *__pyx_tuple__28;
static PyObject *__pyx_tuple__29;
static PyObject *__pyx_tuple__30;
static PyObject *__pyx_tuple__31;
static PyObject *__pyx_tuple__32;
static PyObject *__pyx_tuple__33;
static PyObject *__pyx_codeobj__23;
static PyObject *__pyx_codeobj__25;
static PyObject *__pyx_codeobj__27;
static PyObject *__pyx_codeobj__34;
/* Late includes */
/* "draco/util/truncate.pyx":12
* inline float bit_truncate_float(float val, float err) nogil
*
* def bit_truncate(float val, float err): # <<<<<<<<<<<<<<
* return bit_truncate_float(val, err)
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_5draco_4util_8truncate_1bit_truncate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_5draco_4util_8truncate_1bit_truncate = {"bit_truncate", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5draco_4util_8truncate_1bit_truncate, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_5draco_4util_8truncate_1bit_truncate(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
float __pyx_v_val;
float __pyx_v_err;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("bit_truncate (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_val,&__pyx_n_s_err,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_val)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_err)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("bit_truncate", 1, 2, 2, 1); __PYX_ERR(0, 12, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bit_truncate") < 0)) __PYX_ERR(0, 12, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_val = __pyx_PyFloat_AsFloat(values[0]); if (unlikely((__pyx_v_val == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 12, __pyx_L3_error)
__pyx_v_err = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_err == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 12, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("bit_truncate", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 12, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("draco.util.truncate.bit_truncate", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5draco_4util_8truncate_bit_truncate(__pyx_self, __pyx_v_val, __pyx_v_err);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
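/* Editor's note: the hand-rolled unpacking above is Cython's faster replacement
   for the stock CPython argument API. A minimal sketch of the same behaviour
   using PyArg_ParseTupleAndKeywords; the name `bit_truncate_simple` is
   hypothetical and not part of the generated module: */
static PyObject *bit_truncate_simple(PyObject *self, PyObject *args, PyObject *kwds)
{
    static char *kwlist[] = {"val", "err", NULL};
    float val, err;
    /* "ff" parses two Python floats into C floats; errors raise TypeError */
    if (!PyArg_ParseTupleAndKeywords(args, kwds, "ff:bit_truncate", kwlist, &val, &err))
        return NULL;
    return PyFloat_FromDouble(bit_truncate_float(val, err));
}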
static PyObject *__pyx_pf_5draco_4util_8truncate_bit_truncate(CYTHON_UNUSED PyObject *__pyx_self, float __pyx_v_val, float __pyx_v_err) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("bit_truncate", 0);
/* "draco/util/truncate.pyx":13
*
* def bit_truncate(float val, float err):
* return bit_truncate_float(val, err) # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyFloat_FromDouble(bit_truncate_float(__pyx_v_val, __pyx_v_err)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "draco/util/truncate.pyx":12
* inline float bit_truncate_float(float val, float err) nogil
*
* def bit_truncate(float val, float err): # <<<<<<<<<<<<<<
* return bit_truncate_float(val, err)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("draco.util.truncate.bit_truncate", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
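/* Editor's note: bit_truncate_float itself is external C code (declared via the
   `cdef extern` block quoted above) and is not part of this generated file. As
   a rough illustration only -- an assumption, not draco's actual routine, which
   also rounds and handles zeros/denormals properly -- truncating to a given
   absolute error can be done by clearing the low mantissa bits of an IEEE-754
   single-precision value: */
#include <stdint.h>
#include <string.h>
static float __pyx_editor_sketch_bit_truncate(float val, float err)
{
    uint32_t v, e;
    int shift;
    memcpy(&v, &val, sizeof v);   /* reinterpret the bit patterns */
    memcpy(&e, &err, sizeof e);
    /* Mantissa bits of `val` smaller in magnitude than `err`: the difference of
       the biased exponents, offset by the 23-bit mantissa width. */
    shift = (int)((e >> 23) & 0xffu) - (int)((v >> 23) & 0xffu) + 23;
    if (shift <= 0)
        return val;               /* err far below val's precision: keep all bits */
    if (shift > 23)
        return 0.0f;              /* err exceeds |val|: nothing significant survives */
    v &= ~((1u << shift) - 1u);   /* clear the insignificant low mantissa bits */
    memcpy(&val, &v, sizeof val);
    return val;
}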
/* "draco/util/truncate.pyx":17
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): # <<<<<<<<<<<<<<
* cdef int n = val.shape[0]
* if val.ndim != 1:
*/
/* Python wrapper */
static PyObject *__pyx_pw_5draco_4util_8truncate_3bit_truncate_weights(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_5draco_4util_8truncate_3bit_truncate_weights = {"bit_truncate_weights", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5draco_4util_8truncate_3bit_truncate_weights, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_5draco_4util_8truncate_3bit_truncate_weights(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_val = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_memviewslice __pyx_v_wgt = { 0, 0, { 0 }, { 0 }, { 0 } };
float __pyx_v_fallback;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("bit_truncate_weights (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_val,&__pyx_n_s_wgt,&__pyx_n_s_fallback,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_val)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_wgt)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("bit_truncate_weights", 1, 3, 3, 1); __PYX_ERR(0, 17, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_fallback)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("bit_truncate_weights", 1, 3, 3, 2); __PYX_ERR(0, 17, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bit_truncate_weights") < 0)) __PYX_ERR(0, 17, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v_val = __Pyx_PyObject_to_MemoryviewSlice_ds_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_val.memview)) __PYX_ERR(0, 17, __pyx_L3_error)
__pyx_v_wgt = __Pyx_PyObject_to_MemoryviewSlice_ds_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_wgt.memview)) __PYX_ERR(0, 17, __pyx_L3_error)
__pyx_v_fallback = __pyx_PyFloat_AsFloat(values[2]); if (unlikely((__pyx_v_fallback == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 17, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("bit_truncate_weights", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 17, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("draco.util.truncate.bit_truncate_weights", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5draco_4util_8truncate_2bit_truncate_weights(__pyx_self, __pyx_v_val, __pyx_v_wgt, __pyx_v_fallback);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_5draco_4util_8truncate_2bit_truncate_weights(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_val, __Pyx_memviewslice __pyx_v_wgt, float __pyx_v_fallback) {
int __pyx_v_n;
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_t_10;
Py_ssize_t __pyx_t_11;
Py_ssize_t __pyx_t_12;
double __pyx_t_13;
Py_ssize_t __pyx_t_14;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("bit_truncate_weights", 0);
/* "draco/util/truncate.pyx":18
* @cython.wraparound(False)
* def bit_truncate_weights(float[:] val, float[:] wgt, float fallback):
* cdef int n = val.shape[0] # <<<<<<<<<<<<<<
* if val.ndim != 1:
* raise ValueError("Input array must be 1-d.")
*/
__pyx_v_n = (__pyx_v_val.shape[0]);
/* "draco/util/truncate.pyx":19
* def bit_truncate_weights(float[:] val, float[:] wgt, float fallback):
* cdef int n = val.shape[0]
* if val.ndim != 1: # <<<<<<<<<<<<<<
* raise ValueError("Input array must be 1-d.")
* if wgt.shape[0] != n:
*/
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_val, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error) /* note: val is typed float[:], so ndim is statically 1; the slice is boxed here only to evaluate .ndim dynamically, and the 1-d check below can never fire */
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyInt_NeObjC(__pyx_t_2, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_1); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 19, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(__pyx_t_3)) {
/* "draco/util/truncate.pyx":20
* cdef int n = val.shape[0]
* if val.ndim != 1:
* raise ValueError("Input array must be 1-d.") # <<<<<<<<<<<<<<
* if wgt.shape[0] != n:
* raise ValueError(
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(0, 20, __pyx_L1_error)
/* "draco/util/truncate.pyx":19
* def bit_truncate_weights(float[:] val, float[:] wgt, float fallback):
* cdef int n = val.shape[0]
* if val.ndim != 1: # <<<<<<<<<<<<<<
* raise ValueError("Input array must be 1-d.")
* if wgt.shape[0] != n:
*/
}
/* "draco/util/truncate.pyx":21
* if val.ndim != 1:
* raise ValueError("Input array must be 1-d.")
* if wgt.shape[0] != n: # <<<<<<<<<<<<<<
* raise ValueError(
* "Weight and value arrays must have same "
*/
__pyx_t_3 = (((__pyx_v_wgt.shape[0]) != __pyx_v_n) != 0);
if (unlikely(__pyx_t_3)) {
/* "draco/util/truncate.pyx":24
* raise ValueError(
* "Weight and value arrays must have same "
* "shape ({:d} != {:d})".format(wgt.shape, n) # <<<<<<<<<<<<<<
* )
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_kp_s_Weight_and_value_arrays_must_hav, __pyx_n_s_format); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_carray_to_py_Py_ssize_t(__pyx_v_wgt.shape, 8); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) /* converts the full 8-slot shape buffer, not just the one valid entry; "{:d}" cannot format the resulting sequence, so this error path would itself raise TypeError at runtime */
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_n); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_4, __pyx_t_5};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) {
PyObject *__pyx_temp[3] = {__pyx_t_6, __pyx_t_4, __pyx_t_5};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_6) {
__Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_6); __pyx_t_6 = NULL;
}
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_t_5);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "draco/util/truncate.pyx":22
* raise ValueError("Input array must be 1-d.")
* if wgt.shape[0] != n:
* raise ValueError( # <<<<<<<<<<<<<<
* "Weight and value arrays must have same "
* "shape ({:d} != {:d})".format(wgt.shape, n)
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 22, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(0, 22, __pyx_L1_error)
/* "draco/util/truncate.pyx":21
* if val.ndim != 1:
* raise ValueError("Input array must be 1-d.")
* if wgt.shape[0] != n: # <<<<<<<<<<<<<<
* raise ValueError(
* "Weight and value arrays must have same "
*/
}
/* "draco/util/truncate.pyx":27
* )
*
* cdef int i = 0 # <<<<<<<<<<<<<<
*
* for i in prange(n, nogil=True):
*/
__pyx_v_i = 0;
/* "draco/util/truncate.pyx":29
* cdef int i = 0
*
* for i in prange(n, nogil=True): # <<<<<<<<<<<<<<
* if wgt[i] != 0:
* val[i] = bit_truncate_float(val[i], 1. / wgt[i]**0.5)
*/
{
#ifdef WITH_THREAD
PyThreadState *_save;
Py_UNBLOCK_THREADS
__Pyx_FastGIL_Remember();
#endif
/*try:*/ {
__pyx_t_7 = __pyx_v_n;
if ((1 == 0)) abort(); /* constant-folded guard against a zero prange step (the step here is the literal 1) */
{
int __pyx_parallel_temp0 = ((int)0xbad0bad0);
const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0;
PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL;
int __pyx_parallel_why;
__pyx_parallel_why = 0;
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) (x)
#define unlikely(x) (x)
#endif
__pyx_t_10 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; /* iteration count: (stop - start + step - step/|step|) / step, which reduces to n for start=0, step=1 */
if (__pyx_t_10 > 0)
{
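/* Editor's note: everything from here to the matching brace is Cython's
   expansion of `for i in prange(n, nogil=True)`: an OpenMP parallel for with
   `i` firstprivate/lastprivate, where an exception raised by any iteration
   (e.g. the ZeroDivisionError guard below) is fetched once under the GIL,
   published through __pyx_parallel_exc_type, and re-raised after the region. */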
#ifdef _OPENMP
#pragma omp parallel private(__pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_3) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb)
#endif /* _OPENMP */
{
#ifdef _OPENMP
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
Py_BEGIN_ALLOW_THREADS
#endif /* _OPENMP */
#ifdef _OPENMP
#pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i)
#endif /* _OPENMP */
for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_10; __pyx_t_9++){
if (__pyx_parallel_why < 2)
{
__pyx_v_i = (int)(0 + 1 * __pyx_t_9);
/* "draco/util/truncate.pyx":30
*
* for i in prange(n, nogil=True):
* if wgt[i] != 0: # <<<<<<<<<<<<<<
* val[i] = bit_truncate_float(val[i], 1. / wgt[i]**0.5)
* else:
*/
__pyx_t_11 = __pyx_v_i;
__pyx_t_3 = (((*((float *) ( /* dim=0 */ (__pyx_v_wgt.data + __pyx_t_11 * __pyx_v_wgt.strides[0]) ))) != 0.0) != 0);
if (__pyx_t_3) {
/* "draco/util/truncate.pyx":31
* for i in prange(n, nogil=True):
* if wgt[i] != 0:
* val[i] = bit_truncate_float(val[i], 1. / wgt[i]**0.5) # <<<<<<<<<<<<<<
* else:
* val[i] = bit_truncate_float(val[i], fallback * val[i])
*/
__pyx_t_11 = __pyx_v_i;
__pyx_t_12 = __pyx_v_i;
__pyx_t_13 = pow(((double)(*((float *) ( /* dim=0 */ (__pyx_v_wgt.data + __pyx_t_12 * __pyx_v_wgt.strides[0]) )))), 0.5);
if (unlikely(__pyx_t_13 == 0)) {
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
PyErr_SetString(PyExc_ZeroDivisionError, "float division");
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
__PYX_ERR(0, 31, __pyx_L10_error)
}
__pyx_t_12 = __pyx_v_i;
*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_12 * __pyx_v_val.strides[0]) )) = bit_truncate_float((*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_11 * __pyx_v_val.strides[0]) ))), (1. / __pyx_t_13));
/* "draco/util/truncate.pyx":30
*
* for i in prange(n, nogil=True):
* if wgt[i] != 0: # <<<<<<<<<<<<<<
* val[i] = bit_truncate_float(val[i], 1. / wgt[i]**0.5)
* else:
*/
goto __pyx_L12;
}
/* "draco/util/truncate.pyx":33
* val[i] = bit_truncate_float(val[i], 1. / wgt[i]**0.5)
* else:
* val[i] = bit_truncate_float(val[i], fallback * val[i]) # <<<<<<<<<<<<<<
*
* return np.asarray(val)
*/
/*else*/ {
__pyx_t_11 = __pyx_v_i;
__pyx_t_12 = __pyx_v_i;
__pyx_t_14 = __pyx_v_i;
*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_14 * __pyx_v_val.strides[0]) )) = bit_truncate_float((*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_11 * __pyx_v_val.strides[0]) ))), (__pyx_v_fallback * (*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_12 * __pyx_v_val.strides[0]) )))));
}
__pyx_L12:;
goto __pyx_L14;
__pyx_L10_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_exc_type)
#endif /* _OPENMP */
if (!__pyx_parallel_exc_type) {
__Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb);
__pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno;
__Pyx_GOTREF(__pyx_parallel_exc_type);
}
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_parallel_why = 4;
goto __pyx_L13;
__pyx_L13:;
#ifdef _OPENMP
#pragma omp critical(__pyx_parallel_lastprivates0)
#endif /* _OPENMP */
{
__pyx_parallel_temp0 = __pyx_v_i;
}
__pyx_L14:;
#ifdef _OPENMP
#pragma omp flush(__pyx_parallel_why)
#endif /* _OPENMP */
}
}
#ifdef _OPENMP
Py_END_ALLOW_THREADS
#else
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
#endif /* _OPENMP */
/* Clean up any temporaries */
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
#ifndef _OPENMP
}
#endif /* _OPENMP */
}
}
if (__pyx_parallel_exc_type) {
/* This may have been overridden by a continue, break or return in another thread. Prefer the error. */
__pyx_parallel_why = 4;
}
if (__pyx_parallel_why) {
__pyx_v_i = __pyx_parallel_temp0;
switch (__pyx_parallel_why) {
case 4:
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_GIVEREF(__pyx_parallel_exc_type);
__Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb);
__pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno;
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
goto __pyx_L6_error;
}
}
}
#if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95)))))
#undef likely
#undef unlikely
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
}
/* "draco/util/truncate.pyx":29
* cdef int i = 0
*
* for i in prange(n, nogil=True): # <<<<<<<<<<<<<<
* if wgt[i] != 0:
* val[i] = bit_truncate_float(val[i], 1. / wgt[i]**0.5)
*/
/*finally:*/ {
/*normal exit:*/{
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L7;
}
__pyx_L6_error: {
#ifdef WITH_THREAD
__Pyx_FastGIL_Forget();
Py_BLOCK_THREADS
#endif
goto __pyx_L1_error;
}
__pyx_L7:;
}
}
/* "draco/util/truncate.pyx":35
* val[i] = bit_truncate_float(val[i], fallback * val[i])
*
* return np.asarray(val) # <<<<<<<<<<<<<<
*
* @cython.boundscheck(False)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_np); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_asarray); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_val, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_8))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_8);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_8, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_5, __pyx_t_1) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_1);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 35, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "draco/util/truncate.pyx":17
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): # <<<<<<<<<<<<<<
* cdef int n = val.shape[0]
* if val.ndim != 1:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("draco.util.truncate.bit_truncate_weights", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_val, 1);
__PYX_XDEC_MEMVIEW(&__pyx_v_wgt, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
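/* Editor's note: stripped of the OpenMP and exception plumbing, the loop above
   is equivalent to this serial sketch. It assumes the external
   bit_truncate_float declared through the .pyx's `cdef extern` block; note a
   negative weight would yield NaN via sqrt: */
#include <math.h>
static void __pyx_editor_sketch_truncate_weights(float *val, const float *wgt,
                                                 float fallback, Py_ssize_t n)
{
    Py_ssize_t i;
    for (i = 0; i < n; i++) {
        if (wgt[i] != 0.0f)
            /* error estimate 1/sqrt(weight), as in truncate.pyx line 31 */
            val[i] = bit_truncate_float(val[i], (float)(1.0 / sqrt((double)wgt[i])));
        else
            /* no weight information: fall back to a relative precision */
            val[i] = bit_truncate_float(val[i], fallback * val[i]);
    }
}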
/* "draco/util/truncate.pyx":39
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def bit_truncate_fixed(float[:] val, float prec): # <<<<<<<<<<<<<<
* cdef int n = val.shape[0]
* cdef int i = 0
*/
/* Python wrapper */
static PyObject *__pyx_pw_5draco_4util_8truncate_5bit_truncate_fixed(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_5draco_4util_8truncate_5bit_truncate_fixed = {"bit_truncate_fixed", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_5draco_4util_8truncate_5bit_truncate_fixed, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_5draco_4util_8truncate_5bit_truncate_fixed(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
__Pyx_memviewslice __pyx_v_val = { 0, 0, { 0 }, { 0 }, { 0 } };
float __pyx_v_prec;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("bit_truncate_fixed (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_val,&__pyx_n_s_prec,0};
PyObject* values[2] = {0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_val)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_prec)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("bit_truncate_fixed", 1, 2, 2, 1); __PYX_ERR(0, 39, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "bit_truncate_fixed") < 0)) __PYX_ERR(0, 39, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 2) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
}
__pyx_v_val = __Pyx_PyObject_to_MemoryviewSlice_ds_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_val.memview)) __PYX_ERR(0, 39, __pyx_L3_error)
__pyx_v_prec = __pyx_PyFloat_AsFloat(values[1]); if (unlikely((__pyx_v_prec == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 39, __pyx_L3_error)
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("bit_truncate_fixed", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 39, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("draco.util.truncate.bit_truncate_fixed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_5draco_4util_8truncate_4bit_truncate_fixed(__pyx_self, __pyx_v_val, __pyx_v_prec);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_5draco_4util_8truncate_4bit_truncate_fixed(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_val, float __pyx_v_prec) {
int __pyx_v_n;
int __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("bit_truncate_fixed", 0);
/* "draco/util/truncate.pyx":40
* @cython.wraparound(False)
* def bit_truncate_fixed(float[:] val, float prec):
* cdef int n = val.shape[0] # <<<<<<<<<<<<<<
* cdef int i = 0
*
*/
__pyx_v_n = (__pyx_v_val.shape[0]);
/* "draco/util/truncate.pyx":41
* def bit_truncate_fixed(float[:] val, float prec):
* cdef int n = val.shape[0]
* cdef int i = 0 # <<<<<<<<<<<<<<
*
* for i in range(n):
*/
__pyx_v_i = 0;
/* "draco/util/truncate.pyx":43
* cdef int i = 0
*
* for i in range(n): # <<<<<<<<<<<<<<
* val[i] = bit_truncate_float(val[i], prec * val[i])
*
*/
__pyx_t_1 = __pyx_v_n;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "draco/util/truncate.pyx":44
*
* for i in range(n):
* val[i] = bit_truncate_float(val[i], prec * val[i]) # <<<<<<<<<<<<<<
*
* return np.asarray(val)
*/
__pyx_t_4 = __pyx_v_i;
__pyx_t_5 = __pyx_v_i;
__pyx_t_6 = __pyx_v_i;
*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_6 * __pyx_v_val.strides[0]) )) = bit_truncate_float((*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_4 * __pyx_v_val.strides[0]) ))), (__pyx_v_prec * (*((float *) ( /* dim=0 */ (__pyx_v_val.data + __pyx_t_5 * __pyx_v_val.strides[0]) )))));
}
/* "draco/util/truncate.pyx":46
* val[i] = bit_truncate_float(val[i], prec * val[i])
*
* return np.asarray(val) # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_8, __pyx_n_s_np); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_t_8, __pyx_n_s_asarray); if (unlikely(!__pyx_t_9)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__pyx_t_8 = __pyx_memoryview_fromslice(__pyx_v_val, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
__pyx_t_10 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_9))) {
__pyx_t_10 = PyMethod_GET_SELF(__pyx_t_9);
if (likely(__pyx_t_10)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_9);
__Pyx_INCREF(__pyx_t_10);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_9, function);
}
}
__pyx_t_7 = (__pyx_t_10) ? __Pyx_PyObject_Call2Args(__pyx_t_9, __pyx_t_10, __pyx_t_8) : __Pyx_PyObject_CallOneArg(__pyx_t_9, __pyx_t_8);
__Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 46, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_r = __pyx_t_7;
__pyx_t_7 = 0;
goto __pyx_L0;
/* "draco/util/truncate.pyx":39
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def bit_truncate_fixed(float[:] val, float prec): # <<<<<<<<<<<<<<
* cdef int n = val.shape[0]
* cdef int i = 0
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("draco.util.truncate.bit_truncate_fixed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__PYX_XDEC_MEMVIEW(&__pyx_v_val, 1);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
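/* Editor's note: the serial equivalent of the loop above (the .pyx uses a plain
   range here, not prange). `prec` acts as a relative precision, so the absolute
   error budget scales with each value; again assuming the external
   bit_truncate_float from the .pyx: */
static void __pyx_editor_sketch_truncate_fixed(float *val, float prec, Py_ssize_t n)
{
    Py_ssize_t i;
    for (i = 0; i < n; i++)
        val[i] = bit_truncate_float(val[i], prec * val[i]);
}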
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":734
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":735
*
* cdef inline object PyArray_MultiIterNew1(a):
* return PyArray_MultiIterNew(1, <void*>a) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew2(a, b):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 735, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":734
* ctypedef npy_cdouble complex_t
*
* cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(1, <void*>a)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
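/* Editor's note: the PyArray_MultiIterNewN helpers that follow come from
   numpy's __init__.pxd (see the path in the comments) and wrap numpy's C
   broadcasting iterator; Cython inlines them into every module that cimports
   numpy, whether or not the module itself calls them. */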
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":737
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":738
*
* cdef inline object PyArray_MultiIterNew2(a, b):
* return PyArray_MultiIterNew(2, <void*>a, <void*>b) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 738, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":737
* return PyArray_MultiIterNew(1, <void*>a)
*
* cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":740
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":741
*
* cdef inline object PyArray_MultiIterNew3(a, b, c):
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 741, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":740
* return PyArray_MultiIterNew(2, <void*>a, <void*>b)
*
* cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":743
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":744
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d):
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d) # <<<<<<<<<<<<<<
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 744, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":743
* return PyArray_MultiIterNew(3, <void*>a, <void*>b, <void*> c)
*
* cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":746
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":747
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e):
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e) # <<<<<<<<<<<<<<
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 747, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":746
* return PyArray_MultiIterNew(4, <void*>a, <void*>b, <void*>c, <void*> d)
*
* cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<<
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
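/* Editorial note: PyArray_MultiIterNew1..5 above are thin inline wrappers
 * around the variadic NumPy call PyArray_MultiIterNew(n, ...), which builds
 * a broadcasting multi-iterator over n array-like arguments; the .pxd
 * declares one wrapper per arity rather than a variadic signature. An
 * illustrative use of the underlying API, assuming both inputs broadcast
 * together and hold C doubles (editorial sketch, not part of this module):
 */
static CYTHON_UNUSED PyObject *editorial_demo_broadcast_sum(PyObject *a, PyObject *b) {
    double total = 0.0;
    PyArrayMultiIterObject *mit =
        (PyArrayMultiIterObject *)PyArray_MultiIterNew(2, a, b);
    if (unlikely(!mit)) return NULL;        /* e.g. shapes do not broadcast */
    while (PyArray_MultiIter_NOTDONE(mit)) {
        total += *(double *)PyArray_MultiIter_DATA(mit, 0)
               + *(double *)PyArray_MultiIter_DATA(mit, 1);
        PyArray_MultiIter_NEXT(mit);        /* advance both operands in lockstep */
    }
    Py_DECREF((PyObject *)mit);
    return PyFloat_FromDouble(total);
}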
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":749
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("PyDataType_SHAPE", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":750
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
__pyx_t_1 = (PyDataType_HASSUBARRAY(__pyx_v_d) != 0);
if (__pyx_t_1) {
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":751
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape # <<<<<<<<<<<<<<
* else:
* return ()
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape));
__pyx_r = ((PyObject*)__pyx_v_d->subarray->shape);
goto __pyx_L0;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":750
*
* cdef inline tuple PyDataType_SHAPE(dtype d):
* if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<<
* return <tuple>d.subarray.shape
* else:
*/
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":753
* return <tuple>d.subarray.shape
* else:
* return () # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_empty_tuple);
__pyx_r = __pyx_empty_tuple;
goto __pyx_L0;
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":749
* return PyArray_MultiIterNew(5, <void*>a, <void*>b, <void*>c, <void*> d, <void*> e)
*
* cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<<
* if PyDataType_HASSUBARRAY(d):
* return <tuple>d.subarray.shape
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
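/* Editorial note: PyDataType_SHAPE above distinguishes subarray dtypes from
 * scalar ones. For a dtype such as np.dtype((np.int32, (2, 3))),
 * PyDataType_HASSUBARRAY is true and d->subarray->shape is the Python tuple
 * (2, 3); for a plain dtype the helper returns the empty tuple so callers
 * can treat the shape uniformly. A hand-written equivalent (sketch only):
 */
static CYTHON_UNUSED PyObject *editorial_demo_dtype_shape(PyArray_Descr *d) {
    if (PyDataType_HASSUBARRAY(d)) {
        Py_INCREF(d->subarray->shape);      /* shape is a borrowed tuple */
        return d->subarray->shape;
    }
    return PyTuple_New(0);                  /* scalar dtype: shape () */
}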
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":868
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("set_array_base", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":869
*
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<<
* PyArray_SetBaseObject(arr, base)
*
*/
Py_INCREF(__pyx_v_base);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":870
* cdef inline void set_array_base(ndarray arr, object base):
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<<
*
* cdef inline object get_array_base(ndarray arr):
*/
(void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base));
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":868
* int _import_umath() except -1
*
* cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<<
* Py_INCREF(base) # important to do this before stealing the reference below!
* PyArray_SetBaseObject(arr, base)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
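/* Editorial note: PyArray_SetBaseObject steals a reference to `base`, so
 * the Py_INCREF above donates one on the caller's behalf; without it the
 * caller's reference would silently disappear. The same pairing in a
 * hand-written helper (editorial sketch, names hypothetical):
 */
static CYTHON_UNUSED int editorial_demo_set_base(PyArrayObject *arr, PyObject *base) {
    Py_INCREF(base);                         /* SetBaseObject steals this reference */
    return PyArray_SetBaseObject(arr, base); /* 0 on success, -1 with exception set */
}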
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":872
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) {
PyObject *__pyx_v_base;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("get_array_base", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":873
*
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr) # <<<<<<<<<<<<<<
* if base is NULL:
* return None
*/
__pyx_v_base = PyArray_BASE(__pyx_v_arr);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":874
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
__pyx_t_1 = ((__pyx_v_base == NULL) != 0);
if (__pyx_t_1) {
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":875
* base = PyArray_BASE(arr)
* if base is NULL:
* return None # <<<<<<<<<<<<<<
* return <object>base
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":874
* cdef inline object get_array_base(ndarray arr):
* base = PyArray_BASE(arr)
* if base is NULL: # <<<<<<<<<<<<<<
* return None
* return <object>base
*/
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":876
* if base is NULL:
* return None
* return <object>base # <<<<<<<<<<<<<<
*
* # Versions of the import_* functions which are more suitable for
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_base));
__pyx_r = ((PyObject *)__pyx_v_base);
goto __pyx_L0;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":872
* PyArray_SetBaseObject(arr, base)
*
* cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<<
* base = PyArray_BASE(arr)
* if base is NULL:
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
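/* Editorial note: PyArray_BASE returns a *borrowed* PyObject* (NULL when
 * the array owns its data), which is why get_array_base above INCREFs the
 * pointer before returning it as an owned `object` reference; together with
 * set_array_base this gives Cython code a balanced own/borrow protocol for
 * array base objects. */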
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":880
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_array", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":881
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":882
* cdef inline int import_array() except -1:
* try:
* __pyx_import_array() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import")
*/
__pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 882, __pyx_L3_error)
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":881
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":883
* try:
* __pyx_import_array()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.multiarray failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 883, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":884
* __pyx_import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 884, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 884, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":881
* # Cython code.
* cdef inline int import_array() except -1:
* try: # <<<<<<<<<<<<<<
* __pyx_import_array()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":880
* # Versions of the import_* functions which are more suitable for
* # Cython code.
* cdef inline int import_array() except -1: # <<<<<<<<<<<<<<
* try:
* __pyx_import_array()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
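/* Editorial note: import_array() must run once per extension module before
 * any other NumPy C-API call; it fills in the per-module PyArray_* function
 * table, and skipping it leaves that table NULL (a classic segfault).
 * Cython invokes the inline helper above during module initialisation. For
 * comparison, a hypothetical hand-written module init (editorial sketch,
 * kept out of the build since a second PyInit_* in this file would clash):
 */
#if 0
static struct PyModuleDef editorial_demo_module = {
    PyModuleDef_HEAD_INIT, "demo", NULL, -1, NULL
};
PyMODINIT_FUNC PyInit_demo(void) {
    import_array();                         /* macro returns NULL from init on failure */
    return PyModule_Create(&editorial_demo_module);
}
#endif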
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":886
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_umath", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":887
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":888
* cdef inline int import_umath() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 888, __pyx_L3_error)
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":887
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":889
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 889, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":890
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 890, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 890, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":887
*
* cdef inline int import_umath() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":886
* raise ImportError("numpy.core.multiarray failed to import")
*
* cdef inline int import_umath() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":892
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("import_ufunc", 0);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":893
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_1);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
/*try:*/ {
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":894
* cdef inline int import_ufunc() except -1:
* try:
* _import_umath() # <<<<<<<<<<<<<<
* except Exception:
* raise ImportError("numpy.core.umath failed to import")
*/
__pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 894, __pyx_L3_error)
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":893
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
}
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
goto __pyx_L8_try_end;
__pyx_L3_error:;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":895
* try:
* _import_umath()
* except Exception: # <<<<<<<<<<<<<<
* raise ImportError("numpy.core.umath failed to import")
*
*/
__pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0])));
if (__pyx_t_4) {
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(1, 895, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_6);
__Pyx_GOTREF(__pyx_t_7);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":896
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef extern from *:
*/
__pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 896, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_Raise(__pyx_t_8, 0, 0, 0);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__PYX_ERR(1, 896, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":893
*
* cdef inline int import_ufunc() except -1:
* try: # <<<<<<<<<<<<<<
* _import_umath()
* except Exception:
*/
__Pyx_XGIVEREF(__pyx_t_1);
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3);
goto __pyx_L1_error;
__pyx_L8_try_end:;
}
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":892
* raise ImportError("numpy.core.umath failed to import")
*
* cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<<
* try:
* _import_umath()
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
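/* Editorial note: import_umath and import_ufunc above are the same check
 * under two names: both call _import_umath(), which loads the ufunc C-API
 * table (numpy/ufuncobject.h), and both are meant to run at module init
 * just like import_array; import_ufunc is effectively an alias kept for
 * code that only touches the ufunc API. */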
/* "carray.to_py":112
*
* @cname("__Pyx_carray_to_py_Py_ssize_t")
* cdef inline list __Pyx_carray_to_py_Py_ssize_t(base_type *v, Py_ssize_t length): # <<<<<<<<<<<<<<
* cdef size_t i
* cdef object value
*/
static CYTHON_INLINE PyObject *__Pyx_carray_to_py_Py_ssize_t(Py_ssize_t *__pyx_v_v, Py_ssize_t __pyx_v_length) {
size_t __pyx_v_i;
PyObject *__pyx_v_value = 0;
PyObject *__pyx_v_l = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
size_t __pyx_t_2;
size_t __pyx_t_3;
size_t __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_carray_to_py_Py_ssize_t", 0);
/* "carray.to_py":115
* cdef size_t i
* cdef object value
* l = PyList_New(length) # <<<<<<<<<<<<<<
* for i in range(<size_t>length):
* value = v[i]
*/
__pyx_t_1 = PyList_New(__pyx_v_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 115, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_l = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "carray.to_py":116
* cdef object value
* l = PyList_New(length)
* for i in range(<size_t>length): # <<<<<<<<<<<<<<
* value = v[i]
* Py_INCREF(value)
*/
__pyx_t_2 = ((size_t)__pyx_v_length);
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "carray.to_py":117
* l = PyList_New(length)
* for i in range(<size_t>length):
* value = v[i] # <<<<<<<<<<<<<<
* Py_INCREF(value)
* PyList_SET_ITEM(l, i, value)
*/
__pyx_t_1 = PyInt_FromSsize_t((__pyx_v_v[__pyx_v_i])); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 117, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XDECREF_SET(__pyx_v_value, __pyx_t_1);
__pyx_t_1 = 0;
/* "carray.to_py":118
* for i in range(<size_t>length):
* value = v[i]
* Py_INCREF(value) # <<<<<<<<<<<<<<
* PyList_SET_ITEM(l, i, value)
* return l
*/
Py_INCREF(__pyx_v_value);
/* "carray.to_py":119
* value = v[i]
* Py_INCREF(value)
* PyList_SET_ITEM(l, i, value) # <<<<<<<<<<<<<<
* return l
*
*/
PyList_SET_ITEM(__pyx_v_l, __pyx_v_i, __pyx_v_value);
}
/* "carray.to_py":120
* Py_INCREF(value)
* PyList_SET_ITEM(l, i, value)
* return l # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_l);
__pyx_r = __pyx_v_l;
goto __pyx_L0;
/* "carray.to_py":112
*
* @cname("__Pyx_carray_to_py_Py_ssize_t")
* cdef inline list __Pyx_carray_to_py_Py_ssize_t(base_type *v, Py_ssize_t length): # <<<<<<<<<<<<<<
* cdef size_t i
* cdef object value
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("carray.to_py.__Pyx_carray_to_py_Py_ssize_t", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_value);
__Pyx_XDECREF(__pyx_v_l);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "carray.to_py":124
*
* @cname("__Pyx_carray_to_tuple_Py_ssize_t")
* cdef inline tuple __Pyx_carray_to_tuple_Py_ssize_t(base_type *v, Py_ssize_t length): # <<<<<<<<<<<<<<
* cdef size_t i
* cdef object value
*/
static CYTHON_INLINE PyObject *__Pyx_carray_to_tuple_Py_ssize_t(Py_ssize_t *__pyx_v_v, Py_ssize_t __pyx_v_length) {
size_t __pyx_v_i;
PyObject *__pyx_v_value = 0;
PyObject *__pyx_v_t = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
size_t __pyx_t_2;
size_t __pyx_t_3;
size_t __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_carray_to_tuple_Py_ssize_t", 0);
/* "carray.to_py":127
* cdef size_t i
* cdef object value
* t = PyTuple_New(length) # <<<<<<<<<<<<<<
* for i in range(<size_t>length):
* value = v[i]
*/
__pyx_t_1 = PyTuple_New(__pyx_v_length); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 127, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_t = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "carray.to_py":128
* cdef object value
* t = PyTuple_New(length)
* for i in range(<size_t>length): # <<<<<<<<<<<<<<
* value = v[i]
* Py_INCREF(value)
*/
__pyx_t_2 = ((size_t)__pyx_v_length);
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "carray.to_py":129
* t = PyTuple_New(length)
* for i in range(<size_t>length):
* value = v[i] # <<<<<<<<<<<<<<
* Py_INCREF(value)
* PyTuple_SET_ITEM(t, i, value)
*/
__pyx_t_1 = PyInt_FromSsize_t((__pyx_v_v[__pyx_v_i])); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 129, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XDECREF_SET(__pyx_v_value, __pyx_t_1);
__pyx_t_1 = 0;
/* "carray.to_py":130
* for i in range(<size_t>length):
* value = v[i]
* Py_INCREF(value) # <<<<<<<<<<<<<<
* PyTuple_SET_ITEM(t, i, value)
* return t
*/
Py_INCREF(__pyx_v_value);
/* "carray.to_py":131
* value = v[i]
* Py_INCREF(value)
* PyTuple_SET_ITEM(t, i, value) # <<<<<<<<<<<<<<
* return t
*/
PyTuple_SET_ITEM(__pyx_v_t, __pyx_v_i, __pyx_v_value);
}
/* "carray.to_py":132
* Py_INCREF(value)
* PyTuple_SET_ITEM(t, i, value)
* return t # <<<<<<<<<<<<<<
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_t);
__pyx_r = __pyx_v_t;
goto __pyx_L0;
/* "carray.to_py":124
*
* @cname("__Pyx_carray_to_tuple_Py_ssize_t")
* cdef inline tuple __Pyx_carray_to_tuple_Py_ssize_t(base_type *v, Py_ssize_t length): # <<<<<<<<<<<<<<
* cdef size_t i
* cdef object value
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("carray.to_py.__Pyx_carray_to_tuple_Py_ssize_t", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_value);
__Pyx_XDECREF(__pyx_v_t);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
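/* Editorial note on the two converters above: PyList_New(n) and
 * PyTuple_New(n) return containers whose slots start out NULL, and
 * PyList_SET_ITEM / PyTuple_SET_ITEM *steal* a reference to the stored
 * item. The extra Py_INCREF(value) before each SET_ITEM balances that
 * steal, because the Cython variable `value` keeps its own reference,
 * which the function epilogue releases separately. A compact hand-written
 * version of the list case (editorial sketch, not part of this module):
 */
static CYTHON_UNUSED PyObject *editorial_demo_carray_to_list(const Py_ssize_t *v,
                                                             Py_ssize_t n) {
    Py_ssize_t i;
    PyObject *l = PyList_New(n);            /* all slots NULL until filled */
    if (unlikely(!l)) return NULL;
    for (i = 0; i < n; i++) {
        PyObject *item = PyLong_FromSsize_t(v[i]);
        if (unlikely(!item)) { Py_DECREF(l); return NULL; } /* NULL slots are safe to free */
        PyList_SET_ITEM(l, i, item);        /* steals item's reference */
    }
    return l;
}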
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* Python wrapper */
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_shape = 0;
Py_ssize_t __pyx_v_itemsize;
PyObject *__pyx_v_format = 0;
PyObject *__pyx_v_mode = 0;
int __pyx_v_allocate_buffer;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0};
PyObject* values[5] = {0,0,0,0,0};
values[3] = ((PyObject *)__pyx_n_s_c);
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(2, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(2, 122, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 3:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_mode);
if (value) { values[3] = value; kw_args--; }
}
CYTHON_FALLTHROUGH;
case 4:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_allocate_buffer);
if (value) { values[4] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 122, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4);
CYTHON_FALLTHROUGH;
case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3);
CYTHON_FALLTHROUGH;
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_shape = ((PyObject*)values[0]);
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 122, __pyx_L3_error)
__pyx_v_format = values[2];
__pyx_v_mode = values[3];
if (values[4]) {
__pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 123, __pyx_L3_error)
} else {
/* "View.MemoryView":123
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None,
* mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<<
*
* cdef int idx
*/
__pyx_v_allocate_buffer = ((int)1);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 122, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(2, 122, __pyx_L1_error)
if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) {
PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(2, 122, __pyx_L1_error)
}
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer);
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
goto __pyx_L0;
__pyx_L1_error:;
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
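/* Editorial note: the wrapper above is Cython's hand-rolled argument
 * unpacking: a fall-through switch copies positional arguments into
 * values[], defaults are pre-seeded (values[3] = "c"), and keyword
 * arguments are looked up slot by slot. Cython generates this instead of
 * calling PyArg_ParseTupleAndKeywords largely because the specialised code
 * is faster. A rough hand-written equivalent of the same signature
 * (editorial sketch only, excluded from the build):
 */
#if 0
static int editorial_demo_parse(PyObject *args, PyObject *kwds) {
    static char *kwlist[] = {"shape", "itemsize", "format", "mode",
                             "allocate_buffer", NULL};
    PyObject *shape = NULL, *format = NULL, *mode = NULL;
    Py_ssize_t itemsize = 0;
    int allocate_buffer = 1;                /* matches the default above */
    return PyArg_ParseTupleAndKeywords(args, kwds, "O!nO|Op", kwlist,
                                       &PyTuple_Type, &shape, &itemsize,
                                       &format, &mode, &allocate_buffer);
}
#endif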
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) {
int __pyx_v_idx;
Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_dim;
PyObject **__pyx_v_p;
char __pyx_v_order;
int __pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
char *__pyx_t_7;
int __pyx_t_8;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
Py_ssize_t __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
__Pyx_INCREF(__pyx_v_format);
/* "View.MemoryView":129
* cdef PyObject **p
*
* self.ndim = <int> len(shape) # <<<<<<<<<<<<<<
* self.itemsize = itemsize
*
*/
if (unlikely(__pyx_v_shape == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(2, 129, __pyx_L1_error)
}
__pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(2, 129, __pyx_L1_error)
__pyx_v_self->ndim = ((int)__pyx_t_1);
/* "View.MemoryView":130
*
* self.ndim = <int> len(shape)
* self.itemsize = itemsize # <<<<<<<<<<<<<<
*
* if not self.ndim:
*/
__pyx_v_self->itemsize = __pyx_v_itemsize;
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
__pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 133, __pyx_L1_error)
/* "View.MemoryView":132
* self.itemsize = itemsize
*
* if not self.ndim: # <<<<<<<<<<<<<<
* raise ValueError("Empty shape tuple for cython.array")
*
*/
}
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
__pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 136, __pyx_L1_error)
/* "View.MemoryView":135
* raise ValueError("Empty shape tuple for cython.array")
*
* if itemsize <= 0: # <<<<<<<<<<<<<<
* raise ValueError("itemsize <= 0 for cython.array")
*
*/
}
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
__pyx_t_2 = PyBytes_Check(__pyx_v_format);
__pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":139
*
* if not isinstance(format, bytes):
* format = format.encode('ASCII') # <<<<<<<<<<<<<<
* self._format = format # keep a reference to the byte string
* self.format = self._format
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_6)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_6);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
}
}
__pyx_t_3 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_5, __pyx_t_6, __pyx_n_s_ASCII) : __Pyx_PyObject_CallOneArg(__pyx_t_5, __pyx_n_s_ASCII);
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 139, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF_SET(__pyx_v_format, __pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":138
* raise ValueError("itemsize <= 0 for cython.array")
*
* if not isinstance(format, bytes): # <<<<<<<<<<<<<<
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
*/
}
/* "View.MemoryView":140
* if not isinstance(format, bytes):
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<<
* self.format = self._format
*
*/
if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(2, 140, __pyx_L1_error)
__pyx_t_3 = __pyx_v_format;
__Pyx_INCREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_3);
__Pyx_GOTREF(__pyx_v_self->_format);
__Pyx_DECREF(__pyx_v_self->_format);
__pyx_v_self->_format = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":141
* format = format.encode('ASCII')
* self._format = format # keep a reference to the byte string
* self.format = self._format # <<<<<<<<<<<<<<
*
*
*/
if (unlikely(__pyx_v_self->_format == Py_None)) {
PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found");
__PYX_ERR(2, 141, __pyx_L1_error)
}
__pyx_t_7 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_7) && PyErr_Occurred())) __PYX_ERR(2, 141, __pyx_L1_error)
__pyx_v_self->format = __pyx_t_7;
/* "View.MemoryView":144
*
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<<
* self._strides = self._shape + self.ndim
*
*/
__pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2)));
/* "View.MemoryView":145
*
* self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2)
* self._strides = self._shape + self.ndim # <<<<<<<<<<<<<<
*
* if not self._shape:
*/
__pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim);
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 148, __pyx_L1_error)
/* "View.MemoryView":147
* self._strides = self._shape + self.ndim
*
* if not self._shape: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate shape and strides.")
*
*/
}
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
__pyx_t_8 = 0;
__pyx_t_3 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0;
for (;;) {
if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(2, 151, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 151, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 151, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_9;
__pyx_v_idx = __pyx_t_8;
__pyx_t_8 = (__pyx_t_8 + 1);
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
__pyx_t_4 = ((__pyx_v_dim <= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":153
* for idx, dim in enumerate(shape):
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<<
* self._shape[idx] = dim
*
*/
__pyx_t_5 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_6);
__pyx_t_5 = 0;
__pyx_t_6 = 0;
__pyx_t_6 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 153, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(2, 153, __pyx_L1_error)
/* "View.MemoryView":152
*
* for idx, dim in enumerate(shape):
* if dim <= 0: # <<<<<<<<<<<<<<
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim
*/
}
/* "View.MemoryView":154
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
* self._shape[idx] = dim # <<<<<<<<<<<<<<
*
* cdef char order
*/
(__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim;
/* "View.MemoryView":151
*
*
* for idx, dim in enumerate(shape): # <<<<<<<<<<<<<<
* if dim <= 0:
* raise ValueError("Invalid shape in axis %d: %d." % (idx, dim))
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 157, __pyx_L1_error)
if (__pyx_t_4) {
/* "View.MemoryView":158
* cdef char order
* if mode == 'fortran':
* order = b'F' # <<<<<<<<<<<<<<
* self.mode = u'fortran'
* elif mode == 'c':
*/
__pyx_v_order = 'F';
/* "View.MemoryView":159
* if mode == 'fortran':
* order = b'F'
* self.mode = u'fortran' # <<<<<<<<<<<<<<
* elif mode == 'c':
* order = b'C'
*/
__Pyx_INCREF(__pyx_n_u_fortran);
__Pyx_GIVEREF(__pyx_n_u_fortran);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_fortran;
/* "View.MemoryView":157
*
* cdef char order
* if mode == 'fortran': # <<<<<<<<<<<<<<
* order = b'F'
* self.mode = u'fortran'
*/
goto __pyx_L10;
}
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
__pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(2, 160, __pyx_L1_error)
if (likely(__pyx_t_4)) {
/* "View.MemoryView":161
* self.mode = u'fortran'
* elif mode == 'c':
* order = b'C' # <<<<<<<<<<<<<<
* self.mode = u'c'
* else:
*/
__pyx_v_order = 'C';
/* "View.MemoryView":162
* elif mode == 'c':
* order = b'C'
* self.mode = u'c' # <<<<<<<<<<<<<<
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*/
__Pyx_INCREF(__pyx_n_u_c);
__Pyx_GIVEREF(__pyx_n_u_c);
__Pyx_GOTREF(__pyx_v_self->mode);
__Pyx_DECREF(__pyx_v_self->mode);
__pyx_v_self->mode = __pyx_n_u_c;
/* "View.MemoryView":160
* order = b'F'
* self.mode = u'fortran'
* elif mode == 'c': # <<<<<<<<<<<<<<
* order = b'C'
* self.mode = u'c'
*/
goto __pyx_L10;
}
/* "View.MemoryView":164
* self.mode = u'c'
* else:
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<<
*
* self.len = fill_contig_strides_array(self._shape, self._strides,
*/
/*else*/ {
__pyx_t_3 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_10 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 164, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(2, 164, __pyx_L1_error)
}
__pyx_L10:;
/* "View.MemoryView":166
* raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode)
*
* self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<<
* itemsize, self.ndim, order)
*
*/
__pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order);
/* "View.MemoryView":169
* itemsize, self.ndim, order)
*
* self.free_data = allocate_buffer # <<<<<<<<<<<<<<
* self.dtype_is_object = format == b'O'
* if allocate_buffer:
*/
__pyx_v_self->free_data = __pyx_v_allocate_buffer;
/* "View.MemoryView":170
*
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<<
* if allocate_buffer:
*
*/
__pyx_t_10 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_10); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 170, __pyx_L1_error)
__pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_10); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 170, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__pyx_v_self->dtype_is_object = __pyx_t_4;
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_4 = (__pyx_v_allocate_buffer != 0);
if (__pyx_t_4) {
/* "View.MemoryView":174
*
*
* self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<<
* if not self.data:
* raise MemoryError("unable to allocate array data.")
*/
__pyx_v_self->data = ((char *)malloc(__pyx_v_self->len));
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
__pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(2, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_10);
__Pyx_Raise(__pyx_t_10, 0, 0, 0);
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
__PYX_ERR(2, 176, __pyx_L1_error)
/* "View.MemoryView":175
*
* self.data = <char *>malloc(self.len)
* if not self.data: # <<<<<<<<<<<<<<
* raise MemoryError("unable to allocate array data.")
*
*/
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
__pyx_t_4 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_4) {
/* "View.MemoryView":179
*
* if self.dtype_is_object:
* p = <PyObject **> self.data # <<<<<<<<<<<<<<
* for i in range(self.len / itemsize):
* p[i] = Py_None
*/
__pyx_v_p = ((PyObject **)__pyx_v_self->data);
/* "View.MemoryView":180
* if self.dtype_is_object:
* p = <PyObject **> self.data
* for i in range(self.len / itemsize): # <<<<<<<<<<<<<<
* p[i] = Py_None
* Py_INCREF(Py_None)
*/
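  /* Editorial note: the two guards below exist because Cython gives the
   * Python-level expression self.len / itemsize full Python semantics on
   * C integers: division by zero must raise ZeroDivisionError rather than
   * trap, and PY_SSIZE_T_MIN / -1 would overflow the C division, so it is
   * reported as OverflowError; __Pyx_div_Py_ssize_t then applies Python's
   * floor-division rounding for negative operands. */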
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(2, 180, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(2, 180, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize);
__pyx_t_9 = __pyx_t_1;
for (__pyx_t_11 = 0; __pyx_t_11 < __pyx_t_9; __pyx_t_11+=1) {
__pyx_v_i = __pyx_t_11;
/* "View.MemoryView":181
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
* p[i] = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
(__pyx_v_p[__pyx_v_i]) = Py_None;
/* "View.MemoryView":182
* for i in range(self.len / itemsize):
* p[i] = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
Py_INCREF(Py_None);
}
/* "View.MemoryView":178
* raise MemoryError("unable to allocate array data.")
*
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* p = <PyObject **> self.data
* for i in range(self.len / itemsize):
*/
}
/* "View.MemoryView":171
* self.free_data = allocate_buffer
* self.dtype_is_object = format == b'O'
* if allocate_buffer: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":122
* cdef bint dtype_is_object
*
* def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<<
* mode="c", bint allocate_buffer=True):
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_format);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
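/* Editorial note: when the element type is object (format == b'O'), the
 * freshly malloc'ed buffer in __cinit__ above must hold valid PyObject*
 * values before anything may traverse or decref it, so every slot is
 * seeded with Py_None and each slot takes its own reference. The core of
 * that loop as a standalone helper (editorial sketch, not part of this
 * module):
 */
static CYTHON_UNUSED void editorial_demo_seed_object_buffer(PyObject **p, Py_ssize_t n) {
    Py_ssize_t i;
    for (i = 0; i < n; i++) {
        Py_INCREF(Py_None);                 /* each slot owns a reference */
        p[i] = Py_None;
    }
}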
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_v_bufmode;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
char *__pyx_t_4;
Py_ssize_t __pyx_t_5;
int __pyx_t_6;
Py_ssize_t *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":186
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1 # <<<<<<<<<<<<<<
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = -1;
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
__pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 187, __pyx_L1_error)
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":188
* cdef int bufmode = -1
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
*/
__pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":187
* def __getbuffer__(self, Py_buffer *info, int flags):
* cdef int bufmode = -1
* if self.mode == u"c": # <<<<<<<<<<<<<<
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
*/
goto __pyx_L3;
}
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
__pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 189, __pyx_L1_error)
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":190
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<<
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
*/
__pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS);
/* "View.MemoryView":189
* if self.mode == u"c":
* bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* elif self.mode == u"fortran": # <<<<<<<<<<<<<<
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
*/
}
__pyx_L3:;
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
__pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 192, __pyx_L1_error)
/* "View.MemoryView":191
* elif self.mode == u"fortran":
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode): # <<<<<<<<<<<<<<
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
*/
}
/* "View.MemoryView":193
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data # <<<<<<<<<<<<<<
* info.len = self.len
* info.ndim = self.ndim
*/
__pyx_t_4 = __pyx_v_self->data;
__pyx_v_info->buf = __pyx_t_4;
/* "View.MemoryView":194
* raise ValueError("Can only create a buffer that is contiguous in memory.")
* info.buf = self.data
* info.len = self.len # <<<<<<<<<<<<<<
* info.ndim = self.ndim
* info.shape = self._shape
*/
__pyx_t_5 = __pyx_v_self->len;
__pyx_v_info->len = __pyx_t_5;
/* "View.MemoryView":195
* info.buf = self.data
* info.len = self.len
* info.ndim = self.ndim # <<<<<<<<<<<<<<
* info.shape = self._shape
* info.strides = self._strides
*/
__pyx_t_6 = __pyx_v_self->ndim;
__pyx_v_info->ndim = __pyx_t_6;
/* "View.MemoryView":196
* info.len = self.len
* info.ndim = self.ndim
* info.shape = self._shape # <<<<<<<<<<<<<<
* info.strides = self._strides
* info.suboffsets = NULL
*/
__pyx_t_7 = __pyx_v_self->_shape;
__pyx_v_info->shape = __pyx_t_7;
/* "View.MemoryView":197
* info.ndim = self.ndim
* info.shape = self._shape
* info.strides = self._strides # <<<<<<<<<<<<<<
* info.suboffsets = NULL
* info.itemsize = self.itemsize
*/
__pyx_t_7 = __pyx_v_self->_strides;
__pyx_v_info->strides = __pyx_t_7;
/* "View.MemoryView":198
* info.shape = self._shape
* info.strides = self._strides
* info.suboffsets = NULL # <<<<<<<<<<<<<<
* info.itemsize = self.itemsize
* info.readonly = 0
*/
__pyx_v_info->suboffsets = NULL;
/* "View.MemoryView":199
* info.strides = self._strides
* info.suboffsets = NULL
* info.itemsize = self.itemsize # <<<<<<<<<<<<<<
* info.readonly = 0
*
*/
__pyx_t_5 = __pyx_v_self->itemsize;
__pyx_v_info->itemsize = __pyx_t_5;
/* "View.MemoryView":200
* info.suboffsets = NULL
* info.itemsize = self.itemsize
* info.readonly = 0 # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
__pyx_v_info->readonly = 0;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":203
*
* if flags & PyBUF_FORMAT:
* info.format = self.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_4 = __pyx_v_self->format;
__pyx_v_info->format = __pyx_t_4;
/* "View.MemoryView":202
* info.readonly = 0
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.format
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":205
* info.format = self.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.obj = self
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L5:;
/* "View.MemoryView":207
* info.format = NULL
*
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":185
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* cdef int bufmode = -1
* if self.mode == u"c":
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
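/* Deallocation for `array` follows. If a callback_free_data hook was
 * installed, it is responsible for releasing self.data; otherwise, when
 * free_data is set, object-typed buffers first have each element DECREF'ed
 * via refcount_objects_in_slice before free(self.data), and the shape/stride
 * storage is released with PyObject_Free. */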
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* Python wrapper */
static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_array___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
__pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":213
* def __dealloc__(array self):
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data) # <<<<<<<<<<<<<<
* elif self.free_data:
* if self.dtype_is_object:
*/
__pyx_v_self->callback_free_data(__pyx_v_self->data);
/* "View.MemoryView":212
*
* def __dealloc__(array self):
* if self.callback_free_data != NULL: # <<<<<<<<<<<<<<
* self.callback_free_data(self.data)
* elif self.free_data:
*/
goto __pyx_L3;
}
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
__pyx_t_1 = (__pyx_v_self->free_data != 0);
if (__pyx_t_1) {
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
__pyx_t_1 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":216
* elif self.free_data:
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<<
* self._strides, self.ndim, False)
* free(self.data)
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0);
/* "View.MemoryView":215
* self.callback_free_data(self.data)
* elif self.free_data:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
*/
}
/* "View.MemoryView":218
* refcount_objects_in_slice(self.data, self._shape,
* self._strides, self.ndim, False)
* free(self.data) # <<<<<<<<<<<<<<
* PyObject_Free(self._shape)
*
*/
free(__pyx_v_self->data);
/* "View.MemoryView":214
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
* elif self.free_data: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* refcount_objects_in_slice(self.data, self._shape,
*/
}
__pyx_L3:;
/* "View.MemoryView":219
* self._strides, self.ndim, False)
* free(self.data)
* PyObject_Free(self._shape) # <<<<<<<<<<<<<<
*
* @property
*/
PyObject_Free(__pyx_v_self->_shape);
/* "View.MemoryView":211
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)")
*
* def __dealloc__(array self): # <<<<<<<<<<<<<<
* if self.callback_free_data != NULL:
* self.callback_free_data(self.data)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
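/* The `memview` property and the get_memview() helper below expose the array
 * through a writable memoryview requested with PyBUF_ANY_CONTIGUOUS |
 * PyBUF_FORMAT | PyBUF_WRITABLE; the dunder methods that follow delegate
 * their work to that view. */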
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":223
* @property
* def memview(self):
* return self.get_memview() # <<<<<<<<<<<<<<
*
* @cname('get_memview')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 223, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":222
*
* @property
* def memview(self): # <<<<<<<<<<<<<<
* return self.get_memview()
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) {
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_memview", 0);
/* "View.MemoryView":227
* @cname('get_memview')
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<<
* return memoryview(self, flags, self.dtype_is_object)
*
*/
__pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE);
/* "View.MemoryView":228
* cdef get_memview(self):
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 228, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":226
*
* @cname('get_memview')
* cdef get_memview(self): # <<<<<<<<<<<<<<
* flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE
* return memoryview(self, flags, self.dtype_is_object)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
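/* __len__ below returns self._shape[0], the extent of the first dimension,
 * matching the len() convention of NumPy arrays and built-in memoryviews. */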
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* Python wrapper */
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":231
*
* def __len__(self):
* return self._shape[0] # <<<<<<<<<<<<<<
*
* def __getattr__(self, attr):
*/
__pyx_r = (__pyx_v_self->_shape[0]);
goto __pyx_L0;
/* "View.MemoryView":230
* return memoryview(self, flags, self.dtype_is_object)
*
* def __len__(self): # <<<<<<<<<<<<<<
* return self._shape[0]
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
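/* The next three methods (__getattr__, __getitem__, __setitem__) simply
 * forward to self.memview, so a.shape, a[i, j] and a[i, j] = v behave the
 * same as the corresponding operations on memoryview(a). */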
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/
static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getattr__", 0);
/* "View.MemoryView":234
*
* def __getattr__(self, attr):
* return getattr(self.memview, attr) # <<<<<<<<<<<<<<
*
* def __getitem__(self, item):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 234, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":233
* return self._shape[0]
*
* def __getattr__(self, attr): # <<<<<<<<<<<<<<
* return getattr(self.memview, attr)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* Python wrapper */
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/
static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":237
*
* def __getitem__(self, item):
* return self.memview[item] # <<<<<<<<<<<<<<
*
* def __setitem__(self, item, value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 237, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":236
* return getattr(self.memview, attr)
*
* def __getitem__(self, item): # <<<<<<<<<<<<<<
* return self.memview[item]
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* Python wrapper */
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/
static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
/* "View.MemoryView":240
*
* def __setitem__(self, item, value):
* self.memview[item] = value # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 240, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(2, 240, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":239
* return self.memview[item]
*
* def __setitem__(self, item, value): # <<<<<<<<<<<<<<
* self.memview[item] = value
*
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
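/* The "(tree fragment)" methods that follow are Cython's generated pickle
 * guards: because array has a non-trivial __cinit__, no safe default
 * __reduce__ exists, so both __reduce_cython__ and __setstate_cython__
 * unconditionally raise TypeError. Expected effect (a sketch, assuming the
 * standard pickle path):
 *
 *     import pickle
 *     pickle.dumps(a)   # TypeError: no default __reduce__ due to non-trivial __cinit__
 */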
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
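/* array_cwrapper (exposed under the cname __pyx_array_new) is the C-level
 * constructor used internally. With buf == NULL it builds an array that
 * allocates its own storage; otherwise it constructs with
 * allocate_buffer=False and points result.data at the caller-supplied
 * buffer, leaving ownership of that memory with the caller. */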
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, char *__pyx_v_buf) {
struct __pyx_array_obj *__pyx_v_result = 0;
struct __pyx_array_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("array_cwrapper", 0);
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
__pyx_t_1 = ((__pyx_v_buf == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":249
*
* if buf == NULL:
* result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<<
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4);
__pyx_t_2 = 0;
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 249, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":248
* cdef array result
*
* if buf == NULL: # <<<<<<<<<<<<<<
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
/*else*/ {
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_v_shape);
__Pyx_GIVEREF(__pyx_v_shape);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3);
__pyx_t_4 = 0;
__pyx_t_5 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":252
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False) # <<<<<<<<<<<<<<
* result.data = buf
*
*/
__pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 252, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(2, 252, __pyx_L1_error)
/* "View.MemoryView":251
* result = array(shape, itemsize, format, mode.decode('ASCII'))
* else:
* result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<<
* allocate_buffer=False)
* result.data = buf
*/
__pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 251, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5);
__pyx_t_5 = 0;
/* "View.MemoryView":253
* result = array(shape, itemsize, format, mode.decode('ASCII'),
* allocate_buffer=False)
* result.data = buf # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->data = __pyx_v_buf;
}
__pyx_L3:;
/* "View.MemoryView":255
* result.data = buf
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":244
*
* @cname("__pyx_array_new")
* cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<<
* char *mode, char *buf):
* cdef array result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
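/* The small `Enum` class below provides named sentinel objects: per the
 * source comments, View.MemoryView binds instances such as
 * Enum("<strided and direct or indirect>") to module-level names like
 * `generic` and uses them to tag memory-layout kinds during slicing.
 * __repr__ simply returns the stored name. */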
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* Python wrapper */
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_name = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0};
PyObject* values[1] = {0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(2, 281, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 1) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
}
__pyx_v_name = values[0];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 281, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__init__", 0);
/* "View.MemoryView":282
* cdef object name
* def __init__(self, name):
* self.name = name # <<<<<<<<<<<<<<
* def __repr__(self):
* return self.name
*/
__Pyx_INCREF(__pyx_v_name);
__Pyx_GIVEREF(__pyx_v_name);
__Pyx_GOTREF(__pyx_v_self->name);
__Pyx_DECREF(__pyx_v_self->name);
__pyx_v_self->name = __pyx_v_name;
/* "View.MemoryView":281
* cdef class Enum(object):
* cdef object name
* def __init__(self, name): # <<<<<<<<<<<<<<
* self.name = name
* def __repr__(self):
*/
/* function exit code */
__pyx_r = 0;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
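/* The five-call sequence above (INCREF the new value, GIVEREF it, then
 * GOTREF/DECREF the old attribute before assigning) is Cython's standard
 * refcount-safe attribute store; the GIVEREF/GOTREF calls expand to no-ops
 * unless the RefNanny debugging layer (CYTHON_REFNANNY) is enabled. */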
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* Python wrapper */
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":284
* self.name = name
* def __repr__(self):
* return self.name # <<<<<<<<<<<<<<
*
* cdef generic = Enum("<strided and direct or indirect>")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->name);
__pyx_r = __pyx_v_self->name;
goto __pyx_L0;
/* "View.MemoryView":283
* def __init__(self, name):
* self.name = name
* def __repr__(self): # <<<<<<<<<<<<<<
* return self.name
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
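/* Unlike array, Enum is picklable. __reduce_cython__ below captures
 * (self.name,) plus __dict__ when one exists, and pairs it with the constant
 * 0xb068931 (== 184977713, hence __pyx_int_184977713) that the
 * __pyx_unpickle_Enum helper is expected to check against the class layout
 * before restoring state. */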
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) {
PyObject *__pyx_v_state = 0;
PyObject *__pyx_v__dict = 0;
int __pyx_v_use_setstate;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":5
* cdef object _dict
* cdef bint use_setstate
* state = (self.name,) # <<<<<<<<<<<<<<
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v_self->name);
__Pyx_GIVEREF(__pyx_v_self->name);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name);
__pyx_v_state = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "(tree fragment)":6
* cdef bint use_setstate
* state = (self.name,)
* _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<<
* if _dict is not None:
* state += (_dict,)
*/
__pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v__dict = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
__pyx_t_2 = (__pyx_v__dict != Py_None);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "(tree fragment)":8
* _dict = getattr(self, '__dict__', None)
* if _dict is not None:
* state += (_dict,) # <<<<<<<<<<<<<<
* use_setstate = True
* else:
*/
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(__pyx_v__dict);
__Pyx_GIVEREF(__pyx_v__dict);
PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict);
__pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 8, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4));
__pyx_t_4 = 0;
/* "(tree fragment)":9
* if _dict is not None:
* state += (_dict,)
* use_setstate = True # <<<<<<<<<<<<<<
* else:
* use_setstate = self.name is not None
*/
__pyx_v_use_setstate = 1;
/* "(tree fragment)":7
* state = (self.name,)
* _dict = getattr(self, '__dict__', None)
* if _dict is not None: # <<<<<<<<<<<<<<
* state += (_dict,)
* use_setstate = True
*/
goto __pyx_L3;
}
/* "(tree fragment)":11
* use_setstate = True
* else:
* use_setstate = self.name is not None # <<<<<<<<<<<<<<
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_self->name != Py_None);
__pyx_v_use_setstate = __pyx_t_3;
}
__pyx_L3:;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
__pyx_t_3 = (__pyx_v_use_setstate != 0);
if (__pyx_t_3) {
/* "(tree fragment)":13
* use_setstate = self.name is not None
* if use_setstate:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<<
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None);
__pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 13, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state);
__pyx_t_4 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "(tree fragment)":12
* else:
* use_setstate = self.name is not None
* if use_setstate: # <<<<<<<<<<<<<<
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
*/
}
/* "(tree fragment)":15
* return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))));
__Pyx_INCREF(__pyx_int_184977713);
__Pyx_GIVEREF(__pyx_int_184977713);
PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713);
__Pyx_INCREF(__pyx_v_state);
__Pyx_GIVEREF(__pyx_v_state);
PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state);
__pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 15, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_5);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1);
__pyx_t_5 = 0;
__pyx_t_1 = 0;
__pyx_r = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L0;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* cdef tuple state
* cdef object _dict
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_state);
__Pyx_XDECREF(__pyx_v__dict);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
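/* __setstate_cython__ below is the receiving half: it type-checks that the
 * incoming state is a tuple (or None) and delegates the actual field
 * restoration to __pyx_unpickle_Enum__set_state. */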
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":17
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state):
* __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<<
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 17, __pyx_L1_error)
__pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":16
* else:
* return __pyx_unpickle_Enum, (type(self), 0xb068931, state)
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(self, __pyx_state)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
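/* align_pointer below rounds an address up to the next multiple of
 * `alignment` via offset = p % alignment; if offset > 0 then
 * p += alignment - offset. Worked example (assuming alignment = 8):
 * p = 0x1003 gives offset = 3, so the returned pointer is
 * 0x1003 + (8 - 3) = 0x1008. memoryview.__cinit__ further down uses this to
 * align its atomic acquisition counter. */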
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) {
Py_intptr_t __pyx_v_aligned_p;
size_t __pyx_v_offset;
void *__pyx_r;
int __pyx_t_1;
/* "View.MemoryView":300
* cdef void *align_pointer(void *memory, size_t alignment) nogil:
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<<
* cdef size_t offset
*
*/
__pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory);
/* "View.MemoryView":304
*
* with cython.cdivision(True):
* offset = aligned_p % alignment # <<<<<<<<<<<<<<
*
* if offset > 0:
*/
__pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment);
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
__pyx_t_1 = ((__pyx_v_offset > 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":307
*
* if offset > 0:
* aligned_p += alignment - offset # <<<<<<<<<<<<<<
*
* return <void *> aligned_p
*/
__pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset));
/* "View.MemoryView":306
* offset = aligned_p % alignment
*
* if offset > 0: # <<<<<<<<<<<<<<
* aligned_p += alignment - offset
*
*/
}
/* "View.MemoryView":309
* aligned_p += alignment - offset
*
* return <void *> aligned_p # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = ((void *)__pyx_v_aligned_p);
goto __pyx_L0;
/* "View.MemoryView":298
*
* @cname('__pyx_align_pointer')
* cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<<
* "Align pointer memory on a given boundary"
* cdef Py_intptr_t aligned_p = <Py_intptr_t> memory
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
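/* memoryview.__cinit__ below acquires the underlying buffer with
 * __Pyx_GetBuffer, hands out one of the THREAD_LOCKS_PREALLOCATED (8 in this
 * build) statically allocated thread locks or falls back to
 * PyThread_allocate_lock(), and, when PyBUF_FORMAT was requested, derives
 * dtype_is_object from the buffer format string being exactly "O". */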
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* Python wrapper */
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v_obj = 0;
int __pyx_v_flags;
int __pyx_v_dtype_is_object;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(2, 345, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (kw_args > 0) {
PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_dtype_is_object);
if (value) { values[2] = value; kw_args--; }
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(2, 345, __pyx_L3_error)
}
} else {
switch (PyTuple_GET_SIZE(__pyx_args)) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
break;
default: goto __pyx_L5_argtuple_error;
}
}
__pyx_v_obj = values[0];
__pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error)
if (values[2]) {
__pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 345, __pyx_L3_error)
} else {
__pyx_v_dtype_is_object = ((int)0);
}
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 345, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return -1;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__cinit__", 0);
/* "View.MemoryView":346
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj # <<<<<<<<<<<<<<
* self.flags = flags
* if type(self) is memoryview or obj is not None:
*/
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
__Pyx_GOTREF(__pyx_v_self->obj);
__Pyx_DECREF(__pyx_v_self->obj);
__pyx_v_self->obj = __pyx_v_obj;
/* "View.MemoryView":347
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False):
* self.obj = obj
* self.flags = flags # <<<<<<<<<<<<<<
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
*/
__pyx_v_self->flags = __pyx_v_flags;
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
__pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type));
__pyx_t_3 = (__pyx_t_2 != 0);
if (!__pyx_t_3) {
} else {
__pyx_t_1 = __pyx_t_3;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_3 = (__pyx_v_obj != Py_None);
__pyx_t_2 = (__pyx_t_3 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (__pyx_t_1) {
/* "View.MemoryView":349
* self.flags = flags
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<<
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
*/
__pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 349, __pyx_L1_error)
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":351
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None;
/* "View.MemoryView":352
* if <PyObject *> self.view.obj == NULL:
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* global __pyx_memoryview_thread_locks_used
*/
Py_INCREF(Py_None);
/* "View.MemoryView":350
* if type(self) is memoryview or obj is not None:
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &self.view).obj = Py_None
* Py_INCREF(Py_None)
*/
}
/* "View.MemoryView":348
* self.obj = obj
* self.flags = flags
* if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<<
* __Pyx_GetBuffer(obj, &self.view, flags)
* if <PyObject *> self.view.obj == NULL:
*/
}
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
__pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":356
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
*/
__pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
/* "View.MemoryView":357
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED:
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<<
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1);
/* "View.MemoryView":355
*
* global __pyx_memoryview_thread_locks_used
* if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<<
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":359
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<<
* if self.lock is NULL:
* raise MemoryError
*/
__pyx_v_self->lock = PyThread_allocate_lock();
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
__pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":361
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
* raise MemoryError # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
PyErr_NoMemory(); __PYX_ERR(2, 361, __pyx_L1_error)
/* "View.MemoryView":360
* if self.lock is NULL:
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL: # <<<<<<<<<<<<<<
* raise MemoryError
*
*/
}
/* "View.MemoryView":358
* self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]
* __pyx_memoryview_thread_locks_used += 1
* if self.lock is NULL: # <<<<<<<<<<<<<<
* self.lock = PyThread_allocate_lock()
* if self.lock is NULL:
*/
}
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":364
*
* if flags & PyBUF_FORMAT:
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<<
* else:
* self.dtype_is_object = dtype_is_object
*/
__pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_self->dtype_is_object = __pyx_t_1;
/* "View.MemoryView":363
* raise MemoryError
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
*/
goto __pyx_L10;
}
/* "View.MemoryView":366
* self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0')
* else:
* self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<<
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
*/
/*else*/ {
__pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object;
}
__pyx_L10:;
/* "View.MemoryView":368
* self.dtype_is_object = dtype_is_object
*
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<<
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL
*/
__pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int))));
/* "View.MemoryView":370
* self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer(
* <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int))
* self.typeinfo = NULL # <<<<<<<<<<<<<<
*
* def __dealloc__(memoryview self):
*/
__pyx_v_self->typeinfo = NULL;
/* "View.MemoryView":345
* cdef __Pyx_TypeInfo *typeinfo
*
* def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<<
* self.obj = obj
* self.flags = flags
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
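/*
 * Summary of memoryview.__cinit__ above: when self is a plain memoryview
 * or obj is a real exporter, the buffer is acquired with __Pyx_GetBuffer();
 * a NULL view.obj is then replaced by an owned Py_None so that the release
 * logic in __dealloc__ stays uniform. A per-view lock is taken from a small
 * preallocated pool (the generated `< 8` test inlines
 * THREAD_LOCKS_PREALLOCATED) and PyThread_allocate_lock() is called only
 * once the pool is exhausted, raising MemoryError on failure. Object dtype
 * is detected from a single-character 'O' format string when PyBUF_FORMAT
 * was requested, and the atomic acquisition counter is re-aligned to
 * sizeof(__pyx_atomic_int) via align_pointer().
 */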
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* Python wrapper */
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) {
int __pyx_v_i;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyThread_type_lock __pyx_t_6;
PyThread_type_lock __pyx_t_7;
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
__pyx_t_1 = (__pyx_v_self->obj != Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":374
* def __dealloc__(memoryview self):
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<<
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
*/
__Pyx_ReleaseBuffer((&__pyx_v_self->view));
/* "View.MemoryView":373
*
* def __dealloc__(memoryview self):
* if self.obj is not None: # <<<<<<<<<<<<<<
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*/
goto __pyx_L3;
}
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
__pyx_t_2 = ((((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":377
* elif (<__pyx_buffer *> &self.view).obj == Py_None:
*
* (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<<
* Py_DECREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_self->view))->obj = NULL;
/* "View.MemoryView":378
*
* (<__pyx_buffer *> &self.view).obj = NULL
* Py_DECREF(Py_None) # <<<<<<<<<<<<<<
*
* cdef int i
*/
Py_DECREF(Py_None);
/* "View.MemoryView":375
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
* elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<<
*
* (<__pyx_buffer *> &self.view).obj = NULL
*/
}
__pyx_L3:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
__pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":383
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<<
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
*/
__pyx_t_3 = __pyx_memoryview_thread_locks_used;
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) {
__pyx_v_i = __pyx_t_5;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
__pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":385
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<<
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
*/
__pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1);
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
__pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":388
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<<
* break
* else:
*/
__pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]);
__pyx_t_7 = (__pyx_memoryview_thread_locks[__pyx_v_i]);
/* "View.MemoryView":387
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break
*/
(__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_6;
(__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_7;
/* "View.MemoryView":386
* if __pyx_memoryview_thread_locks[i] is self.lock:
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
*/
}
/* "View.MemoryView":389
* __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = (
* __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i])
* break # <<<<<<<<<<<<<<
* else:
* PyThread_free_lock(self.lock)
*/
goto __pyx_L6_break;
/* "View.MemoryView":384
* if self.lock != NULL:
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<<
* __pyx_memoryview_thread_locks_used -= 1
* if i != __pyx_memoryview_thread_locks_used:
*/
}
}
/*else*/ {
/* "View.MemoryView":391
* break
* else:
* PyThread_free_lock(self.lock) # <<<<<<<<<<<<<<
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
*/
PyThread_free_lock(__pyx_v_self->lock);
}
__pyx_L6_break:;
/* "View.MemoryView":382
* cdef int i
* global __pyx_memoryview_thread_locks_used
* if self.lock != NULL: # <<<<<<<<<<<<<<
* for i in range(__pyx_memoryview_thread_locks_used):
* if __pyx_memoryview_thread_locks[i] is self.lock:
*/
}
/* "View.MemoryView":372
* self.typeinfo = NULL
*
* def __dealloc__(memoryview self): # <<<<<<<<<<<<<<
* if self.obj is not None:
* __Pyx_ReleaseBuffer(&self.view)
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
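/*
 * Summary of memoryview.__dealloc__ above: the buffer is released only if
 * it was actually acquired (obj is not None); the Py_None sentinel
 * installed in __cinit__ is undone with a matching Py_DECREF. A lock that
 * came from the preallocated pool is returned with an O(1) swap-with-last
 * removal, which keeps the in-use prefix of __pyx_memoryview_thread_locks
 * dense; a lock allocated on demand is instead freed with
 * PyThread_free_lock(). A minimal sketch of the swap-remove idiom, with
 * hypothetical names:
 *
 *     used -= 1;
 *     if (i != used) {
 *         PyThread_type_lock tmp = pool[i];
 *         pool[i] = pool[used];
 *         pool[used] = tmp;
 *     }
 */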
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
Py_ssize_t __pyx_v_dim;
char *__pyx_v_itemp;
PyObject *__pyx_v_idx = NULL;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t __pyx_t_3;
PyObject *(*__pyx_t_4)(PyObject *);
PyObject *__pyx_t_5 = NULL;
Py_ssize_t __pyx_t_6;
char *__pyx_t_7;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_item_pointer", 0);
/* "View.MemoryView":395
* cdef char *get_item_pointer(memoryview self, object index) except NULL:
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<<
*
* for dim, idx in enumerate(index):
*/
__pyx_v_itemp = ((char *)__pyx_v_self->view.buf);
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
__pyx_t_1 = 0;
if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) {
__pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0;
__pyx_t_4 = NULL;
} else {
__pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 397, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_4)) {
if (likely(PyList_CheckExact(__pyx_t_2))) {
if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
} else {
if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(2, 397, __pyx_L1_error)
#else
__pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 397, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
}
} else {
__pyx_t_5 = __pyx_t_4(__pyx_t_2);
if (unlikely(!__pyx_t_5)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(2, 397, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_5);
}
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5);
__pyx_t_5 = 0;
__pyx_v_dim = __pyx_t_1;
__pyx_t_1 = (__pyx_t_1 + 1);
/* "View.MemoryView":398
*
* for dim, idx in enumerate(index):
* itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<<
*
* return itemp
*/
__pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 398, __pyx_L1_error)
__pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(2, 398, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_7;
/* "View.MemoryView":397
* cdef char *itemp = <char *> self.view.buf
*
* for dim, idx in enumerate(index): # <<<<<<<<<<<<<<
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
*/
}
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":400
* itemp = pybuffer_index(&self.view, itemp, idx, dim)
*
* return itemp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_itemp;
goto __pyx_L0;
/* "View.MemoryView":393
* PyThread_free_lock(self.lock)
*
* cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<<
* cdef Py_ssize_t dim
* cdef char *itemp = <char *> self.view.buf
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
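/*
 * Summary of get_item_pointer above: the index sequence is walked with the
 * generated list/tuple fast path (falling back to tp_iternext for other
 * iterables), and __pyx_pybuffer_index() advances the item pointer one
 * dimension at a time, taking care of negative indices, bounds checks and
 * suboffset indirection. For a direct (non-suboffset) buffer this reduces
 * to plain stride arithmetic; a sketch, assuming a 2-D view and indices
 * i, j of type Py_ssize_t:
 *
 *     char *p = (char *)view->buf;
 *     p += i * view->strides[0];
 *     p += j * view->strides[1];
 */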
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/
static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_indices = NULL;
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
char *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__getitem__", 0);
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
__pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":405
* def __getitem__(memoryview self, object index):
* if index is Ellipsis:
* return self # <<<<<<<<<<<<<<
*
* have_slices, indices = _unellipsify(index, self.view.ndim)
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__pyx_r = ((PyObject *)__pyx_v_self);
goto __pyx_L0;
/* "View.MemoryView":404
*
* def __getitem__(memoryview self, object index):
* if index is Ellipsis: # <<<<<<<<<<<<<<
* return self
*
*/
}
/* "View.MemoryView":407
* return self
*
* have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* cdef char *itemp
*/
__pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (likely(__pyx_t_3 != Py_None)) {
PyObject* sequence = __pyx_t_3;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(2, 407, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_5 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
#else
__pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 407, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
#endif
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 407, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_v_indices = __pyx_t_5;
__pyx_t_5 = 0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
__pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(2, 410, __pyx_L1_error)
if (__pyx_t_2) {
/* "View.MemoryView":411
* cdef char *itemp
* if have_slices:
* return memview_slice(self, indices) # <<<<<<<<<<<<<<
* else:
* itemp = self.get_item_pointer(indices)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 411, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":410
*
* cdef char *itemp
* if have_slices: # <<<<<<<<<<<<<<
* return memview_slice(self, indices)
* else:
*/
}
/* "View.MemoryView":413
* return memview_slice(self, indices)
* else:
* itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<<
* return self.convert_item_to_object(itemp)
*
*/
/*else*/ {
__pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(2, 413, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_6;
/* "View.MemoryView":414
* else:
* itemp = self.get_item_pointer(indices)
* return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<<
*
* def __setitem__(memoryview self, object index, object value):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 414, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":403
*
*
* def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<<
* if index is Ellipsis:
* return self
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_indices);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
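/*
 * Summary of __getitem__ above: `view[...]` returns the view itself;
 * otherwise _unellipsify() expands any Ellipsis into full slices and
 * reports whether the index contains slices at all. Slice indexing
 * dispatches to memview_slice() and yields a new memoryview object; pure
 * integer indexing computes the element address with get_item_pointer()
 * and boxes the element via convert_item_to_object().
 */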
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* Python wrapper */
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/
static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
PyObject *__pyx_v_have_slices = NULL;
PyObject *__pyx_v_obj = NULL;
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setitem__", 0);
__Pyx_INCREF(__pyx_v_index);
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
__pyx_t_1 = (__pyx_v_self->view.readonly != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(2, 418, __pyx_L1_error)
/* "View.MemoryView":417
*
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly: # <<<<<<<<<<<<<<
* raise TypeError("Cannot assign to read-only memoryview")
*
*/
}
/* "View.MemoryView":420
* raise TypeError("Cannot assign to read-only memoryview")
*
* have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<<
*
* if have_slices:
*/
__pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (likely(__pyx_t_2 != Py_None)) {
PyObject* sequence = __pyx_t_2;
Py_ssize_t size = __Pyx_PySequence_SIZE(sequence);
if (unlikely(size != 2)) {
if (size > 2) __Pyx_RaiseTooManyValuesError(2);
else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size);
__PYX_ERR(2, 420, __pyx_L1_error)
}
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_3 = PyTuple_GET_ITEM(sequence, 0);
__pyx_t_4 = PyTuple_GET_ITEM(sequence, 1);
__Pyx_INCREF(__pyx_t_3);
__Pyx_INCREF(__pyx_t_4);
#else
__pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 420, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
#endif
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
} else {
__Pyx_RaiseNoneNotIterableError(); __PYX_ERR(2, 420, __pyx_L1_error)
}
__pyx_v_have_slices = __pyx_t_3;
__pyx_t_3 = 0;
__Pyx_DECREF_SET(__pyx_v_index, __pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 422, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":423
*
* if have_slices:
* obj = self.is_slice(value) # <<<<<<<<<<<<<<
* if obj:
* self.setitem_slice_assignment(self[index], obj)
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 423, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_v_obj = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 424, __pyx_L1_error)
if (__pyx_t_1) {
/* "View.MemoryView":425
* obj = self.is_slice(value)
* if obj:
* self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<<
* else:
* self.setitem_slice_assign_scalar(self[index], value)
*/
__pyx_t_2 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_2, __pyx_v_obj); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 425, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
/* "View.MemoryView":424
* if have_slices:
* obj = self.is_slice(value)
* if obj: # <<<<<<<<<<<<<<
* self.setitem_slice_assignment(self[index], obj)
* else:
*/
goto __pyx_L5;
}
/* "View.MemoryView":427
* self.setitem_slice_assignment(self[index], obj)
* else:
* self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<<
* else:
* self.setitem_indexed(index, value)
*/
/*else*/ {
__pyx_t_4 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
if (!(likely(((__pyx_t_4) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_4, __pyx_memoryview_type))))) __PYX_ERR(2, 427, __pyx_L1_error)
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_4), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 427, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L5:;
/* "View.MemoryView":422
* have_slices, index = _unellipsify(index, self.view.ndim)
*
* if have_slices: # <<<<<<<<<<<<<<
* obj = self.is_slice(value)
* if obj:
*/
goto __pyx_L4;
}
/* "View.MemoryView":429
* self.setitem_slice_assign_scalar(self[index], value)
* else:
* self.setitem_indexed(index, value) # <<<<<<<<<<<<<<
*
* cdef is_slice(self, obj):
*/
/*else*/ {
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 429, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_L4:;
/* "View.MemoryView":416
* return self.convert_item_to_object(itemp)
*
* def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<<
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_have_slices);
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
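/*
 * Summary of __setitem__ above: assignment to a read-only view raises
 * TypeError up front. After _unellipsify(), slice targets take one of two
 * paths: if the value itself exposes a buffer (is_slice() returns a
 * memoryview), setitem_slice_assignment() copies element-wise; otherwise
 * setitem_slice_assign_scalar() broadcasts a single value across
 * self[index]. Pure integer indices go through setitem_indexed().
 */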
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_slice", 0);
__Pyx_INCREF(__pyx_v_obj);
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_5);
/*try:*/ {
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":435
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object) # <<<<<<<<<<<<<<
* except TypeError:
* return None
*/
__pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 435, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
/* "View.MemoryView":434
* if not isinstance(obj, memoryview):
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<<
* self.dtype_is_object)
* except TypeError:
*/
__pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_8);
__Pyx_INCREF(__pyx_v_obj);
__Pyx_GIVEREF(__pyx_v_obj);
PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj);
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6);
__Pyx_GIVEREF(__pyx_t_7);
PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7);
__pyx_t_6 = 0;
__pyx_t_7 = 0;
__pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 434, __pyx_L4_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7);
__pyx_t_7 = 0;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
}
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
goto __pyx_L9_try_end;
__pyx_L4_error:;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
/* "View.MemoryView":436
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
* except TypeError: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError);
if (__pyx_t_9) {
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(2, 436, __pyx_L6_except_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_GOTREF(__pyx_t_8);
__Pyx_GOTREF(__pyx_t_6);
/* "View.MemoryView":437
* self.dtype_is_object)
* except TypeError:
* return None # <<<<<<<<<<<<<<
*
* return obj
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
goto __pyx_L7_except_return;
}
goto __pyx_L6_except_error;
__pyx_L6_except_error:;
/* "View.MemoryView":433
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview):
* try: # <<<<<<<<<<<<<<
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
* self.dtype_is_object)
*/
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L1_error;
__pyx_L7_except_return:;
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_XGIVEREF(__pyx_t_5);
__Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5);
goto __pyx_L0;
__pyx_L9_try_end:;
}
/* "View.MemoryView":432
*
* cdef is_slice(self, obj):
* if not isinstance(obj, memoryview): # <<<<<<<<<<<<<<
* try:
* obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
*/
}
/* "View.MemoryView":439
* return None
*
* return obj # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assignment(self, dst, src):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_obj);
__pyx_r = __pyx_v_obj;
goto __pyx_L0;
/* "View.MemoryView":431
* self.setitem_indexed(index, value)
*
* cdef is_slice(self, obj): # <<<<<<<<<<<<<<
* if not isinstance(obj, memoryview):
* try:
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_obj);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
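/*
 * Summary of is_slice above: a value that is not already a memoryview is
 * coerced by re-wrapping it as
 * memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS,
 * self.dtype_is_object), i.e. the writability requirement is dropped and
 * any contiguity is accepted. A value that does not export a buffer raises
 * TypeError, which is swallowed and reported as None so the caller can
 * fall back to scalar assignment.
 */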
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) {
__Pyx_memviewslice __pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_src_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
__Pyx_memviewslice *__pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assignment", 0);
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(2, 445, __pyx_L1_error)
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 445, __pyx_L1_error)
/* "View.MemoryView":446
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<<
* src.ndim, dst.ndim, self.dtype_is_object)
*
*/
if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(2, 446, __pyx_L1_error)
__pyx_t_2 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_2 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 446, __pyx_L1_error)
/* "View.MemoryView":447
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0],
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<<
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
*/
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 447, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = __Pyx_PyInt_As_int(__pyx_t_3); if (unlikely((__pyx_t_5 == (int)-1) && PyErr_Occurred())) __PYX_ERR(2, 447, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":445
* cdef __Pyx_memviewslice src_slice
*
* memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<<
* get_slice_from_memview(dst, &dst_slice)[0],
* src.ndim, dst.ndim, self.dtype_is_object)
*/
__pyx_t_6 = __pyx_memoryview_copy_contents((__pyx_t_1[0]), (__pyx_t_2[0]), __pyx_t_4, __pyx_t_5, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 445, __pyx_L1_error)
/* "View.MemoryView":441
* return obj
*
* cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice dst_slice
* cdef __Pyx_memviewslice src_slice
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
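/*
 * Summary of setitem_slice_assignment above: both operands are unwrapped
 * to flat __Pyx_memviewslice structs via get_slice_from_memview(), and the
 * actual element copy is delegated to __pyx_memoryview_copy_contents(),
 * which also broadcasts a lower-dimensional source into the destination
 * shape and copes with overlapping memory.
 */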
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) {
int __pyx_v_array[0x80];
void *__pyx_v_tmp;
void *__pyx_v_item;
__Pyx_memviewslice *__pyx_v_dst_slice;
__Pyx_memviewslice __pyx_v_tmp_slice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_t_4;
int __pyx_t_5;
char const *__pyx_t_6;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
PyObject *__pyx_t_9 = NULL;
PyObject *__pyx_t_10 = NULL;
PyObject *__pyx_t_11 = NULL;
PyObject *__pyx_t_12 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0);
/* "View.MemoryView":451
* cdef setitem_slice_assign_scalar(self, memoryview dst, value):
* cdef int array[128]
* cdef void *tmp = NULL # <<<<<<<<<<<<<<
* cdef void *item
*
*/
__pyx_v_tmp = NULL;
/* "View.MemoryView":456
* cdef __Pyx_memviewslice *dst_slice
* cdef __Pyx_memviewslice tmp_slice
* dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<<
*
* if <size_t>self.view.itemsize > sizeof(array):
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 456, __pyx_L1_error)
__pyx_v_dst_slice = __pyx_t_1;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
__pyx_t_2 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":459
*
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<<
* if tmp == NULL:
* raise MemoryError
*/
__pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize);
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
__pyx_t_2 = ((__pyx_v_tmp == NULL) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":461
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
* raise MemoryError # <<<<<<<<<<<<<<
* item = tmp
* else:
*/
PyErr_NoMemory(); __PYX_ERR(2, 461, __pyx_L1_error)
/* "View.MemoryView":460
* if <size_t>self.view.itemsize > sizeof(array):
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL: # <<<<<<<<<<<<<<
* raise MemoryError
* item = tmp
*/
}
/* "View.MemoryView":462
* if tmp == NULL:
* raise MemoryError
* item = tmp # <<<<<<<<<<<<<<
* else:
* item = <void *> array
*/
__pyx_v_item = __pyx_v_tmp;
/* "View.MemoryView":458
* dst_slice = get_slice_from_memview(dst, &tmp_slice)
*
* if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<<
* tmp = PyMem_Malloc(self.view.itemsize)
* if tmp == NULL:
*/
goto __pyx_L3;
}
/* "View.MemoryView":464
* item = tmp
* else:
* item = <void *> array # <<<<<<<<<<<<<<
*
* try:
*/
/*else*/ {
__pyx_v_item = ((void *)__pyx_v_array);
}
__pyx_L3:;
/* "View.MemoryView":466
* item = <void *> array
*
* try: # <<<<<<<<<<<<<<
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value
*/
/*try:*/ {
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
__pyx_t_2 = (__pyx_v_self->dtype_is_object != 0);
if (__pyx_t_2) {
/* "View.MemoryView":468
* try:
* if self.dtype_is_object:
* (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<<
* else:
* self.assign_item_from_object(<char *> item, value)
*/
(((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value);
/* "View.MemoryView":467
*
* try:
* if self.dtype_is_object: # <<<<<<<<<<<<<<
* (<PyObject **> item)[0] = <PyObject *> value
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":470
* (<PyObject **> item)[0] = <PyObject *> value
* else:
* self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<<
*
*
*/
/*else*/ {
__pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 470, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L8:;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
__pyx_t_2 = ((__pyx_v_self->view.suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":475
*
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<<
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
* item, self.dtype_is_object)
*/
__pyx_t_3 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 475, __pyx_L6_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":474
*
*
* if self.view.suboffsets != NULL: # <<<<<<<<<<<<<<
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize,
*/
}
/* "View.MemoryView":476
* if self.view.suboffsets != NULL:
* assert_direct_dimensions(self.view.suboffsets, self.view.ndim)
* slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<<
* item, self.dtype_is_object)
* finally:
*/
__pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object);
}
/* "View.MemoryView":479
* item, self.dtype_is_object)
* finally:
* PyMem_Free(tmp) # <<<<<<<<<<<<<<
*
* cdef setitem_indexed(self, index, value):
*/
/*finally:*/ {
/*normal exit:*/{
PyMem_Free(__pyx_v_tmp);
goto __pyx_L7;
}
__pyx_L6_error:;
/*exception exit:*/{
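            /* Generated finally machinery: the in-flight exception (and,
             * on Python 3, the saved exception state) is fetched into
             * temporaries, the finally body runs (PyMem_Free(tmp) below),
             * and the exception is then restored and re-raised via
             * goto __pyx_L1_error, so the scratch allocation is released
             * on every exit path. */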
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0;
if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12);
if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_7);
__Pyx_XGOTREF(__pyx_t_8);
__Pyx_XGOTREF(__pyx_t_9);
__Pyx_XGOTREF(__pyx_t_10);
__Pyx_XGOTREF(__pyx_t_11);
__Pyx_XGOTREF(__pyx_t_12);
__pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename;
{
PyMem_Free(__pyx_v_tmp);
}
if (PY_MAJOR_VERSION >= 3) {
__Pyx_XGIVEREF(__pyx_t_10);
__Pyx_XGIVEREF(__pyx_t_11);
__Pyx_XGIVEREF(__pyx_t_12);
__Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12);
}
__Pyx_XGIVEREF(__pyx_t_7);
__Pyx_XGIVEREF(__pyx_t_8);
__Pyx_XGIVEREF(__pyx_t_9);
__Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9);
__pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0;
__pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6;
goto __pyx_L1_error;
}
__pyx_L7:;
}
/* "View.MemoryView":449
* src.ndim, dst.ndim, self.dtype_is_object)
*
* cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<<
* cdef int array[128]
* cdef void *tmp = NULL
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
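/*
 * Summary of setitem_slice_assign_scalar above: a classic small-buffer
 * optimization. The scalar is packed once into a 128-int stack array when
 * it fits, or into a PyMem_Malloc() allocation when view.itemsize is
 * larger (raising MemoryError on failure); object dtypes store the
 * PyObject pointer directly. slice_assign_scalar() then fans the packed
 * item out across the destination slice, and the try/finally above
 * guarantees the heap scratch, if any, is freed. A sketch of the
 * selection, with hypothetical names:
 *
 *     void *item = (itemsize > sizeof(stack_buf))
 *                ? PyMem_Malloc(itemsize)    // NULL-checked in the real code
 *                : (void *)stack_buf;
 */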
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) {
char *__pyx_v_itemp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
char *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("setitem_indexed", 0);
/* "View.MemoryView":482
*
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<<
* self.assign_item_from_object(itemp, value)
*
*/
__pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(2, 482, __pyx_L1_error)
__pyx_v_itemp = __pyx_t_1;
/* "View.MemoryView":483
* cdef setitem_indexed(self, index, value):
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 483, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":481
* PyMem_Free(tmp)
*
* cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<<
* cdef char *itemp = self.get_item_pointer(index)
* self.assign_item_from_object(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
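/*
 * Summary of setitem_indexed above: the single-element store is simply
 * get_item_pointer() to locate the element followed by
 * assign_item_from_object() to pack the Python value into place.
 */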
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_v_struct = NULL;
PyObject *__pyx_v_bytesitem = 0;
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
int __pyx_t_8;
PyObject *__pyx_t_9 = NULL;
size_t __pyx_t_10;
int __pyx_t_11;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":488
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef bytes bytesitem
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 488, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":491
* cdef bytes bytesitem
*
* bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<<
* try:
* result = struct.unpack(self.view.format, bytesitem)
*/
__pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 491, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_bytesitem = ((PyObject*)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
{
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4);
__Pyx_XGOTREF(__pyx_t_2);
__Pyx_XGOTREF(__pyx_t_3);
__Pyx_XGOTREF(__pyx_t_4);
/*try:*/ {
/* "View.MemoryView":493
* bytesitem = itemp[:self.view.itemsize]
* try:
* result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<<
* except struct.error:
* raise ValueError("Unable to convert item to object")
*/
__pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = NULL;
__pyx_t_8 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) {
__pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5);
if (likely(__pyx_t_7)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5);
__Pyx_INCREF(__pyx_t_7);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_5, function);
__pyx_t_8 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) {
PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem};
__pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
} else
#endif
{
__pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_9);
if (__pyx_t_7) {
__Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL;
}
__Pyx_GIVEREF(__pyx_t_6);
PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6);
__Pyx_INCREF(__pyx_v_bytesitem);
__Pyx_GIVEREF(__pyx_v_bytesitem);
PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem);
__pyx_t_6 = 0;
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 493, __pyx_L3_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
}
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__pyx_v_result = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
}
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
/*else:*/ {
__pyx_t_10 = strlen(__pyx_v_self->view.format);
__pyx_t_11 = ((__pyx_t_10 == 1) != 0);
if (__pyx_t_11) {
/* "View.MemoryView":498
* else:
* if len(self.view.format) == 1:
* return result[0] # <<<<<<<<<<<<<<
* return result
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 498, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L6_except_return;
/* "View.MemoryView":497
* raise ValueError("Unable to convert item to object")
* else:
* if len(self.view.format) == 1: # <<<<<<<<<<<<<<
* return result[0]
* return result
*/
}
/* "View.MemoryView":499
* if len(self.view.format) == 1:
* return result[0]
* return result # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_result);
__pyx_r = __pyx_v_result;
goto __pyx_L6_except_return;
}
__pyx_L3_error:;
__Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0;
/* "View.MemoryView":494
* try:
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error: # <<<<<<<<<<<<<<
* raise ValueError("Unable to convert item to object")
* else:
*/
__Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9);
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_6);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_9);
__pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_9 = 0;
if (__pyx_t_8) {
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
if (__Pyx_GetException(&__pyx_t_9, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(2, 494, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_9);
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GOTREF(__pyx_t_1);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 495, __pyx_L5_except_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_Raise(__pyx_t_6, 0, 0, 0);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
__PYX_ERR(2, 495, __pyx_L5_except_error)
}
goto __pyx_L5_except_error;
__pyx_L5_except_error:;
/* "View.MemoryView":492
*
* bytesitem = itemp[:self.view.itemsize]
* try: # <<<<<<<<<<<<<<
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
*/
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L1_error;
__pyx_L6_except_return:;
__Pyx_XGIVEREF(__pyx_t_2);
__Pyx_XGIVEREF(__pyx_t_3);
__Pyx_XGIVEREF(__pyx_t_4);
__Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4);
goto __pyx_L0;
}
/* "View.MemoryView":485
* self.assign_item_from_object(itemp, value)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesitem);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
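/* The generated function below implements memoryview.assign_item_from_object,
 * the slow-path setter used only when Cython cannot convert the element type
 * natively: the Python value is serialized with struct.pack() using the
 * view's format string (a tuple is splatted into multiple struct fields,
 * any other value is packed as a single field), and the resulting bytes are
 * copied one by one into the raw item pointer `itemp`. */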
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_v_struct = NULL;
char __pyx_v_c;
PyObject *__pyx_v_bytesvalue = 0;
Py_ssize_t __pyx_v_i;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_t_3;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
PyObject *__pyx_t_6 = NULL;
int __pyx_t_7;
PyObject *__pyx_t_8 = NULL;
Py_ssize_t __pyx_t_9;
PyObject *__pyx_t_10 = NULL;
char *__pyx_t_11;
char *__pyx_t_12;
char *__pyx_t_13;
char *__pyx_t_14;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":504
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
* import struct # <<<<<<<<<<<<<<
* cdef char c
* cdef bytes bytesvalue
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 504, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_v_struct = __pyx_t_1;
__pyx_t_1 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
__pyx_t_2 = PyTuple_Check(__pyx_v_value);
__pyx_t_3 = (__pyx_t_2 != 0);
if (__pyx_t_3) {
/* "View.MemoryView":510
*
* if isinstance(value, tuple):
* bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<<
* else:
* bytesvalue = struct.pack(self.view.format, value)
*/
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4);
__pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 510, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 510, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
/* "View.MemoryView":509
* cdef Py_ssize_t i
*
* if isinstance(value, tuple): # <<<<<<<<<<<<<<
* bytesvalue = struct.pack(self.view.format, *value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":512
* bytesvalue = struct.pack(self.view.format, *value)
* else:
* bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<<
*
* for i, c in enumerate(bytesvalue):
*/
/*else*/ {
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_5 = NULL;
__pyx_t_7 = 0;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_6, function);
__pyx_t_7 = 1;
}
}
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) {
PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value};
__pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
} else
#endif
{
__pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_8);
if (__pyx_t_5) {
__Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL;
}
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1);
__Pyx_INCREF(__pyx_v_value);
__Pyx_GIVEREF(__pyx_v_value);
PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value);
__pyx_t_1 = 0;
__pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 512, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0;
}
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(2, 512, __pyx_L1_error)
__pyx_v_bytesvalue = ((PyObject*)__pyx_t_4);
__pyx_t_4 = 0;
}
__pyx_L3:;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = 0;
if (unlikely(__pyx_v_bytesvalue == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable");
__PYX_ERR(2, 514, __pyx_L1_error)
}
__Pyx_INCREF(__pyx_v_bytesvalue);
__pyx_t_10 = __pyx_v_bytesvalue;
__pyx_t_12 = PyBytes_AS_STRING(__pyx_t_10);
__pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10));
for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) {
__pyx_t_11 = __pyx_t_14;
__pyx_v_c = (__pyx_t_11[0]);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
__pyx_v_i = __pyx_t_9;
/* "View.MemoryView":514
* bytesvalue = struct.pack(self.view.format, value)
*
* for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<<
* itemp[i] = c
*
*/
__pyx_t_9 = (__pyx_t_9 + 1);
/* "View.MemoryView":515
*
* for i, c in enumerate(bytesvalue):
* itemp[i] = c # <<<<<<<<<<<<<<
*
* @cname('getbuffer')
*/
(__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c;
}
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0;
/* "View.MemoryView":501
* return result
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* """Only used if instantiated manually by the user, or if Cython doesn't
* know how to convert the type"""
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_XDECREF(__pyx_t_10);
__Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_struct);
__Pyx_XDECREF(__pyx_v_bytesvalue);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
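/* Buffer-protocol export (__getbuffer__) follows: it fills the Py_buffer
 * `info` from the wrapped view, honoring the request flags. shape, strides,
 * suboffsets and format are only exposed when PyBUF_ND, PyBUF_STRIDES,
 * PyBUF_INDIRECT and PyBUF_FORMAT are set, respectively, and a
 * PyBUF_WRITABLE request against a read-only view raises ValueError before
 * any fields are filled in. */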
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* Python wrapper */
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/
static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
char *__pyx_t_5;
void *__pyx_t_6;
int __pyx_t_7;
Py_ssize_t __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
if (__pyx_v_info == NULL) {
PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete");
return -1;
}
__Pyx_RefNannySetupContext("__getbuffer__", 0);
__pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None);
__Pyx_GIVEREF(__pyx_v_info->obj);
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
__pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_2 = (__pyx_v_self->view.readonly != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L4_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 520, __pyx_L1_error)
/* "View.MemoryView":519
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<<
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
*/
}
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":523
*
* if flags & PyBUF_ND:
* info.shape = self.view.shape # <<<<<<<<<<<<<<
* else:
* info.shape = NULL
*/
__pyx_t_4 = __pyx_v_self->view.shape;
__pyx_v_info->shape = __pyx_t_4;
/* "View.MemoryView":522
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*
* if flags & PyBUF_ND: # <<<<<<<<<<<<<<
* info.shape = self.view.shape
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":525
* info.shape = self.view.shape
* else:
* info.shape = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_STRIDES:
*/
/*else*/ {
__pyx_v_info->shape = NULL;
}
__pyx_L6:;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":528
*
* if flags & PyBUF_STRIDES:
* info.strides = self.view.strides # <<<<<<<<<<<<<<
* else:
* info.strides = NULL
*/
__pyx_t_4 = __pyx_v_self->view.strides;
__pyx_v_info->strides = __pyx_t_4;
/* "View.MemoryView":527
* info.shape = NULL
*
* if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<<
* info.strides = self.view.strides
* else:
*/
goto __pyx_L7;
}
/* "View.MemoryView":530
* info.strides = self.view.strides
* else:
* info.strides = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_INDIRECT:
*/
/*else*/ {
__pyx_v_info->strides = NULL;
}
__pyx_L7:;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":533
*
* if flags & PyBUF_INDIRECT:
* info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<<
* else:
* info.suboffsets = NULL
*/
__pyx_t_4 = __pyx_v_self->view.suboffsets;
__pyx_v_info->suboffsets = __pyx_t_4;
/* "View.MemoryView":532
* info.strides = NULL
*
* if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<<
* info.suboffsets = self.view.suboffsets
* else:
*/
goto __pyx_L8;
}
/* "View.MemoryView":535
* info.suboffsets = self.view.suboffsets
* else:
* info.suboffsets = NULL # <<<<<<<<<<<<<<
*
* if flags & PyBUF_FORMAT:
*/
/*else*/ {
__pyx_v_info->suboffsets = NULL;
}
__pyx_L8:;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
__pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":538
*
* if flags & PyBUF_FORMAT:
* info.format = self.view.format # <<<<<<<<<<<<<<
* else:
* info.format = NULL
*/
__pyx_t_5 = __pyx_v_self->view.format;
__pyx_v_info->format = __pyx_t_5;
/* "View.MemoryView":537
* info.suboffsets = NULL
*
* if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<<
* info.format = self.view.format
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":540
* info.format = self.view.format
* else:
* info.format = NULL # <<<<<<<<<<<<<<
*
* info.buf = self.view.buf
*/
/*else*/ {
__pyx_v_info->format = NULL;
}
__pyx_L9:;
/* "View.MemoryView":542
* info.format = NULL
*
* info.buf = self.view.buf # <<<<<<<<<<<<<<
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
*/
__pyx_t_6 = __pyx_v_self->view.buf;
__pyx_v_info->buf = __pyx_t_6;
/* "View.MemoryView":543
*
* info.buf = self.view.buf
* info.ndim = self.view.ndim # <<<<<<<<<<<<<<
* info.itemsize = self.view.itemsize
* info.len = self.view.len
*/
__pyx_t_7 = __pyx_v_self->view.ndim;
__pyx_v_info->ndim = __pyx_t_7;
/* "View.MemoryView":544
* info.buf = self.view.buf
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize # <<<<<<<<<<<<<<
* info.len = self.view.len
* info.readonly = self.view.readonly
*/
__pyx_t_8 = __pyx_v_self->view.itemsize;
__pyx_v_info->itemsize = __pyx_t_8;
/* "View.MemoryView":545
* info.ndim = self.view.ndim
* info.itemsize = self.view.itemsize
* info.len = self.view.len # <<<<<<<<<<<<<<
* info.readonly = self.view.readonly
* info.obj = self
*/
__pyx_t_8 = __pyx_v_self->view.len;
__pyx_v_info->len = __pyx_t_8;
/* "View.MemoryView":546
* info.itemsize = self.view.itemsize
* info.len = self.view.len
* info.readonly = self.view.readonly # <<<<<<<<<<<<<<
* info.obj = self
*
*/
__pyx_t_1 = __pyx_v_self->view.readonly;
__pyx_v_info->readonly = __pyx_t_1;
/* "View.MemoryView":547
* info.len = self.view.len
* info.readonly = self.view.readonly
* info.obj = self # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_INCREF(((PyObject *)__pyx_v_self));
__Pyx_GIVEREF(((PyObject *)__pyx_v_self));
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj);
__pyx_v_info->obj = ((PyObject *)__pyx_v_self);
/* "View.MemoryView":518
*
* @cname('getbuffer')
* def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<<
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview")
*/
/* function exit code */
__pyx_r = 0;
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
if (__pyx_v_info->obj != NULL) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
goto __pyx_L2;
__pyx_L0:;
if (__pyx_v_info->obj == Py_None) {
__Pyx_GOTREF(__pyx_v_info->obj);
__Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0;
}
__pyx_L2:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
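/* The T property below returns a transposed view: memoryview_copy_object()
 * duplicates this memoryview as a _memoryviewslice, and
 * __pyx_memslice_transpose() then reverses the shape/strides of the copied
 * slice in place, so the original view is left untouched. */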
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":554
* @property
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<<
* transpose_memslice(&result.from_slice)
* return result
*/
__pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 554, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(2, 554, __pyx_L1_error)
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":555
* def T(self):
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 555, __pyx_L1_error)
/* "View.MemoryView":556
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
* return result # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":553
*
* @property
* def T(self): # <<<<<<<<<<<<<<
* cdef _memoryviewslice result = memoryview_copy(self)
* transpose_memslice(&result.from_slice)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
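/* The next group of generated getters exposes read-only introspection
 * properties: `base` returns the exporting object; `shape`, `strides` and
 * `suboffsets` build per-dimension tuples from the underlying Py_buffer
 * (strides raises ValueError when the buffer does not expose them,
 * suboffsets falls back to (-1,) * ndim); `ndim`, `itemsize`, `nbytes` and
 * `size` report scalar metadata, with `size` computed lazily as the product
 * of the shape and cached in `_size`. A minimal, hypothetical Python-level
 * sketch of the observable behavior (names are illustrative only):
 *
 *     mv = some_cython_memoryview          # assumed 3x4 view of doubles
 *     mv.shape                             # (3, 4)
 *     mv.nbytes == mv.size * mv.itemsize   # True by construction
 */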
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":560
* @property
* def base(self):
* return self.obj # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->obj);
__pyx_r = __pyx_v_self->obj;
goto __pyx_L0;
/* "View.MemoryView":559
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.obj
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_length;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":564
* @property
* def shape(self):
* return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_length = (__pyx_t_2[0]);
__pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(2, 564, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
}
__pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 564, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":563
*
* @property
* def shape(self): # <<<<<<<<<<<<<<
* return tuple([length for length in self.view.shape[:self.view.ndim]])
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_stride;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
__pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(2, 570, __pyx_L1_error)
/* "View.MemoryView":568
* @property
* def strides(self):
* if self.view.strides == NULL: # <<<<<<<<<<<<<<
*
* raise ValueError("Buffer view does not expose strides")
*/
}
/* "View.MemoryView":572
* raise ValueError("Buffer view does not expose strides")
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_v_stride = (__pyx_t_3[0]);
__pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(2, 572, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
}
__pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 572, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_6;
__pyx_t_6 = 0;
goto __pyx_L0;
/* "View.MemoryView":567
*
* @property
* def strides(self): # <<<<<<<<<<<<<<
* if self.view.strides == NULL:
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
Py_ssize_t *__pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
__pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_tuple__15, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":576
* @property
* def suboffsets(self):
* if self.view.suboffsets == NULL: # <<<<<<<<<<<<<<
* return (-1,) * self.view.ndim
*
*/
}
/* "View.MemoryView":579
* return (-1,) * self.view.ndim
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim);
for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) {
__pyx_t_4 = __pyx_t_6;
__pyx_v_suboffset = (__pyx_t_4[0]);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(2, 579, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
}
__pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 579, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":575
*
* @property
* def suboffsets(self): # <<<<<<<<<<<<<<
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":583
* @property
* def ndim(self):
* return self.view.ndim # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 583, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":582
*
* @property
* def ndim(self): # <<<<<<<<<<<<<<
* return self.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":587
* @property
* def itemsize(self):
* return self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 587, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":586
*
* @property
* def itemsize(self): # <<<<<<<<<<<<<<
* return self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":591
* @property
* def nbytes(self):
* return self.size * self.view.itemsize # <<<<<<<<<<<<<<
*
* @property
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 591, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_3;
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":590
*
* @property
* def nbytes(self): # <<<<<<<<<<<<<<
* return self.size * self.view.itemsize
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_v_result = NULL;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
Py_ssize_t *__pyx_t_5;
PyObject *__pyx_t_6 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
__pyx_t_1 = (__pyx_v_self->_size == Py_None);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":596
* def size(self):
* if self._size is None:
* result = 1 # <<<<<<<<<<<<<<
*
* for length in self.view.shape[:self.view.ndim]:
*/
__Pyx_INCREF(__pyx_int_1);
__pyx_v_result = __pyx_int_1;
/* "View.MemoryView":598
* result = 1
*
* for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<<
* result *= length
*
*/
__pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim);
for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) {
__pyx_t_3 = __pyx_t_5;
__pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 598, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6);
__pyx_t_6 = 0;
/* "View.MemoryView":599
*
* for length in self.view.shape[:self.view.ndim]:
* result *= length # <<<<<<<<<<<<<<
*
* self._size = result
*/
__pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 599, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6);
__pyx_t_6 = 0;
}
/* "View.MemoryView":601
* result *= length
*
* self._size = result # <<<<<<<<<<<<<<
*
* return self._size
*/
__Pyx_INCREF(__pyx_v_result);
__Pyx_GIVEREF(__pyx_v_result);
__Pyx_GOTREF(__pyx_v_self->_size);
__Pyx_DECREF(__pyx_v_self->_size);
__pyx_v_self->_size = __pyx_v_result;
/* "View.MemoryView":595
* @property
* def size(self):
* if self._size is None: # <<<<<<<<<<<<<<
* result = 1
*
*/
}
/* "View.MemoryView":603
* self._size = result
*
* return self._size # <<<<<<<<<<<<<<
*
* def __len__(self):
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->_size);
__pyx_r = __pyx_v_self->_size;
goto __pyx_L0;
/* "View.MemoryView":594
*
* @property
* def size(self): # <<<<<<<<<<<<<<
* if self._size is None:
* result = 1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_6);
__Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
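/* __len__ below returns the extent of the first dimension, or 0 for a
 * zero-dimensional view. */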
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* Python wrapper */
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/
static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__len__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) {
Py_ssize_t __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("__len__", 0);
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
__pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":607
* def __len__(self):
* if self.view.ndim >= 1:
* return self.view.shape[0] # <<<<<<<<<<<<<<
*
* return 0
*/
__pyx_r = (__pyx_v_self->view.shape[0]);
goto __pyx_L0;
/* "View.MemoryView":606
*
* def __len__(self):
* if self.view.ndim >= 1: # <<<<<<<<<<<<<<
* return self.view.shape[0]
*
*/
}
/* "View.MemoryView":609
* return self.view.shape[0]
*
* return 0 # <<<<<<<<<<<<<<
*
* def __repr__(self):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":605
* return self._size
*
* def __len__(self): # <<<<<<<<<<<<<<
* if self.view.ndim >= 1:
* return self.view.shape[0]
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
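/* __repr__ and __str__ below format a short description from the class name
 * of the exporting object, e.g. (hypothetically, for a view over a NumPy
 * array) "<MemoryView of 'ndarray' at 0x7f...>" and
 * "<MemoryView of 'ndarray' object>". */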
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__repr__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__repr__", 0);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
/* "View.MemoryView":613
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self)) # <<<<<<<<<<<<<<
*
* def __str__(self):
*/
__pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 613, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
/* "View.MemoryView":612
*
* def __repr__(self):
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<<
* id(self))
*
*/
__pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 612, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":611
* return 0
*
* def __repr__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__,
* id(self))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* Python wrapper */
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__str__ (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__str__", 0);
/* "View.MemoryView":616
*
* def __str__(self):
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1);
__pyx_t_1 = 0;
__pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 616, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":615
* id(self))
*
* def __str__(self): # <<<<<<<<<<<<<<
* return "<MemoryView of %r object>" % (self.base.__class__.__name__,)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
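/* is_c_contig() and is_f_contig() below obtain a C-level slice of the view
 * via get_slice_from_memview() and test it with slice_is_contig() for
 * 'C' (row-major) or 'F' (column-major) layout, returning a Python bool. */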
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_c_contig", 0);
/* "View.MemoryView":622
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 622, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":623
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<<
*
* def is_f_contig(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 623, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":619
*
*
* def is_c_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice *__pyx_v_mslice;
__Pyx_memviewslice __pyx_v_tmp;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice *__pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("is_f_contig", 0);
/* "View.MemoryView":628
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<<
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
*/
__pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(2, 628, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":629
* cdef __Pyx_memviewslice tmp
* mslice = get_slice_from_memview(self, &tmp)
* return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<<
*
* def copy(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 629, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":625
* return slice_is_contig(mslice[0], 'C', self.view.ndim)
*
* def is_f_contig(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice *mslice
* cdef __Pyx_memviewslice tmp
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
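/* copy() and copy_fortran() below materialize the view into freshly
 * allocated contiguous storage: slice_copy() snapshots the current slice,
 * slice_copy_contig() copies it into a new C-contiguous ("c") or
 * Fortran-contiguous ("fortran") buffer (adjusting the buffer flags
 * accordingly), and memoryview_copy_from_slice() wraps the result in a new
 * memoryview object. */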
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_mslice;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy", 0);
/* "View.MemoryView":633
* def copy(self):
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &mslice)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS));
/* "View.MemoryView":635
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*
* slice_copy(self, &mslice) # <<<<<<<<<<<<<<
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice));
/* "View.MemoryView":636
*
* slice_copy(self, &mslice)
* mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_C_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 636, __pyx_L1_error)
__pyx_v_mslice = __pyx_t_1;
/* "View.MemoryView":641
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<<
*
* def copy_fortran(self):
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 641, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":631
* return slice_is_contig(mslice[0], 'F', self.view.ndim)
*
* def copy(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice mslice
* cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* Python wrapper */
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0);
__pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) {
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
int __pyx_v_flags;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_memviewslice __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("copy_fortran", 0);
/* "View.MemoryView":645
* def copy_fortran(self):
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<<
*
* slice_copy(self, &src)
*/
__pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS));
/* "View.MemoryView":647
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*
* slice_copy(self, &src) # <<<<<<<<<<<<<<
* dst = slice_copy_contig(&src, "fortran", self.view.ndim,
* self.view.itemsize,
*/
__pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src));
/* "View.MemoryView":648
*
* slice_copy(self, &src)
* dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<<
* self.view.itemsize,
* flags|PyBUF_F_CONTIGUOUS,
*/
__pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(2, 648, __pyx_L1_error)
__pyx_v_dst = __pyx_t_1;
/* "View.MemoryView":653
* self.dtype_is_object)
*
* return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 653, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":643
* return memoryview_copy_from_slice(self, &mslice)
*
* def copy_fortran(self): # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice src, dst
* cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
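/*
 * Illustrative sketch (not generated by Cython): the difference between the
 * "c" and "fortran" orders requested by copy() and copy_fortran() above is
 * purely in how strides are laid out for a given shape.  The invented
 * helper below fills a strides array for either layout: 'F' packs the first
 * axis tightest (column-major), anything else packs the last axis tightest
 * (row-major).
 */
static void example_fill_strides(char order, int ndim,
                                 const Py_ssize_t *shape,
                                 Py_ssize_t itemsize,
                                 Py_ssize_t *strides)
{
    Py_ssize_t acc = itemsize;
    int i;
    if (order == 'F') {
        for (i = 0; i < ndim; i++) {      /* column-major: first axis fastest */
            strides[i] = acc;
            acc *= shape[i];
        }
    } else {
        for (i = ndim - 1; i >= 0; i--) { /* row-major: last axis fastest */
            strides[i] = acc;
            acc *= shape[i];
        }
    }
}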
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
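/*
 * Illustrative sketch: both __reduce_cython__ and __setstate_cython__ above
 * exist only to block pickling, since a memoryview cannot be reconstructed
 * from state alone (its __cinit__ needs a live buffer-exporting object).
 * In raw CPython API terms the pattern reduces to the invented helper
 * below: set a TypeError and return NULL to signal the exception.
 */
static PyObject *example_block_pickling(PyObject *self, PyObject *unused)
{
    (void)self; (void)unused;
    PyErr_SetString(PyExc_TypeError,
                    "no default __reduce__ due to non-trivial __cinit__");
    return NULL;  /* NULL return means "exception set" in the C API */
}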
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) {
struct __pyx_memoryview_obj *__pyx_v_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_cwrapper", 0);
/* "View.MemoryView":658
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<<
* result.typeinfo = typeinfo
* return result
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_o);
__Pyx_GIVEREF(__pyx_v_o);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 658, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":659
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo):
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo # <<<<<<<<<<<<<<
* return result
*
*/
__pyx_v_result->typeinfo = __pyx_v_typeinfo;
/* "View.MemoryView":660
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_check')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":657
*
* @cname('__pyx_memoryview_new')
* cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<<
* cdef memoryview result = memoryview(o, flags, dtype_is_object)
* result.typeinfo = typeinfo
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
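/*
 * Illustrative sketch: memoryview_cwrapper above hand-builds a 3-tuple of
 * (o, flags, dtype_is_object) and calls the memoryview type with it, which
 * is what Cython emits for a constructor call.  The invented helper below
 * shows the equivalent one-liner using the stock CPython convenience API;
 * in the format string, "O" passes an object and "i" converts a C int.
 */
static PyObject *example_call_type(PyObject *type, PyObject *o,
                                   int flags, int dtype_is_object)
{
    /* returns a new reference, or NULL with an exception set */
    return PyObject_CallFunction(type, "Oii", o, flags, dtype_is_object);
}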
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
__Pyx_RefNannySetupContext("memoryview_check", 0);
/* "View.MemoryView":664
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o):
* return isinstance(o, memoryview) # <<<<<<<<<<<<<<
*
* cdef tuple _unellipsify(object index, int ndim):
*/
__pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type);
__pyx_r = __pyx_t_1;
goto __pyx_L0;
/* "View.MemoryView":663
*
* @cname('__pyx_memoryview_check')
* cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<<
* return isinstance(o, memoryview)
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
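/*
 * Illustrative sketch: the isinstance(o, memoryview) above compiles down to
 * a plain C type check.  PyObject_TypeCheck is the stock CPython equivalent
 * (matches the exact type or any subclass); the helper name is invented for
 * illustration.
 */
static int example_is_instance_of(PyObject *o, PyTypeObject *type)
{
    return PyObject_TypeCheck(o, type);  /* 1 if o is an instance, else 0 */
}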
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) {
PyObject *__pyx_v_tup = NULL;
PyObject *__pyx_v_result = NULL;
int __pyx_v_have_slices;
int __pyx_v_seen_ellipsis;
CYTHON_UNUSED PyObject *__pyx_v_idx = NULL;
PyObject *__pyx_v_item = NULL;
Py_ssize_t __pyx_v_nslices;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
Py_ssize_t __pyx_t_5;
PyObject *(*__pyx_t_6)(PyObject *);
PyObject *__pyx_t_7 = NULL;
Py_ssize_t __pyx_t_8;
int __pyx_t_9;
int __pyx_t_10;
PyObject *__pyx_t_11 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("_unellipsify", 0);
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
__pyx_t_1 = PyTuple_Check(__pyx_v_index);
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":672
* """
* if not isinstance(index, tuple):
* tup = (index,) # <<<<<<<<<<<<<<
* else:
* tup = index
*/
__pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 672, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_index);
__Pyx_GIVEREF(__pyx_v_index);
PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index);
__pyx_v_tup = __pyx_t_3;
__pyx_t_3 = 0;
/* "View.MemoryView":671
* full slices.
* """
* if not isinstance(index, tuple): # <<<<<<<<<<<<<<
* tup = (index,)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":674
* tup = (index,)
* else:
* tup = index # <<<<<<<<<<<<<<
*
* result = []
*/
/*else*/ {
__Pyx_INCREF(__pyx_v_index);
__pyx_v_tup = __pyx_v_index;
}
__pyx_L3:;
/* "View.MemoryView":676
* tup = index
*
* result = [] # <<<<<<<<<<<<<<
* have_slices = False
* seen_ellipsis = False
*/
__pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 676, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_v_result = ((PyObject*)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":677
*
* result = []
* have_slices = False # <<<<<<<<<<<<<<
* seen_ellipsis = False
* for idx, item in enumerate(tup):
*/
__pyx_v_have_slices = 0;
/* "View.MemoryView":678
* result = []
* have_slices = False
* seen_ellipsis = False # <<<<<<<<<<<<<<
* for idx, item in enumerate(tup):
* if item is Ellipsis:
*/
__pyx_v_seen_ellipsis = 0;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
__Pyx_INCREF(__pyx_int_0);
__pyx_t_3 = __pyx_int_0;
if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) {
__pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0;
__pyx_t_6 = NULL;
} else {
__pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 679, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_6)) {
if (likely(PyList_CheckExact(__pyx_t_4))) {
if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
} else {
if (__pyx_t_5 >= PyTuple_GET_SIZE(__pyx_t_4)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(2, 679, __pyx_L1_error)
#else
__pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
#endif
}
} else {
__pyx_t_7 = __pyx_t_6(__pyx_t_4);
if (unlikely(!__pyx_t_7)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(2, 679, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_7);
}
__Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7);
__pyx_t_7 = 0;
__Pyx_INCREF(__pyx_t_3);
__Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3);
__pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 679, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_3);
__pyx_t_3 = __pyx_t_7;
__pyx_t_7 = 0;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
__pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
__pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(2, 682, __pyx_L1_error)
__pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__18);
__Pyx_GIVEREF(__pyx_slice__18);
PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__18);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 682, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
/* "View.MemoryView":683
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True # <<<<<<<<<<<<<<
* else:
* result.append(slice(None))
*/
__pyx_v_seen_ellipsis = 1;
/* "View.MemoryView":681
* for idx, item in enumerate(tup):
* if item is Ellipsis:
* if not seen_ellipsis: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * (ndim - len(tup) + 1))
* seen_ellipsis = True
*/
goto __pyx_L7;
}
/* "View.MemoryView":685
* seen_ellipsis = True
* else:
* result.append(slice(None)) # <<<<<<<<<<<<<<
* have_slices = True
* else:
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__18); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 685, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":686
* else:
* result.append(slice(None))
* have_slices = True # <<<<<<<<<<<<<<
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
*/
__pyx_v_have_slices = 1;
/* "View.MemoryView":680
* seen_ellipsis = False
* for idx, item in enumerate(tup):
* if item is Ellipsis: # <<<<<<<<<<<<<<
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1))
*/
goto __pyx_L6;
}
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
/*else*/ {
__pyx_t_2 = PySlice_Check(__pyx_v_item);
__pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0);
if (__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0);
__pyx_t_1 = __pyx_t_10;
__pyx_L9_bool_binop_done:;
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":689
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item):
* raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<<
*
* have_slices = have_slices or isinstance(item, slice)
*/
__pyx_t_7 = __Pyx_PyString_FormatSafe(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__pyx_t_11 = __Pyx_PyObject_CallOneArg(__pyx_builtin_TypeError, __pyx_t_7); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 689, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_Raise(__pyx_t_11, 0, 0, 0);
__Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0;
__PYX_ERR(2, 689, __pyx_L1_error)
/* "View.MemoryView":688
* have_slices = True
* else:
* if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<<
* raise TypeError("Cannot index with type '%s'" % type(item))
*
*/
}
/* "View.MemoryView":691
* raise TypeError("Cannot index with type '%s'" % type(item))
*
* have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<<
* result.append(item)
*
*/
__pyx_t_10 = (__pyx_v_have_slices != 0);
if (!__pyx_t_10) {
} else {
__pyx_t_1 = __pyx_t_10;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = PySlice_Check(__pyx_v_item);
__pyx_t_2 = (__pyx_t_10 != 0);
__pyx_t_1 = __pyx_t_2;
__pyx_L11_bool_binop_done:;
__pyx_v_have_slices = __pyx_t_1;
/* "View.MemoryView":692
*
* have_slices = have_slices or isinstance(item, slice)
* result.append(item) # <<<<<<<<<<<<<<
*
* nslices = ndim - len(result)
*/
__pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 692, __pyx_L1_error)
}
__pyx_L6:;
/* "View.MemoryView":679
* have_slices = False
* seen_ellipsis = False
* for idx, item in enumerate(tup): # <<<<<<<<<<<<<<
* if item is Ellipsis:
* if not seen_ellipsis:
*/
}
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":694
* result.append(item)
*
* nslices = ndim - len(result) # <<<<<<<<<<<<<<
* if nslices:
* result.extend([slice(None)] * nslices)
*/
__pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(2, 694, __pyx_L1_error)
__pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5);
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
__pyx_t_1 = (__pyx_v_nslices != 0);
if (__pyx_t_1) {
/* "View.MemoryView":696
* nslices = ndim - len(result)
* if nslices:
* result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<<
*
* return have_slices or nslices, tuple(result)
*/
__pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 696, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
{ Py_ssize_t __pyx_temp;
for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) {
__Pyx_INCREF(__pyx_slice__18);
__Pyx_GIVEREF(__pyx_slice__18);
PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__18);
}
}
__pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 696, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":695
*
* nslices = ndim - len(result)
* if nslices: # <<<<<<<<<<<<<<
* result.extend([slice(None)] * nslices)
*
*/
}
/* "View.MemoryView":698
* result.extend([slice(None)] * nslices)
*
* return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<<
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
*/
__Pyx_XDECREF(__pyx_r);
if (!__pyx_v_have_slices) {
} else {
__pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
goto __pyx_L14_bool_binop_done;
}
__pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_3 = __pyx_t_4;
__pyx_t_4 = 0;
__pyx_L14_bool_binop_done:;
__pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__pyx_t_11 = PyTuple_New(2); if (unlikely(!__pyx_t_11)) __PYX_ERR(2, 698, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_11);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_3);
__Pyx_GIVEREF(__pyx_t_4);
PyTuple_SET_ITEM(__pyx_t_11, 1, __pyx_t_4);
__pyx_t_3 = 0;
__pyx_t_4 = 0;
__pyx_r = ((PyObject*)__pyx_t_11);
__pyx_t_11 = 0;
goto __pyx_L0;
/* "View.MemoryView":666
* return isinstance(o, memoryview)
*
* cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<<
* """
* Replace all ellipses with full slices and fill incomplete indices with
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_11);
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v_tup);
__Pyx_XDECREF(__pyx_v_result);
__Pyx_XDECREF(__pyx_v_idx);
__Pyx_XDECREF(__pyx_v_item);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
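/*
 * Illustrative sketch of the arithmetic inside _unellipsify: the first
 * Ellipsis in an index tuple expands to (ndim - len(tup) + 1) full slices,
 * clamped at zero, exactly mirroring the guard in the PyList_New() call
 * above; any later Ellipsis contributes a single slice(None).  Invented
 * helper, not generated code.
 */
static Py_ssize_t example_ellipsis_expansion(int ndim, Py_ssize_t tuple_len,
                                             int seen_ellipsis)
{
    Py_ssize_t n;
    if (seen_ellipsis)
        return 1;                 /* second and later ellipses: one slice */
    n = (Py_ssize_t)ndim - tuple_len + 1;
    return n < 0 ? 0 : n;         /* first ellipsis: fill remaining dims */
}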
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
int __pyx_t_4;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assert_direct_dimensions", 0);
/* "View.MemoryView":701
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported")
*/
__pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim);
for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) {
__pyx_t_1 = __pyx_t_3;
__pyx_v_suboffset = (__pyx_t_1[0]);
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
__pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0);
if (unlikely(__pyx_t_4)) {
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__Pyx_Raise(__pyx_t_5, 0, 0, 0);
__Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0;
__PYX_ERR(2, 703, __pyx_L1_error)
/* "View.MemoryView":702
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim):
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* raise ValueError("Indirect dimensions not supported")
*
*/
}
}
/* "View.MemoryView":700
* return have_slices or nslices, tuple(result)
*
* cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<<
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
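/*
 * Illustrative sketch: assert_direct_dimensions above rejects any PEP 3118
 * buffer whose suboffsets contain a non-negative entry, i.e. a dimension
 * that must be reached through a pointer indirection.  A minimal
 * non-raising variant of the same scan (invented name) would be:
 */
static int example_all_dimensions_direct(const Py_ssize_t *suboffsets, int ndim)
{
    int i;
    if (!suboffsets)
        return 1;             /* no suboffset array at all: fully direct */
    for (i = 0; i < ndim; i++)
        if (suboffsets[i] >= 0)
            return 0;         /* indirect dimension found */
    return 1;
}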
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) {
int __pyx_v_new_ndim;
int __pyx_v_suboffset_dim;
int __pyx_v_dim;
__Pyx_memviewslice __pyx_v_src;
__Pyx_memviewslice __pyx_v_dst;
__Pyx_memviewslice *__pyx_v_p_src;
struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0;
__Pyx_memviewslice *__pyx_v_p_dst;
int *__pyx_v_p_suboffset_dim;
Py_ssize_t __pyx_v_start;
Py_ssize_t __pyx_v_stop;
Py_ssize_t __pyx_v_step;
int __pyx_v_have_start;
int __pyx_v_have_stop;
int __pyx_v_have_step;
PyObject *__pyx_v_index = NULL;
struct __pyx_memoryview_obj *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
struct __pyx_memoryview_obj *__pyx_t_4;
char *__pyx_t_5;
int __pyx_t_6;
Py_ssize_t __pyx_t_7;
PyObject *(*__pyx_t_8)(PyObject *);
PyObject *__pyx_t_9 = NULL;
Py_ssize_t __pyx_t_10;
int __pyx_t_11;
Py_ssize_t __pyx_t_12;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memview_slice", 0);
/* "View.MemoryView":711
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices):
* cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<<
* cdef bint negative_step
* cdef __Pyx_memviewslice src, dst
*/
__pyx_v_new_ndim = 0;
__pyx_v_suboffset_dim = -1;
/* "View.MemoryView":718
*
*
* memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<<
*
* cdef _memoryviewslice memviewsliceobj
*/
(void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))));
/* "View.MemoryView":722
* cdef _memoryviewslice memviewsliceobj
*
* assert memview.view.ndim > 0 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
#ifndef CYTHON_WITHOUT_ASSERTIONS
if (unlikely(!Py_OptimizeFlag)) {
if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) {
PyErr_SetNone(PyExc_AssertionError);
__PYX_ERR(2, 722, __pyx_L1_error)
}
}
#endif
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":725
*
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview # <<<<<<<<<<<<<<
* p_src = &memviewsliceobj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 725, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":726
* if isinstance(memview, _memoryviewslice):
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, &src)
*/
__pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice);
/* "View.MemoryView":724
* assert memview.view.ndim > 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* memviewsliceobj = memview
* p_src = &memviewsliceobj.from_slice
*/
goto __pyx_L3;
}
/* "View.MemoryView":728
* p_src = &memviewsliceobj.from_slice
* else:
* slice_copy(memview, &src) # <<<<<<<<<<<<<<
* p_src = &src
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src));
/* "View.MemoryView":729
* else:
* slice_copy(memview, &src)
* p_src = &src # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_p_src = (&__pyx_v_src);
}
__pyx_L3:;
/* "View.MemoryView":735
*
*
* dst.memview = p_src.memview # <<<<<<<<<<<<<<
* dst.data = p_src.data
*
*/
__pyx_t_4 = __pyx_v_p_src->memview;
__pyx_v_dst.memview = __pyx_t_4;
/* "View.MemoryView":736
*
* dst.memview = p_src.memview
* dst.data = p_src.data # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_v_p_src->data;
__pyx_v_dst.data = __pyx_t_5;
/* "View.MemoryView":741
*
*
* cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<<
* cdef int *p_suboffset_dim = &suboffset_dim
* cdef Py_ssize_t start, stop, step
*/
__pyx_v_p_dst = (&__pyx_v_dst);
/* "View.MemoryView":742
*
* cdef __Pyx_memviewslice *p_dst = &dst
* cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<<
* cdef Py_ssize_t start, stop, step
* cdef bint have_start, have_stop, have_step
*/
__pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim);
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
__pyx_t_6 = 0;
if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) {
__pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0;
__pyx_t_8 = NULL;
} else {
__pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 746, __pyx_L1_error)
}
for (;;) {
if (likely(!__pyx_t_8)) {
if (likely(PyList_CheckExact(__pyx_t_3))) {
if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
} else {
if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break;
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
__pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(2, 746, __pyx_L1_error)
#else
__pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 746, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
#endif
}
} else {
__pyx_t_9 = __pyx_t_8(__pyx_t_3);
if (unlikely(!__pyx_t_9)) {
PyObject* exc_type = PyErr_Occurred();
if (exc_type) {
if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear();
else __PYX_ERR(2, 746, __pyx_L1_error)
}
break;
}
__Pyx_GOTREF(__pyx_t_9);
}
__Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9);
__pyx_t_9 = 0;
__pyx_v_dim = __pyx_t_6;
__pyx_t_6 = (__pyx_t_6 + 1);
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
__pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":751
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
* index, 0, 0, # start, stop, step # <<<<<<<<<<<<<<
* 0, 0, 0, # have_{start,stop,step}
* False)
*/
__pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 751, __pyx_L1_error)
/* "View.MemoryView":748
* for dim, index in enumerate(indices):
* if PyIndex_Check(index):
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 748, __pyx_L1_error)
/* "View.MemoryView":747
*
* for dim, index in enumerate(indices):
* if PyIndex_Check(index): # <<<<<<<<<<<<<<
* slice_memviewslice(
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
*/
goto __pyx_L6;
}
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
__pyx_t_2 = (__pyx_v_index == Py_None);
__pyx_t_1 = (__pyx_t_2 != 0);
if (__pyx_t_1) {
/* "View.MemoryView":755
* False)
* elif index is None:
* p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<<
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
*/
(__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1;
/* "View.MemoryView":756
* elif index is None:
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<<
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1
*/
(__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0;
/* "View.MemoryView":757
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<<
* new_ndim += 1
* else:
*/
(__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L;
/* "View.MemoryView":758
* p_dst.strides[new_ndim] = 0
* p_dst.suboffsets[new_ndim] = -1
* new_ndim += 1 # <<<<<<<<<<<<<<
* else:
* start = index.start or 0
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
/* "View.MemoryView":754
* 0, 0, 0, # have_{start,stop,step}
* False)
* elif index is None: # <<<<<<<<<<<<<<
* p_dst.shape[new_ndim] = 1
* p_dst.strides[new_ndim] = 0
*/
goto __pyx_L6;
}
/* "View.MemoryView":760
* new_ndim += 1
* else:
* start = index.start or 0 # <<<<<<<<<<<<<<
* stop = index.stop or 0
* step = index.step or 0
*/
/*else*/ {
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 760, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 760, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 760, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L7_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L7_bool_binop_done:;
__pyx_v_start = __pyx_t_10;
/* "View.MemoryView":761
* else:
* start = index.start or 0
* stop = index.stop or 0 # <<<<<<<<<<<<<<
* step = index.step or 0
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 761, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 761, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 761, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L9_bool_binop_done:;
__pyx_v_stop = __pyx_t_10;
/* "View.MemoryView":762
* start = index.start or 0
* stop = index.stop or 0
* step = index.step or 0 # <<<<<<<<<<<<<<
*
* have_start = index.start is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 762, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(2, 762, __pyx_L1_error)
if (!__pyx_t_1) {
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
} else {
__pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 762, __pyx_L1_error)
__pyx_t_10 = __pyx_t_12;
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
goto __pyx_L11_bool_binop_done;
}
__pyx_t_10 = 0;
__pyx_L11_bool_binop_done:;
__pyx_v_step = __pyx_t_10;
/* "View.MemoryView":764
* step = index.step or 0
*
* have_start = index.start is not None # <<<<<<<<<<<<<<
* have_stop = index.stop is not None
* have_step = index.step is not None
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 764, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_start = __pyx_t_1;
/* "View.MemoryView":765
*
* have_start = index.start is not None
* have_stop = index.stop is not None # <<<<<<<<<<<<<<
* have_step = index.step is not None
*
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 765, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_stop = __pyx_t_1;
/* "View.MemoryView":766
* have_start = index.start is not None
* have_stop = index.stop is not None
* have_step = index.step is not None # <<<<<<<<<<<<<<
*
* slice_memviewslice(
*/
__pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(2, 766, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_9);
__pyx_t_1 = (__pyx_t_9 != Py_None);
__Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0;
__pyx_v_have_step = __pyx_t_1;
/* "View.MemoryView":768
* have_step = index.step is not None
*
* slice_memviewslice( # <<<<<<<<<<<<<<
* p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim],
* dim, new_ndim, p_suboffset_dim,
*/
__pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(2, 768, __pyx_L1_error)
/* "View.MemoryView":774
* have_start, have_stop, have_step,
* True)
* new_ndim += 1 # <<<<<<<<<<<<<<
*
* if isinstance(memview, _memoryviewslice):
*/
__pyx_v_new_ndim = (__pyx_v_new_ndim + 1);
}
__pyx_L6:;
/* "View.MemoryView":746
* cdef bint have_start, have_stop, have_step
*
* for dim, index in enumerate(indices): # <<<<<<<<<<<<<<
* if PyIndex_Check(index):
* slice_memviewslice(
*/
}
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":778
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func, # <<<<<<<<<<<<<<
* memviewsliceobj.to_dtype_func,
* memview.dtype_is_object)
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 778, __pyx_L1_error) }
/* "View.MemoryView":779
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
* else:
*/
if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(2, 779, __pyx_L1_error) }
/* "View.MemoryView":777
*
* if isinstance(memview, _memoryviewslice):
* return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<<
* memviewsliceobj.to_object_func,
* memviewsliceobj.to_dtype_func,
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 777, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 777, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
/* "View.MemoryView":776
* new_ndim += 1
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* return memoryview_fromslice(dst, new_ndim,
* memviewsliceobj.to_object_func,
*/
}
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
/*else*/ {
__Pyx_XDECREF(((PyObject *)__pyx_r));
/* "View.MemoryView":783
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 782, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
/* "View.MemoryView":782
* memview.dtype_is_object)
* else:
* return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<<
* memview.dtype_is_object)
*
*/
if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(2, 782, __pyx_L1_error)
__pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3);
__pyx_t_3 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":710
*
* @cname('__pyx_memview_slice')
* cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<<
* cdef int new_ndim = 0, suboffset_dim = -1, dim
* cdef bint negative_step
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_9);
__Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj);
__Pyx_XDECREF(__pyx_v_index);
__Pyx_XGIVEREF((PyObject *)__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
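/*
 * Illustrative sketch: for each slice object, the loop above extracts
 * start/stop/step plus have_{start,stop,step} flags (note that the
 * "index.start or 0" truthiness test is harmless for an explicit start of
 * 0, because have_start still records that a bound was given) and defers
 * clipping to slice_memviewslice.  Since CPython 3.6.1 the same
 * normalization is available as a pair of stock API calls; the invented
 * helper below shows them for comparison and assumes slice_obj really is a
 * slice object.
 */
static Py_ssize_t example_normalize_slice(PyObject *slice_obj,
                                          Py_ssize_t length,
                                          Py_ssize_t *start,
                                          Py_ssize_t *stop,
                                          Py_ssize_t *step)
{
    if (PySlice_Unpack(slice_obj, start, stop, step) < 0)
        return -1;                            /* exception already set */
    /* clips start/stop into range honouring the sign of step and returns
       the number of elements the slice selects */
    return PySlice_AdjustIndices(length, start, stop, *step);
}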
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) {
Py_ssize_t __pyx_v_new_shape;
int __pyx_v_negative_step;
int __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
__pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
__pyx_t_1 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":830
*
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":829
* if not is_slice:
*
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if not 0 <= start < shape:
*/
}
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
__pyx_t_1 = (0 <= __pyx_v_start);
if (__pyx_t_1) {
__pyx_t_1 = (__pyx_v_start < __pyx_v_shape);
}
__pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":832
* start += shape
* if not 0 <= start < shape:
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<<
* else:
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 832, __pyx_L1_error)
/* "View.MemoryView":831
* if start < 0:
* start += shape
* if not 0 <= start < shape: # <<<<<<<<<<<<<<
* _err_dim(IndexError, "Index out of bounds (axis %d)", dim)
* else:
*/
}
/* "View.MemoryView":827
* cdef bint negative_step
*
* if not is_slice: # <<<<<<<<<<<<<<
*
* if start < 0:
*/
goto __pyx_L3;
}
/* "View.MemoryView":835
* else:
*
* negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<<
*
* if have_step and step == 0:
*/
/*else*/ {
__pyx_t_1 = ((__pyx_v_have_step != 0) != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step < 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L6_bool_binop_done:;
__pyx_v_negative_step = __pyx_t_2;
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
__pyx_t_1 = (__pyx_v_have_step != 0);
if (__pyx_t_1) {
} else {
__pyx_t_2 = __pyx_t_1;
goto __pyx_L9_bool_binop_done;
}
__pyx_t_1 = ((__pyx_v_step == 0) != 0);
__pyx_t_2 = __pyx_t_1;
__pyx_L9_bool_binop_done:;
if (__pyx_t_2) {
/* "View.MemoryView":838
*
* if have_step and step == 0:
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 838, __pyx_L1_error)
/* "View.MemoryView":837
* negative_step = have_step != 0 and step < 0
*
* if have_step and step == 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Step may not be zero (axis %d)", dim)
*
*/
}
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
__pyx_t_2 = (__pyx_v_have_start != 0);
if (__pyx_t_2) {
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":843
* if have_start:
* if start < 0:
* start += shape # <<<<<<<<<<<<<<
* if start < 0:
* start = 0
*/
__pyx_v_start = (__pyx_v_start + __pyx_v_shape);
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
__pyx_t_2 = ((__pyx_v_start < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":845
* start += shape
* if start < 0:
* start = 0 # <<<<<<<<<<<<<<
* elif start >= shape:
* if negative_step:
*/
__pyx_v_start = 0;
/* "View.MemoryView":844
* if start < 0:
* start += shape
* if start < 0: # <<<<<<<<<<<<<<
* start = 0
* elif start >= shape:
*/
}
/* "View.MemoryView":842
*
* if have_start:
* if start < 0: # <<<<<<<<<<<<<<
* start += shape
* if start < 0:
*/
goto __pyx_L12;
}
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
__pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":848
* elif start >= shape:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = shape
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":847
* start = 0
* elif start >= shape:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L14;
}
/* "View.MemoryView":850
* start = shape - 1
* else:
* start = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
/*else*/ {
__pyx_v_start = __pyx_v_shape;
}
__pyx_L14:;
/* "View.MemoryView":846
* if start < 0:
* start = 0
* elif start >= shape: # <<<<<<<<<<<<<<
* if negative_step:
* start = shape - 1
*/
}
__pyx_L12:;
/* "View.MemoryView":841
*
*
* if have_start: # <<<<<<<<<<<<<<
* if start < 0:
* start += shape
*/
goto __pyx_L11;
}
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":853
* else:
* if negative_step:
* start = shape - 1 # <<<<<<<<<<<<<<
* else:
* start = 0
*/
__pyx_v_start = (__pyx_v_shape - 1);
/* "View.MemoryView":852
* start = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* start = shape - 1
* else:
*/
goto __pyx_L15;
}
/* "View.MemoryView":855
* start = shape - 1
* else:
* start = 0 # <<<<<<<<<<<<<<
*
* if have_stop:
*/
/*else*/ {
__pyx_v_start = 0;
}
__pyx_L15:;
}
__pyx_L11:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
__pyx_t_2 = (__pyx_v_have_stop != 0);
if (__pyx_t_2) {
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":859
* if have_stop:
* if stop < 0:
* stop += shape # <<<<<<<<<<<<<<
* if stop < 0:
* stop = 0
*/
__pyx_v_stop = (__pyx_v_stop + __pyx_v_shape);
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
__pyx_t_2 = ((__pyx_v_stop < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":861
* stop += shape
* if stop < 0:
* stop = 0 # <<<<<<<<<<<<<<
* elif stop > shape:
* stop = shape
*/
__pyx_v_stop = 0;
/* "View.MemoryView":860
* if stop < 0:
* stop += shape
* if stop < 0: # <<<<<<<<<<<<<<
* stop = 0
* elif stop > shape:
*/
}
/* "View.MemoryView":858
*
* if have_stop:
* if stop < 0: # <<<<<<<<<<<<<<
* stop += shape
* if stop < 0:
*/
goto __pyx_L17;
}
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
__pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":863
* stop = 0
* elif stop > shape:
* stop = shape # <<<<<<<<<<<<<<
* else:
* if negative_step:
*/
__pyx_v_stop = __pyx_v_shape;
/* "View.MemoryView":862
* if stop < 0:
* stop = 0
* elif stop > shape: # <<<<<<<<<<<<<<
* stop = shape
* else:
*/
}
__pyx_L17:;
/* "View.MemoryView":857
* start = 0
*
* if have_stop: # <<<<<<<<<<<<<<
* if stop < 0:
* stop += shape
*/
goto __pyx_L16;
}
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
/*else*/ {
__pyx_t_2 = (__pyx_v_negative_step != 0);
if (__pyx_t_2) {
/* "View.MemoryView":866
* else:
* if negative_step:
* stop = -1 # <<<<<<<<<<<<<<
* else:
* stop = shape
*/
__pyx_v_stop = -1L;
/* "View.MemoryView":865
* stop = shape
* else:
* if negative_step: # <<<<<<<<<<<<<<
* stop = -1
* else:
*/
goto __pyx_L19;
}
/* "View.MemoryView":868
* stop = -1
* else:
* stop = shape # <<<<<<<<<<<<<<
*
* if not have_step:
*/
/*else*/ {
__pyx_v_stop = __pyx_v_shape;
}
__pyx_L19:;
}
__pyx_L16:;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
__pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":871
*
* if not have_step:
* step = 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_step = 1;
/* "View.MemoryView":870
* stop = shape
*
* if not have_step: # <<<<<<<<<<<<<<
* step = 1
*
*/
}
/* "View.MemoryView":875
*
* with cython.cdivision(True):
* new_shape = (stop - start) // step # <<<<<<<<<<<<<<
*
* if (stop - start) - step * new_shape:
*/
__pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
__pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":878
*
* if (stop - start) - step * new_shape:
* new_shape += 1 # <<<<<<<<<<<<<<
*
* if new_shape < 0:
*/
__pyx_v_new_shape = (__pyx_v_new_shape + 1);
/* "View.MemoryView":877
* new_shape = (stop - start) // step
*
* if (stop - start) - step * new_shape: # <<<<<<<<<<<<<<
* new_shape += 1
*
*/
}
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
__pyx_t_2 = ((__pyx_v_new_shape < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":881
*
* if new_shape < 0:
* new_shape = 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_new_shape = 0;
/* "View.MemoryView":880
* new_shape += 1
*
* if new_shape < 0: # <<<<<<<<<<<<<<
* new_shape = 0
*
*/
}
/* "View.MemoryView":884
*
*
* dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<<
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset
*/
(__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step);
/* "View.MemoryView":885
*
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<<
* dst.suboffsets[new_ndim] = suboffset
*
*/
(__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape;
/* "View.MemoryView":886
* dst.strides[new_ndim] = stride * step
* dst.shape[new_ndim] = new_shape
* dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset;
}
__pyx_L3:;
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
__pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":890
*
* if suboffset_dim[0] < 0:
* dst.data += start * stride # <<<<<<<<<<<<<<
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride
*/
__pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride));
/* "View.MemoryView":889
*
*
* if suboffset_dim[0] < 0: # <<<<<<<<<<<<<<
* dst.data += start * stride
* else:
*/
goto __pyx_L23;
}
/* "View.MemoryView":892
* dst.data += start * stride
* else:
* dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<<
*
* if suboffset >= 0:
*/
/*else*/ {
__pyx_t_3 = (__pyx_v_suboffset_dim[0]);
(__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride));
}
__pyx_L23:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
__pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
__pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":897
* if not is_slice:
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<<
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
*/
__pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":896
* if suboffset >= 0:
* if not is_slice:
* if new_ndim == 0: # <<<<<<<<<<<<<<
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
*/
goto __pyx_L26;
}
/* "View.MemoryView":899
* dst.data = (<char **> dst.data)[0] + suboffset
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<<
* "must be indexed and not sliced", dim)
* else:
*/
/*else*/ {
/* "View.MemoryView":900
* else:
* _err_dim(IndexError, "All dimensions preceding dimension %d "
* "must be indexed and not sliced", dim) # <<<<<<<<<<<<<<
* else:
* suboffset_dim[0] = new_ndim
*/
__pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 899, __pyx_L1_error)
}
__pyx_L26:;
/* "View.MemoryView":895
*
* if suboffset >= 0:
* if not is_slice: # <<<<<<<<<<<<<<
* if new_ndim == 0:
* dst.data = (<char **> dst.data)[0] + suboffset
*/
goto __pyx_L25;
}
/* "View.MemoryView":902
* "must be indexed and not sliced", dim)
* else:
* suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<<
*
* return 0
*/
/*else*/ {
(__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim;
}
__pyx_L25:;
/* "View.MemoryView":894
* dst.suboffsets[suboffset_dim[0]] += start * stride
*
* if suboffset >= 0: # <<<<<<<<<<<<<<
* if not is_slice:
* if new_ndim == 0:
*/
}
/* "View.MemoryView":904
* suboffset_dim[0] = new_ndim
*
* return 0 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":807
*
* @cname('__pyx_memoryview_slice_memviewslice')
* cdef int slice_memviewslice( # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
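/* Summary of the slice bookkeeping above: for a sliced dimension the new
 * stride/shape/suboffset slots are filled in
 * (dst.strides[new_ndim] = stride * step, etc.), and the slice's start
 * offset is folded into dst.data for direct views, or into the suboffset of
 * the last indirect dimension otherwise:
 *
 *     if (suboffset_dim[0] < 0) dst.data += start * stride;
 *     else                      dst.suboffsets[suboffset_dim[0]] += start * stride;
 *
 * When the current dimension is itself indirect (suboffset >= 0), an integer
 * index is only legal while no output dimension exists yet (new_ndim == 0),
 * in which case the data pointer is dereferenced and advanced by suboffset;
 * a slice instead records this dimension as the new suboffset_dim. The
 * function returns 0 on success and -1 after raising. */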
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_suboffset;
Py_ssize_t __pyx_v_itemsize;
char *__pyx_v_resultp;
char *__pyx_r;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("pybuffer_index", 0);
/* "View.MemoryView":912
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index,
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<<
* cdef Py_ssize_t itemsize = view.itemsize
* cdef char *resultp
*/
__pyx_v_suboffset = -1L;
/* "View.MemoryView":913
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
* cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<<
* cdef char *resultp
*
*/
__pyx_t_1 = __pyx_v_view->itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
__pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":917
*
* if view.ndim == 0:
* shape = view.len / itemsize # <<<<<<<<<<<<<<
* stride = itemsize
* else:
*/
if (unlikely(__pyx_v_itemsize == 0)) {
PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero");
__PYX_ERR(2, 917, __pyx_L1_error)
}
else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) {
PyErr_SetString(PyExc_OverflowError, "value too large to perform division");
__PYX_ERR(2, 917, __pyx_L1_error)
}
__pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize);
/* "View.MemoryView":918
* if view.ndim == 0:
* shape = view.len / itemsize
* stride = itemsize # <<<<<<<<<<<<<<
* else:
* shape = view.shape[dim]
*/
__pyx_v_stride = __pyx_v_itemsize;
/* "View.MemoryView":916
* cdef char *resultp
*
* if view.ndim == 0: # <<<<<<<<<<<<<<
* shape = view.len / itemsize
* stride = itemsize
*/
goto __pyx_L3;
}
/* "View.MemoryView":920
* stride = itemsize
* else:
* shape = view.shape[dim] # <<<<<<<<<<<<<<
* stride = view.strides[dim]
* if view.suboffsets != NULL:
*/
/*else*/ {
__pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]);
/* "View.MemoryView":921
* else:
* shape = view.shape[dim]
* stride = view.strides[dim] # <<<<<<<<<<<<<<
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim]
*/
__pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
__pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":923
* stride = view.strides[dim]
* if view.suboffsets != NULL:
* suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<<
*
* if index < 0:
*/
__pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]);
/* "View.MemoryView":922
* shape = view.shape[dim]
* stride = view.strides[dim]
* if view.suboffsets != NULL: # <<<<<<<<<<<<<<
* suboffset = view.suboffsets[dim]
*
*/
}
}
__pyx_L3:;
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":926
*
* if index < 0:
* index += view.shape[dim] # <<<<<<<<<<<<<<
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*/
__pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim]));
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index < 0) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":928
* index += view.shape[dim]
* if index < 0:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* if index >= shape:
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 928, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 928, __pyx_L1_error)
/* "View.MemoryView":927
* if index < 0:
* index += view.shape[dim]
* if index < 0: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":925
* suboffset = view.suboffsets[dim]
*
* if index < 0: # <<<<<<<<<<<<<<
* index += view.shape[dim]
* if index < 0:
*/
}
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
__pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0);
if (unlikely(__pyx_t_2)) {
/* "View.MemoryView":931
*
* if index >= shape:
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<<
*
* resultp = bufp + index * stride
*/
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_builtin_IndexError, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 931, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 931, __pyx_L1_error)
/* "View.MemoryView":930
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* if index >= shape: # <<<<<<<<<<<<<<
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
*/
}
/* "View.MemoryView":933
* raise IndexError("Out of bounds on buffer access (axis %d)" % dim)
*
* resultp = bufp + index * stride # <<<<<<<<<<<<<<
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset
*/
__pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride));
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
__pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":935
* resultp = bufp + index * stride
* if suboffset >= 0:
* resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<<
*
* return resultp
*/
__pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset);
/* "View.MemoryView":934
*
* resultp = bufp + index * stride
* if suboffset >= 0: # <<<<<<<<<<<<<<
* resultp = (<char **> resultp)[0] + suboffset
*
*/
}
/* "View.MemoryView":937
* resultp = (<char **> resultp)[0] + suboffset
*
* return resultp # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_resultp;
goto __pyx_L0;
/* "View.MemoryView":910
*
* @cname('__pyx_pybuffer_index')
* cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<<
* Py_ssize_t dim) except NULL:
* cdef Py_ssize_t shape, stride, suboffset = -1
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
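/* Summary of pybuffer_index above: it resolves a single index in dimension
 * `dim` of a Py_buffer to the address of that element. Negative indices wrap
 * once (index += shape), out-of-range indices raise IndexError, and the
 * address is then computed as
 *
 *     resultp = bufp + index * stride;
 *     if (suboffset >= 0)                      // indirect (PIL-style) buffer
 *         resultp = *(char **)resultp + suboffset;
 *
 * so e.g. index 3 into a contiguous array of doubles yields
 * bufp + 3 * sizeof(double). A 0-dim buffer is treated as one flat dimension
 * with shape = len / itemsize and stride = itemsize. */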
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) {
int __pyx_v_ndim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
int __pyx_v_i;
int __pyx_v_j;
int __pyx_r;
int __pyx_t_1;
Py_ssize_t *__pyx_t_2;
long __pyx_t_3;
long __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
int __pyx_t_7;
int __pyx_t_8;
int __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":944
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0:
* cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<<
*
* cdef Py_ssize_t *shape = memslice.shape
*/
__pyx_t_1 = __pyx_v_memslice->memview->view.ndim;
__pyx_v_ndim = __pyx_t_1;
/* "View.MemoryView":946
* cdef int ndim = memslice.memview.view.ndim
*
* cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<<
* cdef Py_ssize_t *strides = memslice.strides
*
*/
__pyx_t_2 = __pyx_v_memslice->shape;
__pyx_v_shape = __pyx_t_2;
/* "View.MemoryView":947
*
* cdef Py_ssize_t *shape = memslice.shape
* cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = __pyx_v_memslice->strides;
__pyx_v_strides = __pyx_t_2;
/* "View.MemoryView":951
*
* cdef int i, j
* for i in range(ndim / 2): # <<<<<<<<<<<<<<
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
*/
__pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2);
__pyx_t_4 = __pyx_t_3;
for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":952
* cdef int i, j
* for i in range(ndim / 2):
* j = ndim - 1 - i # <<<<<<<<<<<<<<
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i]
*/
__pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i);
/* "View.MemoryView":953
* for i in range(ndim / 2):
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<<
* shape[i], shape[j] = shape[j], shape[i]
*
*/
__pyx_t_5 = (__pyx_v_strides[__pyx_v_j]);
__pyx_t_6 = (__pyx_v_strides[__pyx_v_i]);
(__pyx_v_strides[__pyx_v_i]) = __pyx_t_5;
(__pyx_v_strides[__pyx_v_j]) = __pyx_t_6;
/* "View.MemoryView":954
* j = ndim - 1 - i
* strides[i], strides[j] = strides[j], strides[i]
* shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<<
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
*/
__pyx_t_6 = (__pyx_v_shape[__pyx_v_j]);
__pyx_t_5 = (__pyx_v_shape[__pyx_v_i]);
(__pyx_v_shape[__pyx_v_i]) = __pyx_t_6;
(__pyx_v_shape[__pyx_v_j]) = __pyx_t_5;
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0);
if (!__pyx_t_8) {
} else {
__pyx_t_7 = __pyx_t_8;
goto __pyx_L6_bool_binop_done;
}
__pyx_t_8 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0);
__pyx_t_7 = __pyx_t_8;
__pyx_L6_bool_binop_done:;
if (__pyx_t_7) {
/* "View.MemoryView":957
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0:
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<<
*
* return 1
*/
__pyx_t_9 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(2, 957, __pyx_L1_error)
/* "View.MemoryView":956
* shape[i], shape[j] = shape[j], shape[i]
*
* if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<<
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
*/
}
}
/* "View.MemoryView":959
* _err(ValueError, "Cannot transpose memoryview with indirect dimensions")
*
* return 1 # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = 1;
goto __pyx_L0;
/* "View.MemoryView":943
*
* @cname('__pyx_memslice_transpose')
* cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<<
* cdef int ndim = memslice.memview.view.ndim
*
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = 0;
__pyx_L0:;
return __pyx_r;
}
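/* Summary of transpose_memslice above: the transpose is performed in place
 * by swapping shape[i] <-> shape[ndim-1-i] and strides[i] <-> strides[ndim-1-i]
 * for i in [0, ndim/2); no element data moves. Dimensions with a suboffset
 * >= 0 (indirect dimensions) cannot be transposed this way, so _err raises
 * ValueError for them. The function is declared `except 0`, hence it returns
 * 1 on success and 0 after an error. */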
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* Python wrapper */
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/
static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0);
__pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
}
static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__dealloc__", 0);
/* "View.MemoryView":977
*
* def __dealloc__(self):
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<<
*
* cdef convert_item_to_object(self, char *itemp):
*/
__PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1);
/* "View.MemoryView":976
* cdef int (*to_dtype_func)(char *, object) except 0
*
* def __dealloc__(self): # <<<<<<<<<<<<<<
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
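/* __dealloc__ above simply drops the reference the _memoryviewslice holds on
 * its underlying C slice via __PYX_XDEC_MEMVIEW(&self.from_slice, 1); the
 * matching __PYX_INC_MEMVIEW is taken in memoryview_fromslice below. */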
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("convert_item_to_object", 0);
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":981
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL:
* return self.to_object_func(itemp) # <<<<<<<<<<<<<<
* else:
* return memoryview.convert_item_to_object(self, itemp)
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 981, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
/* "View.MemoryView":980
*
* cdef convert_item_to_object(self, char *itemp):
* if self.to_object_func != NULL: # <<<<<<<<<<<<<<
* return self.to_object_func(itemp)
* else:
*/
}
/* "View.MemoryView":983
* return self.to_object_func(itemp)
* else:
* return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<<
*
* cdef assign_item_from_object(self, char *itemp, object value):
*/
/*else*/ {
__Pyx_XDECREF(__pyx_r);
__pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 983, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_r = __pyx_t_2;
__pyx_t_2 = 0;
goto __pyx_L0;
}
/* "View.MemoryView":979
* __PYX_XDEC_MEMVIEW(&self.from_slice, 1)
*
* cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<<
* if self.to_object_func != NULL:
* return self.to_object_func(itemp)
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("assign_item_from_object", 0);
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
__pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":987
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<<
* else:
* memoryview.assign_item_from_object(self, itemp, value)
*/
__pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(2, 987, __pyx_L1_error)
/* "View.MemoryView":986
*
* cdef assign_item_from_object(self, char *itemp, object value):
* if self.to_dtype_func != NULL: # <<<<<<<<<<<<<<
* self.to_dtype_func(itemp, value)
* else:
*/
goto __pyx_L3;
}
/* "View.MemoryView":989
* self.to_dtype_func(itemp, value)
* else:
* memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<<
*
* @property
*/
/*else*/ {
__pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 989, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
}
__pyx_L3:;
/* "View.MemoryView":985
* return memoryview.convert_item_to_object(self, itemp)
*
* cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<<
* if self.to_dtype_func != NULL:
* self.to_dtype_func(itemp, value)
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
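/* convert_item_to_object / assign_item_from_object above dispatch through
 * the optional C function pointers to_object_func / to_dtype_func when they
 * are set (the typed fast path installed by memoryview_fromslice), and
 * otherwise fall back to the generic memoryview base-class implementations.
 * to_dtype_func is declared `except 0`, so a 0 return propagates as a
 * Python exception. */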
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/
static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__ (wrapper)", 0);
__pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__get__", 0);
/* "View.MemoryView":993
* @property
* def base(self):
* return self.from_object # <<<<<<<<<<<<<<
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)")
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v_self->from_object);
__pyx_r = __pyx_v_self->from_object;
goto __pyx_L0;
/* "View.MemoryView":992
*
* @property
* def base(self): # <<<<<<<<<<<<<<
* return self.from_object
*
*/
/* function exit code */
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__reduce_cython__", 0);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 2, __pyx_L1_error)
/* "(tree fragment)":1
* def __reduce_cython__(self): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* Python wrapper */
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/
static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0);
__pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state));
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__setstate_cython__", 0);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 4, __pyx_L1_error)
/* "(tree fragment)":3
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<<
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
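/* __reduce_cython__ / __setstate_cython__ above exist only to reject
 * pickling: a _memoryviewslice has a non-trivial __cinit__ and wraps raw
 * pointers, so both unconditionally raise TypeError. */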
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) {
struct __pyx_memoryviewslice_obj *__pyx_v_result = 0;
Py_ssize_t __pyx_v_suboffset;
PyObject *__pyx_v_length = NULL;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
__Pyx_TypeInfo *__pyx_t_4;
Py_buffer __pyx_t_5;
Py_ssize_t *__pyx_t_6;
Py_ssize_t *__pyx_t_7;
Py_ssize_t *__pyx_t_8;
Py_ssize_t __pyx_t_9;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_fromslice", 0);
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
__pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1008
*
* if <PyObject *> memviewslice.memview == Py_None:
* return None # <<<<<<<<<<<<<<
*
*
*/
__Pyx_XDECREF(__pyx_r);
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
/* "View.MemoryView":1007
* cdef _memoryviewslice result
*
* if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<<
* return None
*
*/
}
/* "View.MemoryView":1013
*
*
* result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<<
*
* result.from_slice = memviewslice
*/
__pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(Py_None);
__Pyx_GIVEREF(Py_None);
PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None);
__Pyx_INCREF(__pyx_int_0);
__Pyx_GIVEREF(__pyx_int_0);
PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2);
__pyx_t_2 = 0;
__pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1013, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1015
* result = _memoryviewslice(None, 0, dtype_is_object)
*
* result.from_slice = memviewslice # <<<<<<<<<<<<<<
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
*/
__pyx_v_result->from_slice = __pyx_v_memviewslice;
/* "View.MemoryView":1016
*
* result.from_slice = memviewslice
* __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<<
*
* result.from_object = (<memoryview> memviewslice.memview).base
*/
__PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1);
/* "View.MemoryView":1018
* __PYX_INC_MEMVIEW(&memviewslice, 1)
*
* result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<<
* result.typeinfo = memviewslice.memview.typeinfo
*
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1018, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_GIVEREF(__pyx_t_2);
__Pyx_GOTREF(__pyx_v_result->from_object);
__Pyx_DECREF(__pyx_v_result->from_object);
__pyx_v_result->from_object = __pyx_t_2;
__pyx_t_2 = 0;
/* "View.MemoryView":1019
*
* result.from_object = (<memoryview> memviewslice.memview).base
* result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<<
*
* result.view = memviewslice.memview.view
*/
__pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo;
__pyx_v_result->__pyx_base.typeinfo = __pyx_t_4;
/* "View.MemoryView":1021
* result.typeinfo = memviewslice.memview.typeinfo
*
* result.view = memviewslice.memview.view # <<<<<<<<<<<<<<
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
*/
__pyx_t_5 = __pyx_v_memviewslice.memview->view;
__pyx_v_result->__pyx_base.view = __pyx_t_5;
/* "View.MemoryView":1022
*
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<<
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
*/
__pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data);
/* "View.MemoryView":1023
* result.view = memviewslice.memview.view
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim # <<<<<<<<<<<<<<
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None)
*/
__pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim;
/* "View.MemoryView":1024
* result.view.buf = <void *> memviewslice.data
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<<
* Py_INCREF(Py_None)
*
*/
((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None;
/* "View.MemoryView":1025
* result.view.ndim = ndim
* (<__pyx_buffer *> &result.view).obj = Py_None
* Py_INCREF(Py_None) # <<<<<<<<<<<<<<
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
*/
Py_INCREF(Py_None);
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
__pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1028
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE:
* result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<<
* else:
* result.flags = PyBUF_RECORDS_RO
*/
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS;
/* "View.MemoryView":1027
* Py_INCREF(Py_None)
*
* if (<memoryview>memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<<
* result.flags = PyBUF_RECORDS
* else:
*/
goto __pyx_L4;
}
/* "View.MemoryView":1030
* result.flags = PyBUF_RECORDS
* else:
* result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<<
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
*/
/*else*/ {
__pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO;
}
__pyx_L4:;
/* "View.MemoryView":1032
* result.flags = PyBUF_RECORDS_RO
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<<
* result.view.strides = <Py_ssize_t *> result.from_slice.strides
*
*/
__pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape);
/* "View.MemoryView":1033
*
* result.view.shape = <Py_ssize_t *> result.from_slice.shape
* result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides);
/* "View.MemoryView":1036
*
*
* result.view.suboffsets = NULL # <<<<<<<<<<<<<<
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
*/
__pyx_v_result->__pyx_base.view.suboffsets = NULL;
/* "View.MemoryView":1037
*
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<<
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
*/
__pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_v_suboffset = (__pyx_t_6[0]);
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
__pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1039
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets);
/* "View.MemoryView":1040
* if suboffset >= 0:
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break # <<<<<<<<<<<<<<
*
* result.view.len = result.view.itemsize
*/
goto __pyx_L6_break;
/* "View.MemoryView":1038
* result.view.suboffsets = NULL
* for suboffset in result.from_slice.suboffsets[:ndim]:
* if suboffset >= 0: # <<<<<<<<<<<<<<
* result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets
* break
*/
}
}
__pyx_L6_break:;
/* "View.MemoryView":1042
* break
*
* result.view.len = result.view.itemsize # <<<<<<<<<<<<<<
* for length in result.view.shape[:ndim]:
* result.view.len *= length
*/
__pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
/* "View.MemoryView":1043
*
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<<
* result.view.len *= length
*
*/
__pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim);
for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) {
__pyx_t_6 = __pyx_t_8;
__pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1043, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2);
__pyx_t_2 = 0;
/* "View.MemoryView":1044
* result.view.len = result.view.itemsize
* for length in result.view.shape[:ndim]:
* result.view.len *= length # <<<<<<<<<<<<<<
*
* result.to_object_func = to_object_func
*/
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1044, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(2, 1044, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__pyx_v_result->__pyx_base.view.len = __pyx_t_9;
}
/* "View.MemoryView":1046
* result.view.len *= length
*
* result.to_object_func = to_object_func # <<<<<<<<<<<<<<
* result.to_dtype_func = to_dtype_func
*
*/
__pyx_v_result->to_object_func = __pyx_v_to_object_func;
/* "View.MemoryView":1047
*
* result.to_object_func = to_object_func
* result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<<
*
* return result
*/
__pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func;
/* "View.MemoryView":1049
* result.to_dtype_func = to_dtype_func
*
* return result # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(((PyObject *)__pyx_v_result));
__pyx_r = ((PyObject *)__pyx_v_result);
goto __pyx_L0;
/* "View.MemoryView":999
*
* @cname('__pyx_memoryview_fromslice')
* cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<<
* int ndim,
* object (*to_object_func)(char *),
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_result);
__Pyx_XDECREF(__pyx_v_length);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
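/* Summary of memoryview_fromslice above: it wraps a C-level
 * __Pyx_memviewslice in a new _memoryviewslice Python object (or returns
 * None when the slice's memview is None). It takes a reference on the slice
 * (__PYX_INC_MEMVIEW), copies the owning memoryview's Py_buffer, then
 * repoints buf/shape/strides at the slice's own storage; view.suboffsets
 * stays NULL unless some dimension is indirect. view.len is recomputed as
 * itemsize times the product of the (possibly sliced) shape, in effect:
 *
 *     result.view.len = result.view.itemsize;
 *     for (i = 0; i < ndim; i++)
 *         result.view.len *= result.view.shape[i];
 *
 * Writability of the source decides PyBUF_RECORDS vs PyBUF_RECORDS_RO. */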
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) {
struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0;
__Pyx_memviewslice *__pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *__pyx_t_3 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("get_slice_from_memview", 0);
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1056
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice):
* obj = memview # <<<<<<<<<<<<<<
* return &obj.from_slice
* else:
*/
if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(2, 1056, __pyx_L1_error)
__pyx_t_3 = ((PyObject *)__pyx_v_memview);
__Pyx_INCREF(__pyx_t_3);
__pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3);
__pyx_t_3 = 0;
/* "View.MemoryView":1057
* if isinstance(memview, _memoryviewslice):
* obj = memview
* return &obj.from_slice # <<<<<<<<<<<<<<
* else:
* slice_copy(memview, mslice)
*/
__pyx_r = (&__pyx_v_obj->from_slice);
goto __pyx_L0;
/* "View.MemoryView":1055
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* obj = memview
* return &obj.from_slice
*/
}
/* "View.MemoryView":1059
* return &obj.from_slice
* else:
* slice_copy(memview, mslice) # <<<<<<<<<<<<<<
* return mslice
*
*/
/*else*/ {
__pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice);
/* "View.MemoryView":1060
* else:
* slice_copy(memview, mslice)
* return mslice # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_slice_copy')
*/
__pyx_r = __pyx_v_mslice;
goto __pyx_L0;
}
/* "View.MemoryView":1052
*
* @cname('__pyx_memoryview_get_slice_from_memoryview')
* cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *mslice) except NULL:
* cdef _memoryviewslice obj
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_3);
__Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF((PyObject *)__pyx_v_obj);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
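/* get_slice_from_memview above has two cases: if the memoryview is already a
 * _memoryviewslice it hands back a pointer to its embedded from_slice
 * without copying; for a plain memoryview it materializes the slice into the
 * caller-provided *mslice via slice_copy and returns that. Either way the
 * caller receives a __Pyx_memviewslice* backed by the memoryview. */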
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) {
int __pyx_v_dim;
Py_ssize_t *__pyx_v_shape;
Py_ssize_t *__pyx_v_strides;
Py_ssize_t *__pyx_v_suboffsets;
__Pyx_RefNannyDeclarations
Py_ssize_t *__pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
Py_ssize_t __pyx_t_5;
__Pyx_RefNannySetupContext("slice_copy", 0);
/* "View.MemoryView":1067
* cdef (Py_ssize_t*) shape, strides, suboffsets
*
* shape = memview.view.shape # <<<<<<<<<<<<<<
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets
*/
__pyx_t_1 = __pyx_v_memview->view.shape;
__pyx_v_shape = __pyx_t_1;
/* "View.MemoryView":1068
*
* shape = memview.view.shape
* strides = memview.view.strides # <<<<<<<<<<<<<<
* suboffsets = memview.view.suboffsets
*
*/
__pyx_t_1 = __pyx_v_memview->view.strides;
__pyx_v_strides = __pyx_t_1;
/* "View.MemoryView":1069
* shape = memview.view.shape
* strides = memview.view.strides
* suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<<
*
* dst.memview = <__pyx_memoryview *> memview
*/
__pyx_t_1 = __pyx_v_memview->view.suboffsets;
__pyx_v_suboffsets = __pyx_t_1;
/* "View.MemoryView":1071
* suboffsets = memview.view.suboffsets
*
* dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<<
* dst.data = <char *> memview.view.buf
*
*/
__pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview);
/* "View.MemoryView":1072
*
* dst.memview = <__pyx_memoryview *> memview
* dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<<
*
* for dim in range(memview.view.ndim):
*/
__pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf);
/* "View.MemoryView":1074
* dst.data = <char *> memview.view.buf
*
* for dim in range(memview.view.ndim): # <<<<<<<<<<<<<<
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
*/
__pyx_t_2 = __pyx_v_memview->view.ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_dim = __pyx_t_4;
/* "View.MemoryView":1075
*
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<<
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*/
(__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]);
/* "View.MemoryView":1076
* for dim in range(memview.view.ndim):
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<<
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1
*
*/
(__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]);
/* "View.MemoryView":1077
* dst.shape[dim] = shape[dim]
* dst.strides[dim] = strides[dim]
* dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object')
*/
if ((__pyx_v_suboffsets != 0)) {
__pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]);
} else {
__pyx_t_5 = -1L;
}
(__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5;
}
/* "View.MemoryView":1063
*
* @cname('__pyx_memoryview_slice_copy')
* cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<<
* cdef int dim
* cdef (Py_ssize_t*) shape, strides, suboffsets
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
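/* slice_copy above flattens a memoryview's Py_buffer metadata into a
 * __Pyx_memviewslice: it copies shape, strides and suboffsets per dimension,
 * substituting -1 (meaning "direct dimension") when the buffer exposes no
 * suboffsets array at all. */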
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) {
__Pyx_memviewslice __pyx_v_memviewslice;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy", 0);
/* "View.MemoryView":1083
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<<
* return memoryview_copy_from_slice(memview, &memviewslice)
*
*/
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice));
/* "View.MemoryView":1084
* cdef __Pyx_memviewslice memviewslice
* slice_copy(memview, &memviewslice)
* return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_object_from_slice')
*/
__Pyx_XDECREF(__pyx_r);
__pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1084, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_r = __pyx_t_1;
__pyx_t_1 = 0;
goto __pyx_L0;
/* "View.MemoryView":1080
*
* @cname('__pyx_memoryview_copy_object')
* cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<<
* "Create a new memoryview object"
* cdef __Pyx_memviewslice memviewslice
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
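/* memoryview_copy above is a thin convenience wrapper: snapshot the view
 * with slice_copy, then defer to memoryview_copy_from_slice. */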
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) {
PyObject *(*__pyx_v_to_object_func)(char *);
int (*__pyx_v_to_dtype_func)(char *, PyObject *);
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
int __pyx_t_2;
PyObject *(*__pyx_t_3)(char *);
int (*__pyx_t_4)(char *, PyObject *);
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0);
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
__pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type);
__pyx_t_2 = (__pyx_t_1 != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1095
*
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<<
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
*/
__pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func;
__pyx_v_to_object_func = __pyx_t_3;
/* "View.MemoryView":1096
* if isinstance(memview, _memoryviewslice):
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<<
* else:
* to_object_func = NULL
*/
__pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func;
__pyx_v_to_dtype_func = __pyx_t_4;
/* "View.MemoryView":1094
* cdef int (*to_dtype_func)(char *, object) except 0
*
* if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<<
* to_object_func = (<_memoryviewslice> memview).to_object_func
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
*/
goto __pyx_L3;
}
/* "View.MemoryView":1098
* to_dtype_func = (<_memoryviewslice> memview).to_dtype_func
* else:
* to_object_func = NULL # <<<<<<<<<<<<<<
* to_dtype_func = NULL
*
*/
/*else*/ {
__pyx_v_to_object_func = NULL;
/* "View.MemoryView":1099
* else:
* to_object_func = NULL
* to_dtype_func = NULL # <<<<<<<<<<<<<<
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
*/
__pyx_v_to_dtype_func = NULL;
}
__pyx_L3:;
/* "View.MemoryView":1101
* to_dtype_func = NULL
*
* return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<<
* to_object_func, to_dtype_func,
* memview.dtype_is_object)
*/
__Pyx_XDECREF(__pyx_r);
/* "View.MemoryView":1103
* return memoryview_fromslice(memviewslice[0], memview.view.ndim,
* to_object_func, to_dtype_func,
* memview.dtype_is_object) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(2, 1101, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_5);
__pyx_r = __pyx_t_5;
__pyx_t_5 = 0;
goto __pyx_L0;
/* "View.MemoryView":1087
*
* @cname('__pyx_memoryview_copy_object_from_slice')
* cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<<
* """
* Create a new memoryview object from a given memoryview object and slice.
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
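/* memoryview_copy_from_slice above rebuilds a Python-level memoryview from a
 * C slice. It propagates the dtype conversion function pointers only when
 * the source is itself a _memoryviewslice (a plain memoryview has none, so
 * NULL is passed and the generic conversion path is used), then delegates to
 * memoryview_fromslice. */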
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) {
Py_ssize_t __pyx_r;
int __pyx_t_1;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
__pyx_t_1 = ((__pyx_v_arg < 0) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1111
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0:
* return -arg # <<<<<<<<<<<<<<
* else:
* return arg
*/
__pyx_r = (-__pyx_v_arg);
goto __pyx_L0;
/* "View.MemoryView":1110
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil:
* if arg < 0: # <<<<<<<<<<<<<<
* return -arg
* else:
*/
}
/* "View.MemoryView":1113
* return -arg
* else:
* return arg # <<<<<<<<<<<<<<
*
* @cname('__pyx_get_best_slice_order')
*/
/*else*/ {
__pyx_r = __pyx_v_arg;
goto __pyx_L0;
}
/* "View.MemoryView":1109
*
*
* cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<<
* if arg < 0:
* return -arg
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) {
int __pyx_v_i;
Py_ssize_t __pyx_v_c_stride;
Py_ssize_t __pyx_v_f_stride;
char __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1121
* """
* cdef int i
* cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<<
* cdef Py_ssize_t f_stride = 0
*
*/
__pyx_v_c_stride = 0;
/* "View.MemoryView":1122
* cdef int i
* cdef Py_ssize_t c_stride = 0
* cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_f_stride = 0;
/* "View.MemoryView":1124
* cdef Py_ssize_t f_stride = 0
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1126
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1127
* if mslice.shape[i] > 1:
* c_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
goto __pyx_L4_break;
/* "View.MemoryView":1125
*
* for i in range(ndim - 1, -1, -1):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* c_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L4_break:;
/* "View.MemoryView":1129
* break
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
*/
__pyx_t_1 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_1;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
__pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1131
* for i in range(ndim):
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i] # <<<<<<<<<<<<<<
* break
*
*/
__pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1132
* if mslice.shape[i] > 1:
* f_stride = mslice.strides[i]
* break # <<<<<<<<<<<<<<
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
*/
goto __pyx_L7_break;
/* "View.MemoryView":1130
*
* for i in range(ndim):
* if mslice.shape[i] > 1: # <<<<<<<<<<<<<<
* f_stride = mslice.strides[i]
* break
*/
}
}
__pyx_L7_break:;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
__pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1135
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride):
* return 'C' # <<<<<<<<<<<<<<
* else:
* return 'F'
*/
__pyx_r = 'C';
goto __pyx_L0;
/* "View.MemoryView":1134
* break
*
* if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<<
* return 'C'
* else:
*/
}
/* "View.MemoryView":1137
* return 'C'
* else:
* return 'F' # <<<<<<<<<<<<<<
*
* @cython.cdivision(True)
*/
/*else*/ {
__pyx_r = 'F';
goto __pyx_L0;
}
/* "View.MemoryView":1116
*
* @cname('__pyx_get_best_slice_order')
* cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<<
* """
* Figure out the best memory access order for a given slice.
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
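/* Summary of get_best_order above: c_stride is the stride of the last
 * dimension with extent > 1, f_stride that of the first such dimension; the
 * order whose inner stride is smaller in magnitude wins, with ties going to
 * 'C'. Illustrative example (assumed values, not from this module): a
 * C-contiguous 3x4 array of 4-byte ints has strides {16, 4}, so
 * c_stride = 4, f_stride = 16 and |4| <= |16| yields 'C'; its transpose has
 * strides {4, 16} and yields 'F'. */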
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
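/* The recursion below copies an ndim-dimensional strided block: for
 * ndim == 1 it either memcpy's the whole run at once (when both strides are
 * positive and equal to itemsize, i.e. the data is contiguous) or copies one
 * item per iteration while advancing src/dst by their strides; for ndim > 1
 * it recurses into each slice along the outermost dimension. */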
static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent;
Py_ssize_t __pyx_v_dst_extent;
Py_ssize_t __pyx_v_src_stride;
Py_ssize_t __pyx_v_dst_stride;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
Py_ssize_t __pyx_t_4;
Py_ssize_t __pyx_t_5;
Py_ssize_t __pyx_t_6;
/* "View.MemoryView":1147
*
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
*/
__pyx_v_src_extent = (__pyx_v_src_shape[0]);
/* "View.MemoryView":1148
* cdef Py_ssize_t i
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0]
*/
__pyx_v_dst_extent = (__pyx_v_dst_shape[0]);
/* "View.MemoryView":1149
* cdef Py_ssize_t src_extent = src_shape[0]
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
*/
__pyx_v_src_stride = (__pyx_v_src_strides[0]);
/* "View.MemoryView":1150
* cdef Py_ssize_t dst_extent = dst_shape[0]
* cdef Py_ssize_t src_stride = src_strides[0]
* cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_dst_stride = (__pyx_v_dst_strides[0]);
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
__pyx_t_2 = ((__pyx_v_src_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
__pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0);
if (__pyx_t_2) {
} else {
__pyx_t_1 = __pyx_t_2;
goto __pyx_L5_bool_binop_done;
}
/* "View.MemoryView":1154
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
*/
__pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize);
if (__pyx_t_2) {
__pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride));
}
__pyx_t_3 = (__pyx_t_2 != 0);
__pyx_t_1 = __pyx_t_3;
__pyx_L5_bool_binop_done:;
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
if (__pyx_t_1) {
/* "View.MemoryView":1155
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)));
/* "View.MemoryView":1153
*
* if ndim == 1:
* if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<<
* <size_t> src_stride == itemsize == <size_t> dst_stride):
* memcpy(dst_data, src_data, itemsize * dst_extent)
*/
goto __pyx_L4;
}
/* "View.MemoryView":1157
* memcpy(dst_data, src_data, itemsize * dst_extent)
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1158
* else:
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<<
* src_data += src_stride
* dst_data += dst_stride
*/
(void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize));
/* "View.MemoryView":1159
* for i in range(dst_extent):
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
* else:
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1160
* memcpy(dst_data, src_data, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
* else:
* for i in range(dst_extent):
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L4:;
/* "View.MemoryView":1152
* cdef Py_ssize_t dst_stride = dst_strides[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* if (src_stride > 0 and dst_stride > 0 and
* <size_t> src_stride == itemsize == <size_t> dst_stride):
*/
goto __pyx_L3;
}
/* "View.MemoryView":1162
* dst_data += dst_stride
* else:
* for i in range(dst_extent): # <<<<<<<<<<<<<<
* _copy_strided_to_strided(src_data, src_strides + 1,
* dst_data, dst_strides + 1,
*/
/*else*/ {
__pyx_t_4 = __pyx_v_dst_extent;
__pyx_t_5 = __pyx_t_4;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1163
* else:
* for i in range(dst_extent):
* _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<<
* dst_data, dst_strides + 1,
* src_shape + 1, dst_shape + 1,
*/
_copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize);
/* "View.MemoryView":1167
* src_shape + 1, dst_shape + 1,
* ndim - 1, itemsize)
* src_data += src_stride # <<<<<<<<<<<<<<
* dst_data += dst_stride
*
*/
__pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride);
/* "View.MemoryView":1168
* ndim - 1, itemsize)
* src_data += src_stride
* dst_data += dst_stride # <<<<<<<<<<<<<<
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src,
*/
__pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1140
*
* @cython.cdivision(True)
* cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<<
* char *dst_data, Py_ssize_t *dst_strides,
* Py_ssize_t *src_shape, Py_ssize_t *dst_shape,
*/
/* function exit code */
}
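/*
 * Note on _copy_strided_to_strided (View.MemoryView:1140): a recursive
 * strided copy. In the base case (ndim == 1) it takes a single-memcpy
 * fast path when both strides are positive and equal to itemsize (the
 * data is contiguous); otherwise it memcpy's one item at a time,
 * advancing each pointer by its stride. For ndim > 1 it recurses over
 * the leading dimension with shape/strides pointers shifted by one.
 */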
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) {
/* "View.MemoryView":1173
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
* _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<<
* src.shape, dst.shape, ndim, itemsize)
*
*/
_copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1170
* dst_data += dst_stride
*
* cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *dst,
* int ndim, size_t itemsize) nogil:
*/
/* function exit code */
}
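/*
 * copy_strided_to_strided (View.MemoryView:1170) is a thin wrapper that
 * unpacks the data/strides/shape fields of the two memoryview slices and
 * hands them to the recursive worker above.
 */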
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) {
Py_ssize_t __pyx_v_shape;
Py_ssize_t __pyx_v_size;
Py_ssize_t __pyx_r;
Py_ssize_t __pyx_t_1;
Py_ssize_t *__pyx_t_2;
Py_ssize_t *__pyx_t_3;
Py_ssize_t *__pyx_t_4;
/* "View.MemoryView":1179
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil:
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<<
*
* for shape in src.shape[:ndim]:
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_size = __pyx_t_1;
/* "View.MemoryView":1181
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*
* for shape in src.shape[:ndim]: # <<<<<<<<<<<<<<
* size *= shape
*
*/
__pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim);
for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) {
__pyx_t_2 = __pyx_t_4;
__pyx_v_shape = (__pyx_t_2[0]);
/* "View.MemoryView":1182
*
* for shape in src.shape[:ndim]:
* size *= shape # <<<<<<<<<<<<<<
*
* return size
*/
__pyx_v_size = (__pyx_v_size * __pyx_v_shape);
}
/* "View.MemoryView":1184
* size *= shape
*
* return size # <<<<<<<<<<<<<<
*
* @cname('__pyx_fill_contig_strides_array')
*/
__pyx_r = __pyx_v_size;
goto __pyx_L0;
/* "View.MemoryView":1177
*
* @cname('__pyx_memoryview_slice_get_size')
* cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<<
* "Return the size of the memory occupied by the slice in number of bytes"
* cdef Py_ssize_t shape, size = src.memview.view.itemsize
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
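/*
 * slice_get_size (View.MemoryView:1177) computes
 *     size = itemsize * shape[0] * ... * shape[ndim-1].
 * Illustrative numbers (not from the source): a slice of shape (2, 3, 4)
 * with an 8-byte itemsize occupies 8 * 2 * 3 * 4 = 192 bytes.
 */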
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) {
int __pyx_v_idx;
Py_ssize_t __pyx_r;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
__pyx_t_1 = ((__pyx_v_order == 'F') != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1197
*
* if order == 'F':
* for idx in range(ndim): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
__pyx_t_2 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_idx = __pyx_t_4;
/* "View.MemoryView":1198
* if order == 'F':
* for idx in range(ndim):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
* else:
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1199
* for idx in range(ndim):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
* else:
* for idx in range(ndim - 1, -1, -1):
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
/* "View.MemoryView":1196
* cdef int idx
*
* if order == 'F': # <<<<<<<<<<<<<<
* for idx in range(ndim):
* strides[idx] = stride
*/
goto __pyx_L3;
}
/* "View.MemoryView":1201
* stride *= shape[idx]
* else:
* for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* strides[idx] = stride
* stride *= shape[idx]
*/
/*else*/ {
for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) {
__pyx_v_idx = __pyx_t_2;
/* "View.MemoryView":1202
* else:
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride # <<<<<<<<<<<<<<
* stride *= shape[idx]
*
*/
(__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride;
/* "View.MemoryView":1203
* for idx in range(ndim - 1, -1, -1):
* strides[idx] = stride
* stride *= shape[idx] # <<<<<<<<<<<<<<
*
* return stride
*/
__pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx]));
}
}
__pyx_L3:;
/* "View.MemoryView":1205
* stride *= shape[idx]
*
* return stride # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_data_to_temp')
*/
__pyx_r = __pyx_v_stride;
goto __pyx_L0;
/* "View.MemoryView":1187
*
* @cname('__pyx_fill_contig_strides_array')
* cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<<
* Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride,
* int ndim, char order) nogil:
*/
/* function exit code */
__pyx_L0:;
return __pyx_r;
}
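/*
 * fill_contig_strides_array (View.MemoryView:1187) writes the strides a
 * contiguous buffer of the given shape would have and returns the total
 * extent in bytes: Fortran order accumulates from the first dimension,
 * C order from the last. Illustrative example (numbers assumed, not from
 * the source) with shape = {2, 3, 4} and stride = itemsize = 8:
 *     order 'C' -> strides = {96, 32, 8},  return value 192
 *     order 'F' -> strides = {8, 16, 48},  return value 192
 */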
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) {
int __pyx_v_i;
void *__pyx_v_result;
size_t __pyx_v_itemsize;
size_t __pyx_v_size;
void *__pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
struct __pyx_memoryview_obj *__pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1219
* cdef void *result
*
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef size_t size = slice_get_size(src, ndim)
*
*/
__pyx_t_1 = __pyx_v_src->memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1220
*
* cdef size_t itemsize = src.memview.view.itemsize
* cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<<
*
* result = malloc(size)
*/
__pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim);
/* "View.MemoryView":1222
* cdef size_t size = slice_get_size(src, ndim)
*
* result = malloc(size) # <<<<<<<<<<<<<<
* if not result:
* _err(MemoryError, NULL)
*/
__pyx_v_result = malloc(__pyx_v_size);
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
__pyx_t_2 = ((!(__pyx_v_result != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1224
* result = malloc(size)
* if not result:
* _err(MemoryError, NULL) # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(2, 1224, __pyx_L1_error)
/* "View.MemoryView":1223
*
* result = malloc(size)
* if not result: # <<<<<<<<<<<<<<
* _err(MemoryError, NULL)
*
*/
}
/* "View.MemoryView":1227
*
*
* tmpslice.data = <char *> result # <<<<<<<<<<<<<<
* tmpslice.memview = src.memview
* for i in range(ndim):
*/
__pyx_v_tmpslice->data = ((char *)__pyx_v_result);
/* "View.MemoryView":1228
*
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview # <<<<<<<<<<<<<<
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
*/
__pyx_t_4 = __pyx_v_src->memview;
__pyx_v_tmpslice->memview = __pyx_t_4;
/* "View.MemoryView":1229
* tmpslice.data = <char *> result
* tmpslice.memview = src.memview
* for i in range(ndim): # <<<<<<<<<<<<<<
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1230
* tmpslice.memview = src.memview
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<<
* tmpslice.suboffsets[i] = -1
*
*/
(__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]);
/* "View.MemoryView":1231
* for i in range(ndim):
* tmpslice.shape[i] = src.shape[i]
* tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize,
*/
(__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1233
* tmpslice.suboffsets[i] = -1
*
* fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<<
* ndim, order)
*
*/
(void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order));
/* "View.MemoryView":1237
*
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0
*/
__pyx_t_3 = __pyx_v_ndim;
__pyx_t_5 = __pyx_t_3;
for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) {
__pyx_v_i = __pyx_t_6;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
__pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1239
* for i in range(ndim):
* if tmpslice.shape[i] == 1:
* tmpslice.strides[i] = 0 # <<<<<<<<<<<<<<
*
* if slice_is_contig(src[0], order, ndim):
*/
(__pyx_v_tmpslice->strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1238
*
* for i in range(ndim):
* if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<<
* tmpslice.strides[i] = 0
*
*/
}
}
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1242
*
* if slice_is_contig(src[0], order, ndim):
* memcpy(result, src.data, size) # <<<<<<<<<<<<<<
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*/
(void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size));
/* "View.MemoryView":1241
* tmpslice.strides[i] = 0
*
* if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<<
* memcpy(result, src.data, size)
* else:
*/
goto __pyx_L9;
}
/* "View.MemoryView":1244
* memcpy(result, src.data, size)
* else:
* copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<<
*
* return result
*/
/*else*/ {
copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize);
}
__pyx_L9:;
/* "View.MemoryView":1246
* copy_strided_to_strided(src, tmpslice, ndim, itemsize)
*
* return result # <<<<<<<<<<<<<<
*
*
*/
__pyx_r = __pyx_v_result;
goto __pyx_L0;
/* "View.MemoryView":1208
*
* @cname('__pyx_memoryview_copy_data_to_temp')
* cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice *tmpslice,
* char order,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = NULL;
__pyx_L0:;
return __pyx_r;
}
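/*
 * copy_data_to_temp (View.MemoryView:1208) malloc's a buffer large enough
 * for the slice (raising MemoryError through _err on failure), points
 * tmpslice at it with contiguous strides in the requested order, zeroes
 * the stride of any extent-1 dimension, then fills the buffer with a
 * single memcpy when the source is already contiguous in that order or
 * with the recursive strided copy otherwise. The caller owns the returned
 * buffer; memoryview_copy_contents below free()s it as tmpdata. On error
 * the function returns NULL after recording a traceback under the GIL.
 */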
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_extents", 0);
/* "View.MemoryView":1254
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
* (i, extent1, extent2)) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err_dim')
*/
__pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1254, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_GIVEREF(__pyx_t_1);
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_2);
PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2);
__Pyx_GIVEREF(__pyx_t_3);
PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3);
__pyx_t_1 = 0;
__pyx_t_2 = 0;
__pyx_t_3 = 0;
/* "View.MemoryView":1253
* cdef int _err_extents(int i, Py_ssize_t extent1,
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<<
* (i, extent1, extent2))
*
*/
__pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1253, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_4, 0, 0, 0);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__PYX_ERR(2, 1253, __pyx_L1_error)
/* "View.MemoryView":1251
*
* @cname('__pyx_memoryview_err_extents')
* cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<<
* Py_ssize_t extent2) except -1 with gil:
* raise ValueError("got differing extents in dimension %d (got %d and %d)" %
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) {
int __pyx_r;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err_dim", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1258
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil:
* raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_err')
*/
__pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_INCREF(__pyx_v_error);
__pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) {
__pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3);
if (likely(__pyx_t_2)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3);
__Pyx_INCREF(__pyx_t_2);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_3, function);
}
}
__pyx_t_1 = (__pyx_t_2) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_2, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1258, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__Pyx_Raise(__pyx_t_1, 0, 0, 0);
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__PYX_ERR(2, 1258, __pyx_L1_error)
/* "View.MemoryView":1257
*
* @cname('__pyx_memoryview_err_dim')
* cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii') % dim)
*
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) {
int __pyx_r;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("_err", 0);
__Pyx_INCREF(__pyx_v_error);
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
__pyx_t_1 = ((__pyx_v_msg != NULL) != 0);
if (unlikely(__pyx_t_1)) {
/* "View.MemoryView":1263
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL:
* raise error(msg.decode('ascii')) # <<<<<<<<<<<<<<
* else:
* raise error
*/
__pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_INCREF(__pyx_v_error);
__pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_4, function);
}
}
__pyx_t_2 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_4, __pyx_t_5, __pyx_t_3) : __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 1263, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
__Pyx_Raise(__pyx_t_2, 0, 0, 0);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__PYX_ERR(2, 1263, __pyx_L1_error)
/* "View.MemoryView":1262
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil:
* if msg != NULL: # <<<<<<<<<<<<<<
* raise error(msg.decode('ascii'))
* else:
*/
}
/* "View.MemoryView":1265
* raise error(msg.decode('ascii'))
* else:
* raise error # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_copy_contents')
*/
/*else*/ {
__Pyx_Raise(__pyx_v_error, 0, 0, 0);
__PYX_ERR(2, 1265, __pyx_L1_error)
}
/* "View.MemoryView":1261
*
* @cname('__pyx_memoryview_err')
* cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<<
* if msg != NULL:
* raise error(msg.decode('ascii'))
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = -1;
__Pyx_XDECREF(__pyx_v_error);
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
return __pyx_r;
}
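/*
 * _err_extents, _err_dim and _err (View.MemoryView:1251-1265) are the
 * raise helpers used from the nogil copy routines: each acquires the GIL
 * (under WITH_THREAD), builds and raises the appropriate Python
 * exception -- a shape-mismatch ValueError, a per-dimension message
 * formatted with the dimension index, or a bare error object -- and
 * signals failure to C callers by returning -1.
 */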
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) {
void *__pyx_v_tmpdata;
size_t __pyx_v_itemsize;
int __pyx_v_i;
char __pyx_v_order;
int __pyx_v_broadcasting;
int __pyx_v_direct_copy;
__Pyx_memviewslice __pyx_v_tmp;
int __pyx_v_ndim;
int __pyx_r;
Py_ssize_t __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
int __pyx_t_6;
void *__pyx_t_7;
int __pyx_t_8;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
/* "View.MemoryView":1276
* Check for overlapping memory and verify the shapes.
* """
* cdef void *tmpdata = NULL # <<<<<<<<<<<<<<
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
*/
__pyx_v_tmpdata = NULL;
/* "View.MemoryView":1277
* """
* cdef void *tmpdata = NULL
* cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<<
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
*/
__pyx_t_1 = __pyx_v_src.memview->view.itemsize;
__pyx_v_itemsize = __pyx_t_1;
/* "View.MemoryView":1279
* cdef size_t itemsize = src.memview.view.itemsize
* cdef int i
* cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<<
* cdef bint broadcasting = False
* cdef bint direct_copy = False
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim);
/* "View.MemoryView":1280
* cdef int i
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False # <<<<<<<<<<<<<<
* cdef bint direct_copy = False
* cdef __Pyx_memviewslice tmp
*/
__pyx_v_broadcasting = 0;
/* "View.MemoryView":1281
* cdef char order = get_best_order(&src, src_ndim)
* cdef bint broadcasting = False
* cdef bint direct_copy = False # <<<<<<<<<<<<<<
* cdef __Pyx_memviewslice tmp
*
*/
__pyx_v_direct_copy = 0;
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
__pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1285
*
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<<
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim);
/* "View.MemoryView":1284
* cdef __Pyx_memviewslice tmp
*
* if src_ndim < dst_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
*/
goto __pyx_L3;
}
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
__pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1287
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim:
* broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<<
*
* cdef int ndim = max(src_ndim, dst_ndim)
*/
__pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim);
/* "View.MemoryView":1286
* if src_ndim < dst_ndim:
* broadcast_leading(&src, src_ndim, dst_ndim)
* elif dst_ndim < src_ndim: # <<<<<<<<<<<<<<
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
*/
}
__pyx_L3:;
/* "View.MemoryView":1289
* broadcast_leading(&dst, dst_ndim, src_ndim)
*
* cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<<
*
* for i in range(ndim):
*/
__pyx_t_3 = __pyx_v_dst_ndim;
__pyx_t_4 = __pyx_v_src_ndim;
if (((__pyx_t_3 > __pyx_t_4) != 0)) {
__pyx_t_5 = __pyx_t_3;
} else {
__pyx_t_5 = __pyx_t_4;
}
__pyx_v_ndim = __pyx_t_5;
/* "View.MemoryView":1291
* cdef int ndim = max(src_ndim, dst_ndim)
*
* for i in range(ndim): # <<<<<<<<<<<<<<
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
*/
__pyx_t_5 = __pyx_v_ndim;
__pyx_t_3 = __pyx_t_5;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
__pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1294
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1:
* broadcasting = True # <<<<<<<<<<<<<<
* src.strides[i] = 0
* else:
*/
__pyx_v_broadcasting = 1;
/* "View.MemoryView":1295
* if src.shape[i] == 1:
* broadcasting = True
* src.strides[i] = 0 # <<<<<<<<<<<<<<
* else:
* _err_extents(i, dst.shape[i], src.shape[i])
*/
(__pyx_v_src.strides[__pyx_v_i]) = 0;
/* "View.MemoryView":1293
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]:
* if src.shape[i] == 1: # <<<<<<<<<<<<<<
* broadcasting = True
* src.strides[i] = 0
*/
goto __pyx_L7;
}
/* "View.MemoryView":1297
* src.strides[i] = 0
* else:
* _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<<
*
* if src.suboffsets[i] >= 0:
*/
/*else*/ {
__pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1297, __pyx_L1_error)
}
__pyx_L7:;
/* "View.MemoryView":1292
*
* for i in range(ndim):
* if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<<
* if src.shape[i] == 1:
* broadcasting = True
*/
}
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
__pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1300
*
* if src.suboffsets[i] >= 0:
* _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<<
*
* if slices_overlap(&src, &dst, ndim, itemsize):
*/
__pyx_t_6 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(2, 1300, __pyx_L1_error)
/* "View.MemoryView":1299
* _err_extents(i, dst.shape[i], src.shape[i])
*
* if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<<
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
*/
}
}
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
__pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
__pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1305
*
* if not slice_is_contig(src, order, ndim):
* order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<<
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
*/
__pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim);
/* "View.MemoryView":1304
* if slices_overlap(&src, &dst, ndim, itemsize):
*
* if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<<
* order = get_best_order(&dst, ndim)
*
*/
}
/* "View.MemoryView":1307
* order = get_best_order(&dst, ndim)
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<<
* src = tmp
*
*/
__pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(2, 1307, __pyx_L1_error)
__pyx_v_tmpdata = __pyx_t_7;
/* "View.MemoryView":1308
*
* tmpdata = copy_data_to_temp(&src, &tmp, order, ndim)
* src = tmp # <<<<<<<<<<<<<<
*
* if not broadcasting:
*/
__pyx_v_src = __pyx_v_tmp;
/* "View.MemoryView":1302
* _err_dim(ValueError, "Dimension %d is not direct", i)
*
* if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<<
*
* if not slice_is_contig(src, order, ndim):
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1314
*
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<<
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim)
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim);
/* "View.MemoryView":1313
*
*
* if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
*/
goto __pyx_L12;
}
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
__pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1316
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim):
* direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<<
*
* if direct_copy:
*/
__pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim);
/* "View.MemoryView":1315
* if slice_is_contig(src, 'C', ndim):
* direct_copy = slice_is_contig(dst, 'C', ndim)
* elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<<
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
*/
}
__pyx_L12:;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_2 = (__pyx_v_direct_copy != 0);
if (__pyx_t_2) {
/* "View.MemoryView":1320
* if direct_copy:
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1321
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
*/
(void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)));
/* "View.MemoryView":1322
* refcount_copying(&dst, dtype_is_object, ndim, False)
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
* free(tmpdata)
* return 0
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1323
* memcpy(dst.data, src.data, slice_get_size(&src, ndim))
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1324
* refcount_copying(&dst, dtype_is_object, ndim, True)
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* if order == 'F' == get_best_order(&dst, ndim):
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1318
* direct_copy = slice_is_contig(dst, 'F', ndim)
*
* if direct_copy: # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
}
/* "View.MemoryView":1310
* src = tmp
*
* if not broadcasting: # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_2 = (__pyx_v_order == 'F');
if (__pyx_t_2) {
__pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim));
}
__pyx_t_8 = (__pyx_t_2 != 0);
if (__pyx_t_8) {
/* "View.MemoryView":1329
*
*
* transpose_memslice(&src) # <<<<<<<<<<<<<<
* transpose_memslice(&dst)
*
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1329, __pyx_L1_error)
/* "View.MemoryView":1330
*
* transpose_memslice(&src)
* transpose_memslice(&dst) # <<<<<<<<<<<<<<
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
*/
__pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(2, 1330, __pyx_L1_error)
/* "View.MemoryView":1326
* return 0
*
* if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<<
*
*
*/
}
/* "View.MemoryView":1332
* transpose_memslice(&dst)
*
* refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1333
*
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<<
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
*/
copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize);
/* "View.MemoryView":1334
* refcount_copying(&dst, dtype_is_object, ndim, False)
* copy_strided_to_strided(&src, &dst, ndim, itemsize)
* refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
* free(tmpdata)
*/
__pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1336
* refcount_copying(&dst, dtype_is_object, ndim, True)
*
* free(tmpdata) # <<<<<<<<<<<<<<
* return 0
*
*/
free(__pyx_v_tmpdata);
/* "View.MemoryView":1337
*
* free(tmpdata)
* return 0 # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_broadcast_leading')
*/
__pyx_r = 0;
goto __pyx_L0;
/* "View.MemoryView":1268
*
* @cname('__pyx_memoryview_copy_contents')
* cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<<
* __Pyx_memviewslice dst,
* int src_ndim, int dst_ndim,
*/
/* function exit code */
__pyx_L1_error:;
{
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename);
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
__pyx_r = -1;
__pyx_L0:;
return __pyx_r;
}
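/*
 * memoryview_copy_contents (View.MemoryView:1268) is the top-level copy:
 *   1. pad whichever slice has fewer dimensions with broadcast leading
 *      dims (broadcast_leading);
 *   2. per dimension, let a src extent of 1 broadcast by zeroing its
 *      stride, raise on any other extent mismatch (_err_extents), and
 *      reject indirect dimensions (suboffset >= 0) via _err_dim;
 *   3. if the slices overlap in memory, copy src into a temporary
 *      contiguous buffer first (copy_data_to_temp);
 *   4. when not broadcasting and both slices are contiguous in the same
 *      order, do one memcpy and return;
 *   5. otherwise, if both slices are best traversed in Fortran order,
 *      transpose both in place, then fall back to copy_strided_to_strided,
 *      suspending and restoring object refcounting around the raw copy.
 * Returns 0 on success and -1 after raising; tmpdata is freed on both
 * successful paths.
 */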
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) {
int __pyx_v_i;
int __pyx_v_offset;
int __pyx_t_1;
int __pyx_t_2;
int __pyx_t_3;
/* "View.MemoryView":1344
* int ndim_other) nogil:
* cdef int i
* cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<<
*
* for i in range(ndim - 1, -1, -1):
*/
__pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim);
/* "View.MemoryView":1346
* cdef int offset = ndim_other - ndim
*
* for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<<
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
*/
for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) {
__pyx_v_i = __pyx_t_1;
/* "View.MemoryView":1347
*
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<<
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*/
(__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]);
/* "View.MemoryView":1348
* for i in range(ndim - 1, -1, -1):
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<<
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
*/
(__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]);
/* "View.MemoryView":1349
* mslice.shape[i + offset] = mslice.shape[i]
* mslice.strides[i + offset] = mslice.strides[i]
* mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<<
*
* for i in range(offset):
*/
(__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]);
}
/* "View.MemoryView":1351
* mslice.suboffsets[i + offset] = mslice.suboffsets[i]
*
* for i in range(offset): # <<<<<<<<<<<<<<
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
*/
__pyx_t_1 = __pyx_v_offset;
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1352
*
* for i in range(offset):
* mslice.shape[i] = 1 # <<<<<<<<<<<<<<
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1
*/
(__pyx_v_mslice->shape[__pyx_v_i]) = 1;
/* "View.MemoryView":1353
* for i in range(offset):
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<<
* mslice.suboffsets[i] = -1
*
*/
(__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]);
/* "View.MemoryView":1354
* mslice.shape[i] = 1
* mslice.strides[i] = mslice.strides[0]
* mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<<
*
*
*/
(__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L;
}
/* "View.MemoryView":1340
*
* @cname('__pyx_memoryview_broadcast_leading')
* cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<<
* int ndim,
* int ndim_other) nogil:
*/
/* function exit code */
}
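/*
 * broadcast_leading (View.MemoryView:1340) shifts the existing
 * shape/strides/suboffsets entries up by offset = ndim_other - ndim and
 * fills the vacated leading dimensions with extent 1, stride strides[0]
 * and suboffset -1. Illustrative example (assumed numbers): growing a
 * 2-d slice of shape {3, 4} to 4-d yields shape {1, 1, 3, 4}, so it can
 * be copied against a 4-d destination.
 */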
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) {
int __pyx_t_1;
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
__pyx_t_1 = (__pyx_v_dtype_is_object != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1367
*
* if dtype_is_object:
* refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<<
* dst.strides, ndim, inc)
*
*/
__pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1366
*
*
* if dtype_is_object: # <<<<<<<<<<<<<<
* refcount_objects_in_slice_with_gil(dst.data, dst.shape,
* dst.strides, ndim, inc)
*/
}
/* "View.MemoryView":1362
*
* @cname('__pyx_memoryview_refcount_copying')
* cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<<
* int ndim, bint inc) nogil:
*
*/
/* function exit code */
}
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
__Pyx_RefNannyDeclarations
#ifdef WITH_THREAD
PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure();
#endif
__Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0);
/* "View.MemoryView":1374
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
* refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<<
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc);
/* "View.MemoryView":1371
*
* @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil')
* cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* bint inc) with gil:
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
#ifdef WITH_THREAD
__Pyx_PyGILState_Release(__pyx_gilstate_save);
#endif
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
__Pyx_RefNannyDeclarations
Py_ssize_t __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
__Pyx_RefNannySetupContext("refcount_objects_in_slice", 0);
/* "View.MemoryView":1381
* cdef Py_ssize_t i
*
* for i in range(shape[0]): # <<<<<<<<<<<<<<
* if ndim == 1:
* if inc:
*/
__pyx_t_1 = (__pyx_v_shape[0]);
__pyx_t_2 = __pyx_t_1;
for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) {
__pyx_v_i = __pyx_t_3;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
__pyx_t_4 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
__pyx_t_4 = (__pyx_v_inc != 0);
if (__pyx_t_4) {
/* "View.MemoryView":1384
* if ndim == 1:
* if inc:
* Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* Py_DECREF((<PyObject **> data)[0])
*/
Py_INCREF((((PyObject **)__pyx_v_data)[0]));
/* "View.MemoryView":1383
* for i in range(shape[0]):
* if ndim == 1:
* if inc: # <<<<<<<<<<<<<<
* Py_INCREF((<PyObject **> data)[0])
* else:
*/
goto __pyx_L6;
}
/* "View.MemoryView":1386
* Py_INCREF((<PyObject **> data)[0])
* else:
* Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<<
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
*/
/*else*/ {
Py_DECREF((((PyObject **)__pyx_v_data)[0]));
}
__pyx_L6:;
/* "View.MemoryView":1382
*
* for i in range(shape[0]):
* if ndim == 1: # <<<<<<<<<<<<<<
* if inc:
* Py_INCREF((<PyObject **> data)[0])
*/
goto __pyx_L5;
}
/* "View.MemoryView":1388
* Py_DECREF((<PyObject **> data)[0])
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, inc)
*
*/
/*else*/ {
/* "View.MemoryView":1389
* else:
* refcount_objects_in_slice(data, shape + 1, strides + 1,
* ndim - 1, inc) # <<<<<<<<<<<<<<
*
* data += strides[0]
*/
__pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc);
}
__pyx_L5:;
/* "View.MemoryView":1391
* ndim - 1, inc)
*
* data += strides[0] # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0]));
}
/* "View.MemoryView":1377
*
* @cname('__pyx_memoryview_refcount_objects_in_slice')
* cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim, bint inc):
* cdef Py_ssize_t i
*/
/* function exit code */
__Pyx_RefNannyFinishContext();
}
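/*
 * refcount_copying, refcount_objects_in_slice_with_gil and
 * refcount_objects_in_slice (View.MemoryView:1362-1391) keep Python
 * refcounts consistent across raw byte copies of object-dtype slices:
 * when dtype_is_object, every element is visited recursively and
 * Py_INCREF'd or Py_DECREF'd. Callers bracket a copy with inc=False
 * before it (releasing the references about to be overwritten) and
 * inc=True after it (owning the references just written); the _with_gil
 * variant acquires the GIL so this is safe from nogil code.
 */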
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) {
/* "View.MemoryView":1400
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<<
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0);
/* "View.MemoryView":1401
* bint dtype_is_object) nogil:
* refcount_copying(dst, dtype_is_object, ndim, False)
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<<
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True)
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1403
* _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim,
* itemsize, item)
* refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<<
*
*
*/
__pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1);
/* "View.MemoryView":1397
*
* @cname('__pyx_memoryview_slice_assign_scalar')
* cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<<
* size_t itemsize, void *item,
* bint dtype_is_object) nogil:
*/
/* function exit code */
}
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) {
CYTHON_UNUSED Py_ssize_t __pyx_v_i;
Py_ssize_t __pyx_v_stride;
Py_ssize_t __pyx_v_extent;
int __pyx_t_1;
Py_ssize_t __pyx_t_2;
Py_ssize_t __pyx_t_3;
Py_ssize_t __pyx_t_4;
/* "View.MemoryView":1411
* size_t itemsize, void *item) nogil:
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<<
* cdef Py_ssize_t extent = shape[0]
*
*/
__pyx_v_stride = (__pyx_v_strides[0]);
/* "View.MemoryView":1412
* cdef Py_ssize_t i
* cdef Py_ssize_t stride = strides[0]
* cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<<
*
* if ndim == 1:
*/
__pyx_v_extent = (__pyx_v_shape[0]);
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
__pyx_t_1 = ((__pyx_v_ndim == 1) != 0);
if (__pyx_t_1) {
/* "View.MemoryView":1415
*
* if ndim == 1:
* for i in range(extent): # <<<<<<<<<<<<<<
* memcpy(data, item, itemsize)
* data += stride
*/
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1416
* if ndim == 1:
* for i in range(extent):
* memcpy(data, item, itemsize) # <<<<<<<<<<<<<<
* data += stride
* else:
*/
(void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize));
/* "View.MemoryView":1417
* for i in range(extent):
* memcpy(data, item, itemsize)
* data += stride # <<<<<<<<<<<<<<
* else:
* for i in range(extent):
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
/* "View.MemoryView":1414
* cdef Py_ssize_t extent = shape[0]
*
* if ndim == 1: # <<<<<<<<<<<<<<
* for i in range(extent):
* memcpy(data, item, itemsize)
*/
goto __pyx_L3;
}
/* "View.MemoryView":1419
* data += stride
* else:
* for i in range(extent): # <<<<<<<<<<<<<<
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
*/
/*else*/ {
__pyx_t_2 = __pyx_v_extent;
__pyx_t_3 = __pyx_t_2;
for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) {
__pyx_v_i = __pyx_t_4;
/* "View.MemoryView":1420
* else:
* for i in range(extent):
* _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<<
* ndim - 1, itemsize, item)
* data += stride
*/
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item);
/* "View.MemoryView":1422
* _slice_assign_scalar(data, shape + 1, strides + 1,
* ndim - 1, itemsize, item)
* data += stride # <<<<<<<<<<<<<<
*
*
*/
__pyx_v_data = (__pyx_v_data + __pyx_v_stride);
}
}
__pyx_L3:;
/* "View.MemoryView":1407
*
* @cname('__pyx_memoryview__slice_assign_scalar')
* cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<<
* Py_ssize_t *strides, int ndim,
* size_t itemsize, void *item) nogil:
*/
/* function exit code */
}
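/*
 * slice_assign_scalar and _slice_assign_scalar (View.MemoryView:1397-1422)
 * broadcast one item into every element of a slice: the wrapper suspends
 * and restores object refcounting exactly as in the copy path, while the
 * worker recurses over the leading dimensions and, at ndim == 1,
 * memcpy's the item into each cell and advances by the stride.
 */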
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* Python wrapper */
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/
static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0};
static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) {
PyObject *__pyx_v___pyx_type = 0;
long __pyx_v___pyx_checksum;
PyObject *__pyx_v___pyx_state = 0;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
PyObject *__pyx_r = 0;
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0);
{
static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0};
PyObject* values[3] = {0,0,0};
if (unlikely(__pyx_kwds)) {
Py_ssize_t kw_args;
const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args);
switch (pos_args) {
case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
CYTHON_FALLTHROUGH;
case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
CYTHON_FALLTHROUGH;
case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
CYTHON_FALLTHROUGH;
case 0: break;
default: goto __pyx_L5_argtuple_error;
}
kw_args = PyDict_Size(__pyx_kwds);
switch (pos_args) {
case 0:
if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--;
else goto __pyx_L5_argtuple_error;
CYTHON_FALLTHROUGH;
case 1:
if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(2, 1, __pyx_L3_error)
}
CYTHON_FALLTHROUGH;
case 2:
if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--;
else {
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(2, 1, __pyx_L3_error)
}
}
if (unlikely(kw_args > 0)) {
if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(2, 1, __pyx_L3_error)
}
} else if (PyTuple_GET_SIZE(__pyx_args) != 3) {
goto __pyx_L5_argtuple_error;
} else {
values[0] = PyTuple_GET_ITEM(__pyx_args, 0);
values[1] = PyTuple_GET_ITEM(__pyx_args, 1);
values[2] = PyTuple_GET_ITEM(__pyx_args, 2);
}
__pyx_v___pyx_type = values[0];
__pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(2, 1, __pyx_L3_error)
__pyx_v___pyx_state = values[2];
}
goto __pyx_L4_argument_unpacking_done;
__pyx_L5_argtuple_error:;
__Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(2, 1, __pyx_L3_error)
__pyx_L3_error:;
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__Pyx_RefNannyFinishContext();
return NULL;
__pyx_L4_argument_unpacking_done:;
__pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state);
/* function exit code */
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
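/*
 * __pyx_unpickle_Enum is the module-level helper Cython generates so the
 * internal Enum class can round-trip through pickle. The wrapper above
 * accepts (__pyx_type, __pyx_checksum, __pyx_state) positionally or by
 * keyword and dispatches to the implementation below, which raises
 * pickle.PickleError unless the checksum matches 0xb068931 -- apparently
 * a compile-time digest of the class layout, guarding against unpickling
 * state produced by an incompatible build.
 */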
static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_v___pyx_PickleError = 0;
PyObject *__pyx_v___pyx_result = 0;
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
int __pyx_t_1;
PyObject *__pyx_t_2 = NULL;
PyObject *__pyx_t_3 = NULL;
PyObject *__pyx_t_4 = NULL;
PyObject *__pyx_t_5 = NULL;
int __pyx_t_6;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0);
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
__pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0);
if (__pyx_t_1) {
/* "(tree fragment)":5
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<<
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
*/
__pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_n_s_PickleError);
__Pyx_GIVEREF(__pyx_n_s_PickleError);
PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError);
__pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 5, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__Pyx_INCREF(__pyx_t_2);
__pyx_v___pyx_PickleError = __pyx_t_2;
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":6
* if __pyx_checksum != 0xb068931:
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<<
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
*/
__pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(2, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_4);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_INCREF(__pyx_v___pyx_PickleError);
__pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL;
if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_5)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_5);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_5) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_5, __pyx_t_4) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4);
__Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0;
__Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__Pyx_Raise(__pyx_t_3, 0, 0, 0);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
__PYX_ERR(2, 6, __pyx_L1_error)
/* "(tree fragment)":4
* cdef object __pyx_PickleError
* cdef object __pyx_result
* if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<<
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
*/
}
/* "(tree fragment)":7
* from pickle import PickleError as __pyx_PickleError
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<<
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
*/
__pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(2, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_2);
__pyx_t_4 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) {
__pyx_t_4 = PyMethod_GET_SELF(__pyx_t_2);
if (likely(__pyx_t_4)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2);
__Pyx_INCREF(__pyx_t_4);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_2, function);
}
}
__pyx_t_3 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_4, __pyx_v___pyx_type) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type);
__Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0;
if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 7, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0;
__pyx_v___pyx_result = __pyx_t_3;
__pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
__pyx_t_1 = (__pyx_v___pyx_state != Py_None);
__pyx_t_6 = (__pyx_t_1 != 0);
if (__pyx_t_6) {
/* "(tree fragment)":9
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<<
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
*/
if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(2, 9, __pyx_L1_error)
__pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(2, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_3);
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0;
/* "(tree fragment)":8
* raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum)
* __pyx_result = Enum.__new__(__pyx_type)
* if __pyx_state is not None: # <<<<<<<<<<<<<<
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
*/
}
/* "(tree fragment)":10
* if __pyx_state is not None:
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result # <<<<<<<<<<<<<<
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
*/
__Pyx_XDECREF(__pyx_r);
__Pyx_INCREF(__pyx_v___pyx_result);
__pyx_r = __pyx_v___pyx_result;
goto __pyx_L0;
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
/* function exit code */
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_2);
__Pyx_XDECREF(__pyx_t_3);
__Pyx_XDECREF(__pyx_t_4);
__Pyx_XDECREF(__pyx_t_5);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = NULL;
__pyx_L0:;
__Pyx_XDECREF(__pyx_v___pyx_PickleError);
__Pyx_XDECREF(__pyx_v___pyx_result);
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
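/* Unpickling protocol used above: 0xb068931 is a checksum of the Enum type's
 * member layout baked in at Cython compile time.  The matching
 * __reduce_cython__ (registered in the method tables below) presumably emits
 * (__pyx_unpickle_Enum, (type(self), 0xb068931, state)), so a pickle written
 * against an incompatible build of this extension fails loudly with
 * PickleError instead of silently corrupting the new object's fields.
 */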
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) {
PyObject *__pyx_r = NULL;
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_t_2;
Py_ssize_t __pyx_t_3;
int __pyx_t_4;
int __pyx_t_5;
PyObject *__pyx_t_6 = NULL;
PyObject *__pyx_t_7 = NULL;
PyObject *__pyx_t_8 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0);
/* "(tree fragment)":12
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<<
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(2, 12, __pyx_L1_error)
}
__pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__Pyx_GOTREF(__pyx_v___pyx_result->name);
__Pyx_DECREF(__pyx_v___pyx_result->name);
__pyx_v___pyx_result->name = __pyx_t_1;
__pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()");
__PYX_ERR(2, 13, __pyx_L1_error)
}
__pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(2, 13, __pyx_L1_error)
__pyx_t_4 = ((__pyx_t_3 > 1) != 0);
if (__pyx_t_4) {
} else {
__pyx_t_2 = __pyx_t_4;
goto __pyx_L4_bool_binop_done;
}
__pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 13, __pyx_L1_error)
__pyx_t_5 = (__pyx_t_4 != 0);
__pyx_t_2 = __pyx_t_5;
__pyx_L4_bool_binop_done:;
if (__pyx_t_2) {
/* "(tree fragment)":14
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
* __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<<
*/
__pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(2, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_7);
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(__pyx_v___pyx_state == Py_None)) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable");
__PYX_ERR(2, 14, __pyx_L1_error)
}
__pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(2, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_6);
__pyx_t_8 = NULL;
if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) {
__pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7);
if (likely(__pyx_t_8)) {
PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7);
__Pyx_INCREF(__pyx_t_8);
__Pyx_INCREF(function);
__Pyx_DECREF_SET(__pyx_t_7, function);
}
}
__pyx_t_1 = (__pyx_t_8) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_8, __pyx_t_6) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6);
__Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0;
__Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0;
if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 14, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0;
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":13
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state):
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<<
* __pyx_result.__dict__.update(__pyx_state[1])
*/
}
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/* function exit code */
__pyx_r = Py_None; __Pyx_INCREF(Py_None);
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_XDECREF(__pyx_t_6);
__Pyx_XDECREF(__pyx_t_7);
__Pyx_XDECREF(__pyx_t_8);
__Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename);
__pyx_r = 0;
__pyx_L0:;
__Pyx_XGIVEREF(__pyx_r);
__Pyx_RefNannyFinishContext();
return __pyx_r;
}
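/* __pyx_unpickle_Enum__set_state restores state[0] into the C-level `name`
 * slot and, only when the pickle carried a second item and the instance has
 * a __dict__ (i.e. a Python-level subclass), merges state[1] into it via
 * __dict__.update().  The repeated Py_None checks exist because the tuple
 * argument is statically typed but may still legally be None at the C level.
 */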
static struct __pyx_vtabstruct_array __pyx_vtable_array;
static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_array_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_array_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_array;
p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None);
p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None);
if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
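/* tp_new pattern for the generated `array` type: allocate directly via
 * tp_alloc for a concrete type, but route abstract types through
 * PyBaseObject_Type.tp_new, presumably so instantiating an abstract subclass
 * raises the standard TypeError.  Object fields are preset to Py_None so the
 * deallocator can run safely even if __cinit__ fails, and __cinit__ is
 * invoked from tp_new itself, which is why it runs exactly once per object.
 */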
static void __pyx_tp_dealloc_array(PyObject *o) {
struct __pyx_array_obj *p = (struct __pyx_array_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_array___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->mode);
Py_CLEAR(p->_format);
(*Py_TYPE(o)->tp_free)(o);
}
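/* Deallocation dance above: __dealloc__ can execute arbitrary Python code,
 * so the object is temporarily "resurrected" (refcount bumped back up) while
 * it runs, preventing a recursive dealloc if __dealloc__ briefly creates and
 * drops a new reference.  PyErr_Fetch/PyErr_Restore bracket the call so that
 * an exception already in flight when the object dies is preserved across
 * it, and tp_finalize (PEP 442) is given first shot when available.
 */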
static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
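/* sq_item receives a raw C index; boxing it and delegating to mp_subscript
 * keeps a single indexing implementation for obj[i].  PyInt_FromSsize_t is a
 * Python 2 spelling; under Python 3, Cython's compatibility shims in this
 * file's preamble map it to PyLong_FromSsize_t.
 */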
static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_array___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) {
PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n);
if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
v = __pyx_array___getattr__(o, n);
}
return v;
}
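/* tp_getattro fallback: try the generic attribute lookup first, and only if
 * it raised AttributeError clear the error and route through the class's
 * __getattr__ -- the same two-step semantics CPython applies to a Python
 * class that defines __getattr__.
 */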
static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o);
}
static PyMethodDef __pyx_methods_array[] = {
{"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_array[] = {
{(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_array = {
__pyx_array___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_array, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_array = {
__pyx_array___len__, /*mp_length*/
__pyx_array___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_array, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_array = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_array_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
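/* Buffer protocol wiring: only bf_getbuffer is provided; bf_releasebuffer is
 * left NULL since no per-view cleanup is needed beyond the view's own
 * reference to the exporter.  The four extra slots zeroed under
 * PY_MAJOR_VERSION < 3 belong to the legacy Python 2 buffer interface,
 * unused here.
 */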
static PyTypeObject __pyx_type___pyx_array = {
PyVarObject_HEAD_INIT(0, 0)
"draco.util.truncate.array", /*tp_name*/
sizeof(struct __pyx_array_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_array, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
0, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_array, /*tp_as_sequence*/
&__pyx_tp_as_mapping_array, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
__pyx_tp_getattro_array, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_array, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/
0, /*tp_doc*/
0, /*tp_traverse*/
0, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_array, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_array, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_array, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
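/* Statically allocated PyTypeObject: slots that moved between CPython
 * releases (tp_print vs tp_vectorcall_offset, tp_compare vs tp_as_async) are
 * selected with version guards so the same generated file compiles from
 * Python 2 through 3.9.  Before first use, the module init code must still
 * finalize the type; a sketch of the usual call (illustrative only):
 */
#if 0
if (PyType_Ready(&__pyx_type___pyx_array) < 0) goto bad;  /* fills tp_dict, inherits slots */
#endif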
static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) {
struct __pyx_MemviewEnum_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_MemviewEnum_obj *)o);
p->name = Py_None; Py_INCREF(Py_None);
return o;
}
static void __pyx_tp_dealloc_Enum(PyObject *o) {
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
Py_CLEAR(p->name);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
if (p->name) {
e = (*v)(p->name, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_Enum(PyObject *o) {
PyObject* tmp;
struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
tmp = ((PyObject*)p->name);
p->name = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
return 0;
}
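/* GC contract for Enum: tp_traverse must visit every owned PyObject* so the
 * collector can discover cycles, and tp_clear drops references that might
 * participate in one; `name` is reset to Py_None rather than NULL so the
 * slot is never left dangling.  Hand-written code would typically spell the
 * same traverse with the Py_VISIT macro (guarded sketch, illustrative only):
 */
#if 0
static int example_traverse(PyObject *o, visitproc visit, void *arg)
{
    struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o;
    Py_VISIT(p->name);  /* expands to the NULL-check / visit / early-return pattern above */
    return 0;
}
#endif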
static PyMethodDef __pyx_methods_Enum[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_MemviewEnum = {
PyVarObject_HEAD_INIT(0, 0)
"draco.util.truncate.Enum", /*tp_name*/
sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_Enum, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_MemviewEnum___repr__, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_Enum, /*tp_traverse*/
__pyx_tp_clear_Enum, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_Enum, /*tp_methods*/
0, /*tp_members*/
0, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
__pyx_MemviewEnum___init__, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_Enum, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview;
static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryview_obj *p;
PyObject *o;
if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) {
o = (*t->tp_alloc)(t, 0);
} else {
o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0);
}
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryview_obj *)o);
p->__pyx_vtab = __pyx_vtabptr_memoryview;
p->obj = Py_None; Py_INCREF(Py_None);
p->_size = Py_None; Py_INCREF(Py_None);
p->_array_interface = Py_None; Py_INCREF(Py_None);
p->view.obj = NULL;
if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad;
return o;
bad:
Py_DECREF(o); o = 0;
return NULL;
}
static void __pyx_tp_dealloc_memoryview(PyObject *o) {
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryview___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->obj);
Py_CLEAR(p->_size);
Py_CLEAR(p->_array_interface);
(*Py_TYPE(o)->tp_free)(o);
}
static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
if (p->obj) {
e = (*v)(p->obj, a); if (e) return e;
}
if (p->_size) {
e = (*v)(p->_size, a); if (e) return e;
}
if (p->_array_interface) {
e = (*v)(p->_array_interface, a); if (e) return e;
}
if (p->view.obj) {
e = (*v)(p->view.obj, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear_memoryview(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o;
tmp = ((PyObject*)p->obj);
p->obj = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_size);
p->_size = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
tmp = ((PyObject*)p->_array_interface);
p->_array_interface = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
Py_CLEAR(p->view.obj);
return 0;
}
static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) {
PyObject *r;
PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0;
r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x);
Py_DECREF(x);
return r;
}
static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) {
if (v) {
return __pyx_memoryview___setitem__(o, i, v);
}
else {
PyErr_Format(PyExc_NotImplementedError,
"Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name);
return -1;
}
}
static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o);
}
static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o);
}
static PyMethodDef __pyx_methods_memoryview[] = {
{"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0},
{"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0},
{"copy", (PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0},
{"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0},
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets_memoryview[] = {
{(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0},
{(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0},
{(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0},
{(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0},
{(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0},
{(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0},
{(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0},
{(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0},
{(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PySequenceMethods __pyx_tp_as_sequence_memoryview = {
__pyx_memoryview___len__, /*sq_length*/
0, /*sq_concat*/
0, /*sq_repeat*/
__pyx_sq_item_memoryview, /*sq_item*/
0, /*sq_slice*/
0, /*sq_ass_item*/
0, /*sq_ass_slice*/
0, /*sq_contains*/
0, /*sq_inplace_concat*/
0, /*sq_inplace_repeat*/
};
static PyMappingMethods __pyx_tp_as_mapping_memoryview = {
__pyx_memoryview___len__, /*mp_length*/
__pyx_memoryview___getitem__, /*mp_subscript*/
__pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/
};
static PyBufferProcs __pyx_tp_as_buffer_memoryview = {
#if PY_MAJOR_VERSION < 3
0, /*bf_getreadbuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getwritebuffer*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getsegcount*/
#endif
#if PY_MAJOR_VERSION < 3
0, /*bf_getcharbuffer*/
#endif
__pyx_memoryview_getbuffer, /*bf_getbuffer*/
0, /*bf_releasebuffer*/
};
static PyTypeObject __pyx_type___pyx_memoryview = {
PyVarObject_HEAD_INIT(0, 0)
"draco.util.truncate.memoryview", /*tp_name*/
sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc_memoryview, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
__pyx_memoryview___repr__, /*tp_repr*/
0, /*tp_as_number*/
&__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/
&__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
__pyx_memoryview___str__, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
&__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
0, /*tp_doc*/
__pyx_tp_traverse_memoryview, /*tp_traverse*/
__pyx_tp_clear_memoryview, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods_memoryview, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets_memoryview, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new_memoryview, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice;
static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) {
struct __pyx_memoryviewslice_obj *p;
PyObject *o = __pyx_tp_new_memoryview(t, a, k);
if (unlikely(!o)) return 0;
p = ((struct __pyx_memoryviewslice_obj *)o);
p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice;
p->from_object = Py_None; Py_INCREF(Py_None);
p->from_slice.memview = NULL;
return o;
}
static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) {
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
#if CYTHON_USE_TP_FINALIZE
if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) {
if (PyObject_CallFinalizerFromDealloc(o)) return;
}
#endif
PyObject_GC_UnTrack(o);
{
PyObject *etype, *eval, *etb;
PyErr_Fetch(&etype, &eval, &etb);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1);
__pyx_memoryviewslice___dealloc__(o);
__Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1);
PyErr_Restore(etype, eval, etb);
}
Py_CLEAR(p->from_object);
PyObject_GC_Track(o);
__pyx_tp_dealloc_memoryview(o);
}
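/* Subclass dealloc chaining: the base dealloc above
 * (__pyx_tp_dealloc_memoryview) calls PyObject_GC_UnTrack itself, so this
 * subtype re-tracks the object with PyObject_GC_Track just before
 * delegating; otherwise the second untrack would operate on an
 * already-untracked object.
 */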
static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) {
int e;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e;
if (p->from_object) {
e = (*v)(p->from_object, a); if (e) return e;
}
return 0;
}
static int __pyx_tp_clear__memoryviewslice(PyObject *o) {
PyObject* tmp;
struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o;
__pyx_tp_clear_memoryview(o);
tmp = ((PyObject*)p->from_object);
p->from_object = Py_None; Py_INCREF(Py_None);
Py_XDECREF(tmp);
__PYX_XDEC_MEMVIEW(&p->from_slice, 1);
return 0;
}
static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) {
return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o);
}
static PyMethodDef __pyx_methods__memoryviewslice[] = {
{"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0},
{"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0},
{0, 0, 0, 0}
};
static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = {
{(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0},
{0, 0, 0, 0, 0}
};
static PyTypeObject __pyx_type___pyx_memoryviewslice = {
PyVarObject_HEAD_INIT(0, 0)
"draco.util.truncate._memoryviewslice", /*tp_name*/
sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/
0, /*tp_itemsize*/
__pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/
#if PY_VERSION_HEX < 0x030800b4
0, /*tp_print*/
#endif
#if PY_VERSION_HEX >= 0x030800b4
0, /*tp_vectorcall_offset*/
#endif
0, /*tp_getattr*/
0, /*tp_setattr*/
#if PY_MAJOR_VERSION < 3
0, /*tp_compare*/
#endif
#if PY_MAJOR_VERSION >= 3
0, /*tp_as_async*/
#endif
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___repr__, /*tp_repr*/
#else
0, /*tp_repr*/
#endif
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash*/
0, /*tp_call*/
#if CYTHON_COMPILING_IN_PYPY
__pyx_memoryview___str__, /*tp_str*/
#else
0, /*tp_str*/
#endif
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/
"Internal class for passing memoryview slices to Python", /*tp_doc*/
__pyx_tp_traverse__memoryviewslice, /*tp_traverse*/
__pyx_tp_clear__memoryviewslice, /*tp_clear*/
0, /*tp_richcompare*/
0, /*tp_weaklistoffset*/
0, /*tp_iter*/
0, /*tp_iternext*/
__pyx_methods__memoryviewslice, /*tp_methods*/
0, /*tp_members*/
__pyx_getsets__memoryviewslice, /*tp_getset*/
0, /*tp_base*/
0, /*tp_dict*/
0, /*tp_descr_get*/
0, /*tp_descr_set*/
0, /*tp_dictoffset*/
0, /*tp_init*/
0, /*tp_alloc*/
__pyx_tp_new__memoryviewslice, /*tp_new*/
0, /*tp_free*/
0, /*tp_is_gc*/
0, /*tp_bases*/
0, /*tp_mro*/
0, /*tp_cache*/
0, /*tp_subclasses*/
0, /*tp_weaklist*/
0, /*tp_del*/
0, /*tp_version_tag*/
#if PY_VERSION_HEX >= 0x030400a1
0, /*tp_finalize*/
#endif
#if PY_VERSION_HEX >= 0x030800b1
0, /*tp_vectorcall*/
#endif
#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000
0, /*tp_print*/
#endif
};
static PyMethodDef __pyx_methods[] = {
{0, 0, 0, 0}
};
#if PY_MAJOR_VERSION >= 3
#if CYTHON_PEP489_MULTI_PHASE_INIT
static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/
static int __pyx_pymod_exec_truncate(PyObject* module); /*proto*/
static PyModuleDef_Slot __pyx_moduledef_slots[] = {
{Py_mod_create, (void*)__pyx_pymod_create},
{Py_mod_exec, (void*)__pyx_pymod_exec_truncate},
{0, NULL}
};
#endif
static struct PyModuleDef __pyx_moduledef = {
PyModuleDef_HEAD_INIT,
"truncate",
__pyx_k_draco_truncation_utils, /* m_doc */
#if CYTHON_PEP489_MULTI_PHASE_INIT
0, /* m_size */
#else
-1, /* m_size */
#endif
__pyx_methods /* m_methods */,
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_moduledef_slots, /* m_slots */
#else
NULL, /* m_reload */
#endif
NULL, /* m_traverse */
NULL, /* m_clear */
NULL /* m_free */
};
#endif
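/* PEP 489 multi-phase initialization: when CYTHON_PEP489_MULTI_PHASE_INIT is
 * enabled the module is created via the Py_mod_create/Py_mod_exec slots and
 * m_size is 0, which lets the interpreter create independent module objects
 * (e.g. per subinterpreter); the fallback branch is classic single-phase
 * init with m_size = -1, i.e. one shared module with no reinitialization.
 */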
#ifndef CYTHON_SMALL_CODE
#if defined(__clang__)
#define CYTHON_SMALL_CODE
#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3))
#define CYTHON_SMALL_CODE __attribute__((cold))
#else
#define CYTHON_SMALL_CODE
#endif
#endif
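/* CYTHON_SMALL_CODE marks the one-shot setup functions below
 * (string/builtin/constant initialization) with __attribute__((cold)) on
 * GCC >= 4.3, nudging the compiler to optimize them for size and keep them
 * out of the hot text section; it expands to nothing on clang and older
 * compilers.
 */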
static __Pyx_StringTabEntry __pyx_string_tab[] = {
{&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1},
{&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0},
{&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0},
{&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0},
{&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1},
{&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0},
{&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1},
{&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0},
{&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1},
{&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0},
{&__pyx_kp_s_Input_array_must_be_1_d, __pyx_k_Input_array_must_be_1_d, sizeof(__pyx_k_Input_array_must_be_1_d), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0},
{&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0},
{&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1},
{&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0},
{&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0},
{&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1},
{&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0},
{&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1},
{&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0},
{&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1},
{&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1},
{&__pyx_kp_s_Weight_and_value_arrays_must_hav, __pyx_k_Weight_and_value_arrays_must_hav, sizeof(__pyx_k_Weight_and_value_arrays_must_hav), 0, 0, 1, 0},
{&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1},
{&__pyx_n_s_asarray, __pyx_k_asarray, sizeof(__pyx_k_asarray), 0, 0, 1, 1},
{&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1},
{&__pyx_n_s_bit_truncate, __pyx_k_bit_truncate, sizeof(__pyx_k_bit_truncate), 0, 0, 1, 1},
{&__pyx_n_s_bit_truncate_fixed, __pyx_k_bit_truncate_fixed, sizeof(__pyx_k_bit_truncate_fixed), 0, 0, 1, 1},
{&__pyx_n_s_bit_truncate_weights, __pyx_k_bit_truncate_weights, sizeof(__pyx_k_bit_truncate_weights), 0, 0, 1, 1},
{&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1},
{&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1},
{&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1},
{&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1},
{&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0},
{&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1},
{&__pyx_n_s_draco_util_truncate, __pyx_k_draco_util_truncate, sizeof(__pyx_k_draco_util_truncate), 0, 0, 1, 1},
{&__pyx_kp_s_draco_util_truncate_pyx, __pyx_k_draco_util_truncate_pyx, sizeof(__pyx_k_draco_util_truncate_pyx), 0, 0, 1, 0},
{&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1},
{&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1},
{&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1},
{&__pyx_n_s_err, __pyx_k_err, sizeof(__pyx_k_err), 0, 0, 1, 1},
{&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1},
{&__pyx_n_s_fallback, __pyx_k_fallback, sizeof(__pyx_k_fallback), 0, 0, 1, 1},
{&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1},
{&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1},
{&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1},
{&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1},
{&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1},
{&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0},
{&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1},
{&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1},
{&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1},
{&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1},
{&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0},
{&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1},
{&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1},
{&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1},
{&__pyx_n_s_n, __pyx_k_n, sizeof(__pyx_k_n), 0, 0, 1, 1},
{&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1},
{&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1},
{&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1},
{&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1},
{&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0},
{&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1},
{&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1},
{&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0},
{&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0},
{&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1},
{&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1},
{&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1},
{&__pyx_n_s_prec, __pyx_k_prec, sizeof(__pyx_k_prec), 0, 0, 1, 1},
{&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1},
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1},
{&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1},
{&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1},
{&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1},
{&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1},
{&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1},
{&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1},
{&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1},
{&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1},
{&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1},
{&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1},
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1},
{&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1},
{&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1},
{&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1},
{&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1},
{&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1},
{&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0},
{&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0},
{&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1},
{&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1},
{&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0},
{&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0},
{&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1},
{&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1},
{&__pyx_n_s_val, __pyx_k_val, sizeof(__pyx_k_val), 0, 0, 1, 1},
{&__pyx_n_s_wgt, __pyx_k_wgt, sizeof(__pyx_k_wgt), 0, 0, 1, 1},
{0, 0, 0, 0, 0, 0, 0}
};
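/* Interned-constant table: every string literal the module needs is
 * registered here once and created at import time.  The naming encodes the
 * kind -- __pyx_n_s_* are interned identifier names, __pyx_kp_s_* are plain
 * (non-interned) string constants, and __pyx_n_u_* / __pyx_n_b_* are the
 * unicode/bytes variants -- while the trailing flags presumably map to
 * __Pyx_StringTabEntry's encoding/is_unicode/is_str/intern fields.
 */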
static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) {
__pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 20, __pyx_L1_error)
__pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 43, __pyx_L1_error)
__pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(1, 884, __pyx_L1_error)
__pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(2, 148, __pyx_L1_error)
__pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(2, 151, __pyx_L1_error)
__pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(2, 2, __pyx_L1_error)
__pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(2, 404, __pyx_L1_error)
__pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(2, 613, __pyx_L1_error)
__pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(2, 832, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
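/* Builtins are resolved once at import and cached in module-level globals,
 * so hot paths skip the builtins-dict lookup.  In __PYX_ERR(f, l, ...) the
 * first argument indexes the source file for traceback mapping: judging by
 * the annotated constants below, 0 is draco/util/truncate.pyx, 1 is numpy's
 * __init__.pxd, and 2 is View.MemoryView / "(tree fragment)" stringsource.
 */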
static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0);
/* "draco/util/truncate.pyx":20
* cdef int n = val.shape[0]
* if val.ndim != 1:
* raise ValueError("Input array must be 1-d.") # <<<<<<<<<<<<<<
* if wgt.shape[0] != n:
* raise ValueError(
*/
__pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Input_array_must_be_1_d); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 20, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple_);
__Pyx_GIVEREF(__pyx_tuple_);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":884
* __pyx_import_array()
* except Exception:
* raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_umath() except -1:
*/
__pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 884, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__2);
__Pyx_GIVEREF(__pyx_tuple__2);
/* "../../../../../../cvmfs/soft.computecanada.ca/easybuild/software/2020/avx2/Core/scipy-stack/2020b/lib/python3.8/site-packages/numpy/__init__.pxd":890
* _import_umath()
* except Exception:
* raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<<
*
* cdef inline int import_ufunc() except -1:
*/
__pyx_tuple__3 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 890, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__3);
__Pyx_GIVEREF(__pyx_tuple__3);
/* "View.MemoryView":133
*
* if not self.ndim:
* raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<<
*
* if itemsize <= 0:
*/
__pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(2, 133, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__4);
__Pyx_GIVEREF(__pyx_tuple__4);
/* "View.MemoryView":136
*
* if itemsize <= 0:
* raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<<
*
* if not isinstance(format, bytes):
*/
__pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(2, 136, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__5);
__Pyx_GIVEREF(__pyx_tuple__5);
/* "View.MemoryView":148
*
* if not self._shape:
* raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(2, 148, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__6);
__Pyx_GIVEREF(__pyx_tuple__6);
/* "View.MemoryView":176
* self.data = <char *>malloc(self.len)
* if not self.data:
* raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<<
*
* if self.dtype_is_object:
*/
__pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(2, 176, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__7);
__Pyx_GIVEREF(__pyx_tuple__7);
/* "View.MemoryView":192
* bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS
* if not (flags & bufmode):
* raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<<
* info.buf = self.data
* info.len = self.len
*/
__pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(2, 192, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__8);
__Pyx_GIVEREF(__pyx_tuple__8);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__9);
__Pyx_GIVEREF(__pyx_tuple__9);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__10);
__Pyx_GIVEREF(__pyx_tuple__10);
/* "View.MemoryView":418
* def __setitem__(memoryview self, object index, object value):
* if self.view.readonly:
* raise TypeError("Cannot assign to read-only memoryview") # <<<<<<<<<<<<<<
*
* have_slices, index = _unellipsify(index, self.view.ndim)
*/
__pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_s_Cannot_assign_to_read_only_memor); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(2, 418, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__11);
__Pyx_GIVEREF(__pyx_tuple__11);
/* "View.MemoryView":495
* result = struct.unpack(self.view.format, bytesitem)
* except struct.error:
* raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<<
* else:
* if len(self.view.format) == 1:
*/
__pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(2, 495, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__12);
__Pyx_GIVEREF(__pyx_tuple__12);
/* "View.MemoryView":520
* def __getbuffer__(self, Py_buffer *info, int flags):
* if flags & PyBUF_WRITABLE and self.view.readonly:
* raise ValueError("Cannot create writable memory view from read-only memoryview") # <<<<<<<<<<<<<<
*
* if flags & PyBUF_ND:
*/
__pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_Cannot_create_writable_memory_vi); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(2, 520, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__13);
__Pyx_GIVEREF(__pyx_tuple__13);
/* "View.MemoryView":570
* if self.view.strides == NULL:
*
* raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<<
*
* return tuple([stride for stride in self.view.strides[:self.view.ndim]])
*/
__pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(2, 570, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__14);
__Pyx_GIVEREF(__pyx_tuple__14);
/* "View.MemoryView":577
* def suboffsets(self):
* if self.view.suboffsets == NULL:
* return (-1,) * self.view.ndim # <<<<<<<<<<<<<<
*
* return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]])
*/
__pyx_tuple__15 = PyTuple_New(1); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(2, 577, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__15);
__Pyx_INCREF(__pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_int_neg_1);
PyTuple_SET_ITEM(__pyx_tuple__15, 0, __pyx_int_neg_1);
__Pyx_GIVEREF(__pyx_tuple__15);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__16);
__Pyx_GIVEREF(__pyx_tuple__16);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__17);
__Pyx_GIVEREF(__pyx_tuple__17);
/* "View.MemoryView":682
* if item is Ellipsis:
* if not seen_ellipsis:
* result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<<
* seen_ellipsis = True
* else:
*/
__pyx_slice__18 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__18)) __PYX_ERR(2, 682, __pyx_L1_error)
__Pyx_GOTREF(__pyx_slice__18);
__Pyx_GIVEREF(__pyx_slice__18);
/* "View.MemoryView":703
* for suboffset in suboffsets[:ndim]:
* if suboffset >= 0:
* raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(2, 703, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__19);
__Pyx_GIVEREF(__pyx_tuple__19);
/* "(tree fragment)":2
* def __reduce_cython__(self):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
*/
__pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(2, 2, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__20);
__Pyx_GIVEREF(__pyx_tuple__20);
/* "(tree fragment)":4
* raise TypeError("no default __reduce__ due to non-trivial __cinit__")
* def __setstate_cython__(self, __pyx_state):
* raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<<
*/
__pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(2, 4, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__21);
__Pyx_GIVEREF(__pyx_tuple__21);
/* "draco/util/truncate.pyx":12
* inline float bit_truncate_float(float val, float err) nogil
*
* def bit_truncate(float val, float err): # <<<<<<<<<<<<<<
* return bit_truncate_float(val, err)
*
*/
__pyx_tuple__22 = PyTuple_Pack(2, __pyx_n_s_val, __pyx_n_s_err); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(0, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__22);
__Pyx_GIVEREF(__pyx_tuple__22);
__pyx_codeobj__23 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__22, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_draco_util_truncate_pyx, __pyx_n_s_bit_truncate, 12, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__23)) __PYX_ERR(0, 12, __pyx_L1_error)
/* "draco/util/truncate.pyx":17
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): # <<<<<<<<<<<<<<
* cdef int n = val.shape[0]
* if val.ndim != 1:
*/
__pyx_tuple__24 = PyTuple_Pack(5, __pyx_n_s_val, __pyx_n_s_wgt, __pyx_n_s_fallback, __pyx_n_s_n, __pyx_n_s_i); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(0, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__24);
__Pyx_GIVEREF(__pyx_tuple__24);
__pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__24, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_draco_util_truncate_pyx, __pyx_n_s_bit_truncate_weights, 17, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 17, __pyx_L1_error)
/* "draco/util/truncate.pyx":39
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def bit_truncate_fixed(float[:] val, float prec): # <<<<<<<<<<<<<<
* cdef int n = val.shape[0]
* cdef int i = 0
*/
__pyx_tuple__26 = PyTuple_Pack(4, __pyx_n_s_val, __pyx_n_s_prec, __pyx_n_s_n, __pyx_n_s_i); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__26);
__Pyx_GIVEREF(__pyx_tuple__26);
__pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(2, 0, 4, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_draco_util_truncate_pyx, __pyx_n_s_bit_truncate_fixed, 39, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 39, __pyx_L1_error)
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(2, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__28);
__Pyx_GIVEREF(__pyx_tuple__28);
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(2, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__29);
__Pyx_GIVEREF(__pyx_tuple__29);
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(2, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__30);
__Pyx_GIVEREF(__pyx_tuple__30);
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(2, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__31);
__Pyx_GIVEREF(__pyx_tuple__31);
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_tuple__32 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(2, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__32);
__Pyx_GIVEREF(__pyx_tuple__32);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_tuple__33 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__33)) __PYX_ERR(2, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_tuple__33);
__Pyx_GIVEREF(__pyx_tuple__33);
__pyx_codeobj__34 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__33, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__34)) __PYX_ERR(2, 1, __pyx_L1_error)
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
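/* __Pyx_InitGlobals ensures the thread machinery is up (PyEval_InitThreads
   on builds with WITH_THREAD), initialises the module's string table, and
   pre-creates the integer constants 0, 1, -1 and 184977713 (0xb068931,
   the pickle checksum checked by __pyx_unpickle_Enum). */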
static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) {
/* InitThreads.init */
#ifdef WITH_THREAD
PyEval_InitThreads();
#endif
if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error)
if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
__pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error)
return 0;
__pyx_L1_error:;
return -1;
}
static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/
static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/
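/* Module initialisation is split into the small phase functions declared
   above: global init, variable/function export, type init, type import,
   and variable/function import. Each sets up a refnanny context and
   returns 0 on success or -1 on error. */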
static int __Pyx_modinit_global_init_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0);
/*--- Global init code ---*/
generic = Py_None; Py_INCREF(Py_None);
strided = Py_None; Py_INCREF(Py_None);
indirect = Py_None; Py_INCREF(Py_None);
contiguous = Py_None; Py_INCREF(Py_None);
indirect_contiguous = Py_None; Py_INCREF(Py_None);
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_variable_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0);
/*--- Variable export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_export_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0);
/*--- Function export code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
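/* Type init readies the generated extension types (array, MemviewEnum,
   memoryview, _memoryviewslice): vtable slots are pointed at the C
   implementations, PyType_Ready() finalises each type, __Pyx_SetVtable()
   stashes the vtable in tp_dict, and __Pyx_setup_reduce() wires up
   pickling support. tp_print is cleared on CPython versions that still
   have that slot (< 3.8b1). */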
static int __Pyx_modinit_type_init_code(void) {
__Pyx_RefNannyDeclarations
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0);
/*--- Type init code ---*/
__pyx_vtabptr_array = &__pyx_vtable_array;
__pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview;
if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_array.tp_print = 0;
#endif
if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(2, 105, __pyx_L1_error)
__pyx_array_type = &__pyx_type___pyx_array;
if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_MemviewEnum.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_MemviewEnum.tp_dictoffset && __pyx_type___pyx_MemviewEnum.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_MemviewEnum.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(2, 279, __pyx_L1_error)
__pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum;
__pyx_vtabptr_memoryview = &__pyx_vtable_memoryview;
__pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer;
__pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice;
__pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment;
__pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar;
__pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed;
__pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object;
__pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object;
if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryview.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryview.tp_dictoffset && __pyx_type___pyx_memoryview.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryview.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(2, 330, __pyx_L1_error)
__pyx_memoryview_type = &__pyx_type___pyx_memoryview;
__pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice;
__pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview;
__pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object;
__pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object;
__pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type;
if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
#if PY_VERSION_HEX < 0x030800B1
__pyx_type___pyx_memoryviewslice.tp_print = 0;
#endif
if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type___pyx_memoryviewslice.tp_dictoffset && __pyx_type___pyx_memoryviewslice.tp_getattro == PyObject_GenericGetAttr)) {
__pyx_type___pyx_memoryviewslice.tp_getattro = __Pyx_PyObject_GenericGetAttr;
}
if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(2, 965, __pyx_L1_error)
__pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_RefNannyFinishContext();
return -1;
}
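/* Type import resolves PyTypeObject layouts from other modules: the
   builtin `type`, then numpy's dtype, flatiter, broadcast, ndarray and
   ufunc. __Pyx_ImportType() compares each struct size against the
   compile-time expectation (warning on mismatch for `type`, ignoring it
   for the numpy types). */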
static int __Pyx_modinit_type_import_code(void) {
__Pyx_RefNannyDeclarations
PyObject *__pyx_t_1 = NULL;
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0);
/*--- Type import code ---*/
__pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type",
#if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000
sizeof(PyTypeObject),
#else
sizeof(PyHeapTypeObject),
#endif
__Pyx_ImportType_CheckSize_Warn);
if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 199, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__pyx_ptype_5numpy_dtype = __Pyx_ImportType(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(1, 199, __pyx_L1_error)
__pyx_ptype_5numpy_flatiter = __Pyx_ImportType(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_flatiter) __PYX_ERR(1, 222, __pyx_L1_error)
__pyx_ptype_5numpy_broadcast = __Pyx_ImportType(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(1, 226, __pyx_L1_error)
__pyx_ptype_5numpy_ndarray = __Pyx_ImportType(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(1, 238, __pyx_L1_error)
__pyx_ptype_5numpy_ufunc = __Pyx_ImportType(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __Pyx_ImportType_CheckSize_Ignore);
if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(1, 764, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
__Pyx_RefNannyFinishContext();
return 0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
__Pyx_RefNannyFinishContext();
return -1;
}
static int __Pyx_modinit_variable_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0);
/*--- Variable import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
static int __Pyx_modinit_function_import_code(void) {
__Pyx_RefNannyDeclarations
__Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0);
/*--- Function import code ---*/
__Pyx_RefNannyFinishContext();
return 0;
}
#ifndef CYTHON_NO_PYINIT_EXPORT
#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC
#elif PY_MAJOR_VERSION < 3
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" void
#else
#define __Pyx_PyMODINIT_FUNC void
#endif
#else
#ifdef __cplusplus
#define __Pyx_PyMODINIT_FUNC extern "C" PyObject *
#else
#define __Pyx_PyMODINIT_FUNC PyObject *
#endif
#endif
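/* Entry points: inittruncate() on Python 2, PyInit_truncate() on
   Python 3. Under PEP 489 multi-phase init the PyInit function only
   returns the module definition; module creation happens in
   __pyx_pymod_create() (which also rejects loading into a second
   interpreter) and the module body runs in __pyx_pymod_exec_truncate(). */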
#if PY_MAJOR_VERSION < 3
__Pyx_PyMODINIT_FUNC inittruncate(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC inittruncate(void)
#else
__Pyx_PyMODINIT_FUNC PyInit_truncate(void) CYTHON_SMALL_CODE; /*proto*/
__Pyx_PyMODINIT_FUNC PyInit_truncate(void)
#if CYTHON_PEP489_MULTI_PHASE_INIT
{
return PyModuleDef_Init(&__pyx_moduledef);
}
static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) {
#if PY_VERSION_HEX >= 0x030700A1
static PY_INT64_T main_interpreter_id = -1;
PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp);
if (main_interpreter_id == -1) {
main_interpreter_id = current_id;
return (unlikely(current_id == -1)) ? -1 : 0;
} else if (unlikely(main_interpreter_id != current_id))
#else
static PyInterpreterState *main_interpreter = NULL;
PyInterpreterState *current_interpreter = PyThreadState_Get()->interp;
if (!main_interpreter) {
main_interpreter = current_interpreter;
} else if (unlikely(main_interpreter != current_interpreter))
#endif
{
PyErr_SetString(
PyExc_ImportError,
"Interpreter change detected - this module can only be loaded into one interpreter per process.");
return -1;
}
return 0;
}
static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) {
PyObject *value = PyObject_GetAttrString(spec, from_name);
int result = 0;
if (likely(value)) {
if (allow_none || value != Py_None) {
result = PyDict_SetItemString(moddict, to_name, value);
}
Py_DECREF(value);
} else if (PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Clear();
} else {
result = -1;
}
return result;
}
static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) {
PyObject *module = NULL, *moddict, *modname;
if (__Pyx_check_single_interpreter())
return NULL;
if (__pyx_m)
return __Pyx_NewRef(__pyx_m);
modname = PyObject_GetAttrString(spec, "name");
if (unlikely(!modname)) goto bad;
module = PyModule_NewObject(modname);
Py_DECREF(modname);
if (unlikely(!module)) goto bad;
moddict = PyModule_GetDict(module);
if (unlikely(!moddict)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad;
if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad;
return module;
bad:
Py_XDECREF(module);
return NULL;
}
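/* Exec slot: performs the work of a classic one-shot init function
   (constants, type init/import, then the translated module body). */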
static CYTHON_SMALL_CODE int __pyx_pymod_exec_truncate(PyObject *__pyx_pyinit_module)
#endif
#endif
{
PyObject *__pyx_t_1 = NULL;
static PyThread_type_lock __pyx_t_2[8];
int __pyx_lineno = 0;
const char *__pyx_filename = NULL;
int __pyx_clineno = 0;
__Pyx_RefNannyDeclarations
#if CYTHON_PEP489_MULTI_PHASE_INIT
if (__pyx_m) {
if (__pyx_m == __pyx_pyinit_module) return 0;
PyErr_SetString(PyExc_RuntimeError, "Module 'truncate' has already been imported. Re-initialisation is not supported.");
return -1;
}
#elif PY_MAJOR_VERSION >= 3
if (__pyx_m) return __Pyx_NewRef(__pyx_m);
#endif
#if CYTHON_REFNANNY
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny");
if (!__Pyx_RefNanny) {
PyErr_Clear();
__Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny");
if (!__Pyx_RefNanny)
Py_FatalError("failed to import 'refnanny' module");
}
#endif
__Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_truncate(void)", 0);
if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pxy_PyFrame_Initialize_Offsets
__Pxy_PyFrame_Initialize_Offsets();
#endif
__pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error)
__pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error)
#ifdef __Pyx_CyFunction_USED
if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_FusedFunction_USED
if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Coroutine_USED
if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_Generator_USED
if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_AsyncGen_USED
if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
#ifdef __Pyx_StopAsyncIteration_USED
if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
/*--- Library function declarations ---*/
/*--- Threads initialization code ---*/
#if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS
#ifdef WITH_THREAD /* Python build with threading support? */
PyEval_InitThreads();
#endif
#endif
/*--- Module creation code ---*/
#if CYTHON_PEP489_MULTI_PHASE_INIT
__pyx_m = __pyx_pyinit_module;
Py_INCREF(__pyx_m);
#else
#if PY_MAJOR_VERSION < 3
__pyx_m = Py_InitModule4("truncate", __pyx_methods, __pyx_k_draco_truncation_utils, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m);
#else
__pyx_m = PyModule_Create(&__pyx_moduledef);
#endif
if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
__pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_d);
__pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_b);
__pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error)
Py_INCREF(__pyx_cython_runtime);
if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error);
/*--- Initialize various global constants etc. ---*/
if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT)
if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
if (__pyx_module_is_main_draco__util__truncate) {
if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
}
#if PY_MAJOR_VERSION >= 3
{
PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error)
if (!PyDict_GetItemString(modules, "draco.util.truncate")) {
if (unlikely(PyDict_SetItemString(modules, "draco.util.truncate", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
}
}
#endif
/*--- Builtin init code ---*/
if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Constants init code ---*/
if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
/*--- Global type/function init code ---*/
(void)__Pyx_modinit_global_init_code();
(void)__Pyx_modinit_variable_export_code();
(void)__Pyx_modinit_function_export_code();
if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
if (unlikely(__Pyx_modinit_type_import_code() < 0)) __PYX_ERR(0, 1, __pyx_L1_error)
(void)__Pyx_modinit_variable_import_code();
(void)__Pyx_modinit_function_import_code();
/*--- Execution code ---*/
#if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED)
if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error)
#endif
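/* The statements below mirror the top-level statements of truncate.pyx
   one-to-one: the numpy import, bindings for the three def functions,
   an empty __test__ dict, and the View.MemoryView module-level globals
   (Enum singletons, preallocated thread locks, getbuffer capsules and
   the Enum unpickle helper). */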
/* "draco/util/truncate.pyx":6
* from cython.parallel import prange
*
* import numpy as np # <<<<<<<<<<<<<<
* cimport numpy as cnp
*
*/
__pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 6, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "draco/util/truncate.pyx":12
* inline float bit_truncate_float(float val, float err) nogil
*
* def bit_truncate(float val, float err): # <<<<<<<<<<<<<<
* return bit_truncate_float(val, err)
*
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5draco_4util_8truncate_1bit_truncate, NULL, __pyx_n_s_draco_util_truncate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 12, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_bit_truncate, __pyx_t_1) < 0) __PYX_ERR(0, 12, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "draco/util/truncate.pyx":17
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def bit_truncate_weights(float[:] val, float[:] wgt, float fallback): # <<<<<<<<<<<<<<
* cdef int n = val.shape[0]
* if val.ndim != 1:
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5draco_4util_8truncate_3bit_truncate_weights, NULL, __pyx_n_s_draco_util_truncate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_bit_truncate_weights, __pyx_t_1) < 0) __PYX_ERR(0, 17, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "draco/util/truncate.pyx":39
* @cython.boundscheck(False)
* @cython.wraparound(False)
* def bit_truncate_fixed(float[:] val, float prec): # <<<<<<<<<<<<<<
* cdef int n = val.shape[0]
* cdef int i = 0
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_5draco_4util_8truncate_5bit_truncate_fixed, NULL, __pyx_n_s_draco_util_truncate); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_bit_truncate_fixed, __pyx_t_1) < 0) __PYX_ERR(0, 39, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "draco/util/truncate.pyx":1
* """draco truncation utils""" # <<<<<<<<<<<<<<
*
* cimport cython
*/
__pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "View.MemoryView":209
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
* def __dealloc__(array self):
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 209, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 209, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_array_type);
/* "View.MemoryView":286
* return self.name
*
* cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<<
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>")
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 286, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(generic);
__Pyx_DECREF_SET(generic, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":287
*
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<<
* cdef indirect = Enum("<strided and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 287, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(strided);
__Pyx_DECREF_SET(strided, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":288
* cdef generic = Enum("<strided and direct or indirect>")
* cdef strided = Enum("<strided and direct>") # default
* cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 288, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect);
__Pyx_DECREF_SET(indirect, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":291
*
*
* cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<<
* cdef indirect_contiguous = Enum("<contiguous and indirect>")
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 291, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(contiguous);
__Pyx_DECREF_SET(contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":292
*
* cdef contiguous = Enum("<contiguous and direct>")
* cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__32, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 292, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
__Pyx_XGOTREF(indirect_contiguous);
__Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1);
__Pyx_GIVEREF(__pyx_t_1);
__pyx_t_1 = 0;
/* "View.MemoryView":316
*
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<<
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [
* PyThread_allocate_lock(),
*/
__pyx_memoryview_thread_locks_used = 0;
/* "View.MemoryView":317
* DEF THREAD_LOCKS_PREALLOCATED = 8
* cdef int __pyx_memoryview_thread_locks_used = 0
* cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<<
* PyThread_allocate_lock(),
* PyThread_allocate_lock(),
*/
__pyx_t_2[0] = PyThread_allocate_lock();
__pyx_t_2[1] = PyThread_allocate_lock();
__pyx_t_2[2] = PyThread_allocate_lock();
__pyx_t_2[3] = PyThread_allocate_lock();
__pyx_t_2[4] = PyThread_allocate_lock();
__pyx_t_2[5] = PyThread_allocate_lock();
__pyx_t_2[6] = PyThread_allocate_lock();
__pyx_t_2[7] = PyThread_allocate_lock();
memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8));
/* "View.MemoryView":549
* info.obj = self
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 549, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 549, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryview_type);
/* "View.MemoryView":995
* return self.from_object
*
* __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<<
*
*
*/
__pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 995, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(2, 995, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
PyType_Modified(__pyx_memoryviewslice_type);
/* "(tree fragment)":1
* def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<<
* cdef object __pyx_PickleError
* cdef object __pyx_result
*/
__pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 1, __pyx_L1_error)
__Pyx_GOTREF(__pyx_t_1);
if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(2, 1, __pyx_L1_error)
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0;
/* "(tree fragment)":11
* __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state)
* return __pyx_result
* cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<<
* __pyx_result.name = __pyx_state[0]
* if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'):
*/
/*--- Wrapped vars code ---*/
goto __pyx_L0;
__pyx_L1_error:;
__Pyx_XDECREF(__pyx_t_1);
if (__pyx_m) {
if (__pyx_d) {
__Pyx_AddTraceback("init draco.util.truncate", __pyx_clineno, __pyx_lineno, __pyx_filename);
}
Py_CLEAR(__pyx_m);
} else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_ImportError, "init draco.util.truncate");
}
__pyx_L0:;
__Pyx_RefNannyFinishContext();
#if CYTHON_PEP489_MULTI_PHASE_INIT
return (__pyx_m != NULL) ? 0 : -1;
#elif PY_MAJOR_VERSION >= 3
return __pyx_m;
#else
return;
#endif
}
/* --- Runtime support code --- */
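/* Cython emits only the runtime helpers this module actually uses; each
   is introduced by a short marker comment. */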
/* Refnanny */
#if CYTHON_REFNANNY
static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) {
PyObject *m = NULL, *p = NULL;
void *r = NULL;
m = PyImport_ImportModule(modname);
if (!m) goto end;
p = PyObject_GetAttrString(m, "RefNannyAPI");
if (!p) goto end;
r = PyLong_AsVoidPtr(p);
end:
Py_XDECREF(p);
Py_XDECREF(m);
return (__Pyx_RefNannyAPIStruct *)r;
}
#endif
/* PyObjectGetAttrStr */
#if CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) {
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro))
return tp->tp_getattro(obj, attr_name);
#if PY_MAJOR_VERSION < 3
if (likely(tp->tp_getattr))
return tp->tp_getattr(obj, PyString_AS_STRING(attr_name));
#endif
return PyObject_GetAttr(obj, attr_name);
}
#endif
/* GetBuiltinName */
static PyObject *__Pyx_GetBuiltinName(PyObject *name) {
PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name);
if (unlikely(!result)) {
PyErr_Format(PyExc_NameError,
#if PY_MAJOR_VERSION >= 3
"name '%U' is not defined", name);
#else
"name '%.200s' is not defined", PyString_AS_STRING(name));
#endif
}
return result;
}
/* RaiseArgTupleInvalid */
static void __Pyx_RaiseArgtupleInvalid(
const char* func_name,
int exact,
Py_ssize_t num_min,
Py_ssize_t num_max,
Py_ssize_t num_found)
{
Py_ssize_t num_expected;
const char *more_or_less;
if (num_found < num_min) {
num_expected = num_min;
more_or_less = "at least";
} else {
num_expected = num_max;
more_or_less = "at most";
}
if (exact) {
more_or_less = "exactly";
}
PyErr_Format(PyExc_TypeError,
"%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)",
func_name, more_or_less, num_expected,
(num_expected == 1) ? "" : "s", num_found);
}
/* RaiseDoubleKeywords */
static void __Pyx_RaiseDoubleKeywordsError(
const char* func_name,
PyObject* kw_name)
{
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION >= 3
"%s() got multiple values for keyword argument '%U'", func_name, kw_name);
#else
"%s() got multiple values for keyword argument '%s'", func_name,
PyString_AsString(kw_name));
#endif
}
/* ParseKeywords */
static int __Pyx_ParseOptionalKeywords(
PyObject *kwds,
PyObject **argnames[],
PyObject *kwds2,
PyObject *values[],
Py_ssize_t num_pos_args,
const char* function_name)
{
PyObject *key = 0, *value = 0;
Py_ssize_t pos = 0;
PyObject*** name;
PyObject*** first_kw_arg = argnames + num_pos_args;
while (PyDict_Next(kwds, &pos, &key, &value)) {
name = first_kw_arg;
while (*name && (**name != key)) name++;
if (*name) {
values[name-argnames] = value;
continue;
}
name = first_kw_arg;
#if PY_MAJOR_VERSION < 3
if (likely(PyString_Check(key))) {
while (*name) {
if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key))
&& _PyString_Eq(**name, key)) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
if ((**argname == key) || (
(CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key))
&& _PyString_Eq(**argname, key))) {
goto arg_passed_twice;
}
argname++;
}
}
} else
#endif
if (likely(PyUnicode_Check(key))) {
while (*name) {
int cmp = (**name == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**name, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) {
values[name-argnames] = value;
break;
}
name++;
}
if (*name) continue;
else {
PyObject*** argname = argnames;
while (argname != first_kw_arg) {
int cmp = (**argname == key) ? 0 :
#if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3
(__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 :
#endif
PyUnicode_Compare(**argname, key);
if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad;
if (cmp == 0) goto arg_passed_twice;
argname++;
}
}
} else
goto invalid_keyword_type;
if (kwds2) {
if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad;
} else {
goto invalid_keyword;
}
}
return 0;
arg_passed_twice:
__Pyx_RaiseDoubleKeywordsError(function_name, key);
goto bad;
invalid_keyword_type:
PyErr_Format(PyExc_TypeError,
"%.200s() keywords must be strings", function_name);
goto bad;
invalid_keyword:
PyErr_Format(PyExc_TypeError,
#if PY_MAJOR_VERSION < 3
"%.200s() got an unexpected keyword argument '%.200s'",
function_name, PyString_AsString(key));
#else
"%s() got an unexpected keyword argument '%U'",
function_name, key);
#endif
bad:
return -1;
}
/* PyIntCompare */
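/* __Pyx_PyInt_NeObjC specialises `obj != <constant long>`: fast paths for
   exact PyInt (Python 2), exact PyLong (compared digit-by-digit against
   the constant) and exact PyFloat, falling back to
   PyObject_RichCompare() otherwise. */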
static CYTHON_INLINE PyObject* __Pyx_PyInt_NeObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED long inplace) {
if (op1 == op2) {
Py_RETURN_FALSE;
}
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long a = PyInt_AS_LONG(op1);
if (a != b) Py_RETURN_TRUE; else Py_RETURN_FALSE;
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
int unequal;
unsigned long uintval;
Py_ssize_t size = Py_SIZE(op1);
const digit* digits = ((PyLongObject*)op1)->ob_digit;
if (intval == 0) {
if (size != 0) Py_RETURN_TRUE; else Py_RETURN_FALSE;
} else if (intval < 0) {
if (size >= 0)
Py_RETURN_TRUE;
intval = -intval;
size = -size;
} else {
if (size <= 0)
Py_RETURN_TRUE;
}
uintval = (unsigned long) intval;
#if PyLong_SHIFT * 4 < SIZEOF_LONG*8
if (uintval >> (PyLong_SHIFT * 4)) {
            unequal = (size != 5)
                || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
                 | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
                 | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
                 | (digits[4] != ((uintval >> (4 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));

} else
#endif
#if PyLong_SHIFT * 3 < SIZEOF_LONG*8
if (uintval >> (PyLong_SHIFT * 3)) {
            unequal = (size != 4)
                || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
                 | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
                 | (digits[3] != ((uintval >> (3 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
} else
#endif
#if PyLong_SHIFT * 2 < SIZEOF_LONG*8
if (uintval >> (PyLong_SHIFT * 2)) {
            unequal = (size != 3)
                || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK))
                 | (digits[2] != ((uintval >> (2 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
} else
#endif
#if PyLong_SHIFT * 1 < SIZEOF_LONG*8
if (uintval >> (PyLong_SHIFT * 1)) {
            unequal = (size != 2)
                || (digits[0] != (uintval & (unsigned long) PyLong_MASK))
                 | (digits[1] != ((uintval >> (1 * PyLong_SHIFT)) & (unsigned long) PyLong_MASK));
} else
#endif
unequal = (size != 1) || (((unsigned long) digits[0]) != (uintval & (unsigned long) PyLong_MASK));
if (unequal != 0) Py_RETURN_TRUE; else Py_RETURN_FALSE;
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
if ((double)a != (double)b) Py_RETURN_TRUE; else Py_RETURN_FALSE;
}
return (
PyObject_RichCompare(op1, op2, Py_NE));
}
/* PyObjectCall */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) {
PyObject *result;
ternaryfunc call = func->ob_type->tp_call;
if (unlikely(!call))
return PyObject_Call(func, arg, kw);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = (*call)(func, arg, kw);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyErrFetchRestore */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
tmp_type = tstate->curexc_type;
tmp_value = tstate->curexc_value;
tmp_tb = tstate->curexc_traceback;
tstate->curexc_type = type;
tstate->curexc_value = value;
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
*type = tstate->curexc_type;
*value = tstate->curexc_value;
*tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
}
#endif
/* RaiseException */
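/* __Pyx_Raise implements the raise statement: the Python 2 variant
   normalises type/value/traceback in place, the Python 3 variant
   instantiates exception classes, validates the traceback argument and
   chains __cause__ via PyException_SetCause(). */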
#if PY_MAJOR_VERSION < 3
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb,
CYTHON_UNUSED PyObject *cause) {
__Pyx_PyThreadState_declare
Py_XINCREF(type);
if (!value || value == Py_None)
value = NULL;
else
Py_INCREF(value);
if (!tb || tb == Py_None)
tb = NULL;
else {
Py_INCREF(tb);
if (!PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto raise_error;
}
}
if (PyType_Check(type)) {
#if CYTHON_COMPILING_IN_PYPY
if (!value) {
Py_INCREF(Py_None);
value = Py_None;
}
#endif
PyErr_NormalizeException(&type, &value, &tb);
} else {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto raise_error;
}
value = type;
type = (PyObject*) Py_TYPE(type);
Py_INCREF(type);
if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto raise_error;
}
}
__Pyx_PyThreadState_assign
__Pyx_ErrRestore(type, value, tb);
return;
raise_error:
Py_XDECREF(value);
Py_XDECREF(type);
Py_XDECREF(tb);
return;
}
#else
static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) {
PyObject* owned_instance = NULL;
if (tb == Py_None) {
tb = 0;
} else if (tb && !PyTraceBack_Check(tb)) {
PyErr_SetString(PyExc_TypeError,
"raise: arg 3 must be a traceback or None");
goto bad;
}
if (value == Py_None)
value = 0;
if (PyExceptionInstance_Check(type)) {
if (value) {
PyErr_SetString(PyExc_TypeError,
"instance exception may not have a separate value");
goto bad;
}
value = type;
type = (PyObject*) Py_TYPE(value);
} else if (PyExceptionClass_Check(type)) {
PyObject *instance_class = NULL;
if (value && PyExceptionInstance_Check(value)) {
instance_class = (PyObject*) Py_TYPE(value);
if (instance_class != type) {
int is_subclass = PyObject_IsSubclass(instance_class, type);
if (!is_subclass) {
instance_class = NULL;
} else if (unlikely(is_subclass == -1)) {
goto bad;
} else {
type = instance_class;
}
}
}
if (!instance_class) {
PyObject *args;
if (!value)
args = PyTuple_New(0);
else if (PyTuple_Check(value)) {
Py_INCREF(value);
args = value;
} else
args = PyTuple_Pack(1, value);
if (!args)
goto bad;
owned_instance = PyObject_Call(type, args, NULL);
Py_DECREF(args);
if (!owned_instance)
goto bad;
value = owned_instance;
if (!PyExceptionInstance_Check(value)) {
PyErr_Format(PyExc_TypeError,
"calling %R should have returned an instance of "
"BaseException, not %R",
type, Py_TYPE(value));
goto bad;
}
}
} else {
PyErr_SetString(PyExc_TypeError,
"raise: exception class must be a subclass of BaseException");
goto bad;
}
if (cause) {
PyObject *fixed_cause;
if (cause == Py_None) {
fixed_cause = NULL;
} else if (PyExceptionClass_Check(cause)) {
fixed_cause = PyObject_CallObject(cause, NULL);
if (fixed_cause == NULL)
goto bad;
} else if (PyExceptionInstance_Check(cause)) {
fixed_cause = cause;
Py_INCREF(fixed_cause);
} else {
PyErr_SetString(PyExc_TypeError,
"exception causes must derive from "
"BaseException");
goto bad;
}
PyException_SetCause(value, fixed_cause);
}
PyErr_SetObject(type, value);
if (tb) {
#if CYTHON_COMPILING_IN_PYPY
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb);
Py_INCREF(tb);
PyErr_Restore(tmp_type, tmp_value, tb);
Py_XDECREF(tmp_tb);
#else
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject* tmp_tb = tstate->curexc_traceback;
if (tb != tmp_tb) {
Py_INCREF(tb);
tstate->curexc_traceback = tb;
Py_XDECREF(tmp_tb);
}
#endif
}
bad:
Py_XDECREF(owned_instance);
return;
}
#endif
/* PyFunctionFastCall */
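/* __Pyx_PyFunction_FastCallNoKw replays CPython's fast path for calling a
   pure-Python function: allocate a frame, copy the positional arguments
   into its fastlocals, and evaluate it directly. The recursion_depth bump
   around Py_DECREF(f) mirrors CPython's own frame-teardown guard. */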
#if CYTHON_FAST_PYCALL
static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na,
PyObject *globals) {
PyFrameObject *f;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
PyObject **fastlocals;
Py_ssize_t i;
PyObject *result;
assert(globals != NULL);
/* XXX Perhaps we should create a specialized
PyFrame_New() that doesn't take locals, but does
take builtins without sanity checking them.
*/
assert(tstate != NULL);
f = PyFrame_New(tstate, co, globals, NULL);
if (f == NULL) {
return NULL;
}
fastlocals = __Pyx_PyFrame_GetLocalsplus(f);
for (i = 0; i < na; i++) {
Py_INCREF(*args);
fastlocals[i] = *args++;
}
result = PyEval_EvalFrameEx(f,0);
++tstate->recursion_depth;
Py_DECREF(f);
--tstate->recursion_depth;
return result;
}
#if 1 || PY_VERSION_HEX < 0x030600B1
static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) {
PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func);
PyObject *globals = PyFunction_GET_GLOBALS(func);
PyObject *argdefs = PyFunction_GET_DEFAULTS(func);
PyObject *closure;
#if PY_MAJOR_VERSION >= 3
PyObject *kwdefs;
#endif
PyObject *kwtuple, **k;
PyObject **d;
Py_ssize_t nd;
Py_ssize_t nk;
PyObject *result;
assert(kwargs == NULL || PyDict_Check(kwargs));
nk = kwargs ? PyDict_Size(kwargs) : 0;
if (Py_EnterRecursiveCall((char*)" while calling a Python object")) {
return NULL;
}
if (
#if PY_MAJOR_VERSION >= 3
co->co_kwonlyargcount == 0 &&
#endif
likely(kwargs == NULL || nk == 0) &&
co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) {
if (argdefs == NULL && co->co_argcount == nargs) {
result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals);
goto done;
}
else if (nargs == 0 && argdefs != NULL
&& co->co_argcount == Py_SIZE(argdefs)) {
            /* Function called with no arguments, but all parameters have
               a default value: use the default values as arguments. */
args = &PyTuple_GET_ITEM(argdefs, 0);
result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals);
goto done;
}
}
if (kwargs != NULL) {
Py_ssize_t pos, i;
kwtuple = PyTuple_New(2 * nk);
if (kwtuple == NULL) {
result = NULL;
goto done;
}
k = &PyTuple_GET_ITEM(kwtuple, 0);
pos = i = 0;
while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) {
Py_INCREF(k[i]);
Py_INCREF(k[i+1]);
i += 2;
}
nk = i / 2;
}
else {
kwtuple = NULL;
k = NULL;
}
closure = PyFunction_GET_CLOSURE(func);
#if PY_MAJOR_VERSION >= 3
kwdefs = PyFunction_GET_KW_DEFAULTS(func);
#endif
if (argdefs != NULL) {
d = &PyTuple_GET_ITEM(argdefs, 0);
nd = Py_SIZE(argdefs);
}
else {
d = NULL;
nd = 0;
}
#if PY_MAJOR_VERSION >= 3
result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, kwdefs, closure);
#else
result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL,
args, (int)nargs,
k, (int)nk,
d, (int)nd, closure);
#endif
Py_XDECREF(kwtuple);
done:
Py_LeaveRecursiveCall();
return result;
}
#endif
#endif
/* PyCFunctionFastCall */
#if CYTHON_FAST_PYCCALL
static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) {
PyCFunctionObject *func = (PyCFunctionObject*)func_obj;
PyCFunction meth = PyCFunction_GET_FUNCTION(func);
PyObject *self = PyCFunction_GET_SELF(func);
int flags = PyCFunction_GET_FLAGS(func);
assert(PyCFunction_Check(func));
assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS)));
assert(nargs >= 0);
assert(nargs == 0 || args != NULL);
/* _PyCFunction_FastCallDict() must not be called with an exception set,
because it may clear it (directly or indirectly) and so the
caller loses its exception */
assert(!PyErr_Occurred());
if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) {
return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL);
} else {
return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs);
}
}
#endif
/* PyObjectCallMethO */
#if CYTHON_COMPILING_IN_CPYTHON
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) {
PyObject *self, *result;
PyCFunction cfunc;
cfunc = PyCFunction_GET_FUNCTION(func);
self = PyCFunction_GET_SELF(func);
if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object")))
return NULL;
result = cfunc(self, arg);
Py_LeaveRecursiveCall();
if (unlikely(!result) && unlikely(!PyErr_Occurred())) {
PyErr_SetString(
PyExc_SystemError,
"NULL result without error in PyObject_Call");
}
return result;
}
#endif
/* PyObjectCallOneArg */
#if CYTHON_COMPILING_IN_CPYTHON
static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_New(1);
if (unlikely(!args)) return NULL;
Py_INCREF(arg);
PyTuple_SET_ITEM(args, 0, arg);
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(func)) {
return __Pyx_PyFunction_FastCall(func, &arg, 1);
}
#endif
if (likely(PyCFunction_Check(func))) {
if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) {
return __Pyx_PyObject_CallMethO(func, arg);
#if CYTHON_FAST_PYCCALL
} else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) {
return __Pyx_PyCFunction_FastCall(func, &arg, 1);
#endif
}
}
return __Pyx__PyObject_CallOneArg(func, arg);
}
#else
static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) {
PyObject *result;
PyObject *args = PyTuple_Pack(1, arg);
if (unlikely(!args)) return NULL;
result = __Pyx_PyObject_Call(func, args, NULL);
Py_DECREF(args);
return result;
}
#endif
/* PyDictVersioning */
#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS
static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0;
}
static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) {
PyObject **dictptr = NULL;
Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset;
if (offset) {
#if CYTHON_COMPILING_IN_CPYTHON
dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj);
#else
dictptr = _PyObject_GetDictPtr(obj);
#endif
}
return (dictptr && *dictptr) ? __PYX_GET_DICT_VERSION(*dictptr) : 0;
}
static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) {
PyObject *dict = Py_TYPE(obj)->tp_dict;
if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict)))
return 0;
return obj_dict_version == __Pyx_get_object_dict_version(obj);
}
#endif
/* GetModuleGlobalName */
#if CYTHON_USE_DICT_VERSIONS
static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value)
#else
static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name)
#endif
{
PyObject *result;
#if !CYTHON_AVOID_BORROWED_REFS
#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1
result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
} else if (unlikely(PyErr_Occurred())) {
return NULL;
}
#else
result = PyDict_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
#endif
#else
result = PyObject_GetItem(__pyx_d, name);
__PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version)
if (likely(result)) {
return __Pyx_NewRef(result);
}
PyErr_Clear();
#endif
return __Pyx_GetBuiltinName(name);
}
/* PyObjectCall2Args */
static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) {
PyObject *args, *result = NULL;
#if CYTHON_FAST_PYCALL
if (PyFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyFunction_FastCall(function, args, 2);
}
#endif
#if CYTHON_FAST_PYCCALL
if (__Pyx_PyFastCFunction_Check(function)) {
PyObject *args[2] = {arg1, arg2};
return __Pyx_PyCFunction_FastCall(function, args, 2);
}
#endif
args = PyTuple_New(2);
if (unlikely(!args)) goto done;
Py_INCREF(arg1);
PyTuple_SET_ITEM(args, 0, arg1);
Py_INCREF(arg2);
PyTuple_SET_ITEM(args, 1, arg2);
Py_INCREF(function);
result = __Pyx_PyObject_Call(function, args, NULL);
Py_DECREF(args);
Py_DECREF(function);
done:
return result;
}
/* MemviewSliceInit */
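/* Memoryview slices carry an acquisition count on the owning
   __pyx_memoryview object: the first slice takes a reference, the last
   one releases it, and the *_locked variants below serialise the count
   update with a PyThread lock when atomic ops are unavailable.
   __Pyx_init_memviewslice copies shape/strides/suboffsets out of the
   Py_buffer, synthesising C-contiguous strides if the buffer has none. */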
static int
__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview,
int ndim,
__Pyx_memviewslice *memviewslice,
int memview_is_new_reference)
{
__Pyx_RefNannyDeclarations
int i, retval=-1;
Py_buffer *buf = &memview->view;
__Pyx_RefNannySetupContext("init_memviewslice", 0);
if (unlikely(memviewslice->memview || memviewslice->data)) {
PyErr_SetString(PyExc_ValueError,
"memviewslice is already initialized!");
goto fail;
}
if (buf->strides) {
for (i = 0; i < ndim; i++) {
memviewslice->strides[i] = buf->strides[i];
}
} else {
Py_ssize_t stride = buf->itemsize;
for (i = ndim - 1; i >= 0; i--) {
memviewslice->strides[i] = stride;
stride *= buf->shape[i];
}
}
for (i = 0; i < ndim; i++) {
memviewslice->shape[i] = buf->shape[i];
if (buf->suboffsets) {
memviewslice->suboffsets[i] = buf->suboffsets[i];
} else {
memviewslice->suboffsets[i] = -1;
}
}
memviewslice->memview = memview;
memviewslice->data = (char *)buf->buf;
if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) {
Py_INCREF(memview);
}
retval = 0;
goto no_fail;
fail:
memviewslice->memview = 0;
memviewslice->data = 0;
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
#ifndef Py_NO_RETURN
#define Py_NO_RETURN
#endif
static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN {
va_list vargs;
char msg[200];
#ifdef HAVE_STDARG_PROTOTYPES
va_start(vargs, fmt);
#else
va_start(vargs);
#endif
vsnprintf(msg, 200, fmt, vargs);
va_end(vargs);
Py_FatalError(msg);
}
static CYTHON_INLINE int
__pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)++;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE int
__pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count,
PyThread_type_lock lock)
{
int result;
PyThread_acquire_lock(lock, 1);
result = (*acquisition_count)--;
PyThread_release_lock(lock);
return result;
}
static CYTHON_INLINE void
__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno)
{
int first_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None))
return;
if (unlikely(__pyx_get_slice_count(memview) < 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
first_time = __pyx_add_acquisition_count(memview) == 0;
if (unlikely(first_time)) {
if (have_gil) {
Py_INCREF((PyObject *) memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_INCREF((PyObject *) memview);
PyGILState_Release(_gilstate);
}
}
}
static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice,
int have_gil, int lineno) {
int last_time;
struct __pyx_memoryview_obj *memview = memslice->memview;
if (unlikely(!memview || (PyObject *) memview == Py_None)) {
memslice->memview = NULL;
return;
}
if (unlikely(__pyx_get_slice_count(memview) <= 0))
__pyx_fatalerror("Acquisition count is %d (line %d)",
__pyx_get_slice_count(memview), lineno);
last_time = __pyx_sub_acquisition_count(memview) == 1;
memslice->data = NULL;
if (unlikely(last_time)) {
if (have_gil) {
Py_CLEAR(memslice->memview);
} else {
PyGILState_STATE _gilstate = PyGILState_Ensure();
Py_CLEAR(memslice->memview);
PyGILState_Release(_gilstate);
}
} else {
memslice->memview = NULL;
}
}
/* GetTopmostException */
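/* The exception-state helpers below read and swap the active and
   currently-handled exceptions directly on PyThreadState (walking the
   _PyErr_StackItem chain on CPython >= 3.7), avoiding the slower
   PyErr_GetExcInfo/PyErr_SetExcInfo calls when CYTHON_FAST_THREAD_STATE
   is enabled. */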
#if CYTHON_USE_EXC_INFO_STACK
static _PyErr_StackItem *
__Pyx_PyErr_GetTopmostException(PyThreadState *tstate)
{
_PyErr_StackItem *exc_info = tstate->exc_info;
while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) &&
exc_info->previous_item != NULL)
{
exc_info = exc_info->previous_item;
}
return exc_info;
}
#endif
/* SaveResetException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate);
*type = exc_info->exc_type;
*value = exc_info->exc_value;
*tb = exc_info->exc_traceback;
#else
*type = tstate->exc_type;
*value = tstate->exc_value;
*tb = tstate->exc_traceback;
#endif
Py_XINCREF(*type);
Py_XINCREF(*value);
Py_XINCREF(*tb);
}
static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = type;
exc_info->exc_value = value;
exc_info->exc_traceback = tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = type;
tstate->exc_value = value;
tstate->exc_traceback = tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
}
#endif
/* PyErrExceptionMatches */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1;
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) {
PyObject *exc_type = tstate->curexc_type;
if (exc_type == err) return 1;
if (unlikely(!exc_type)) return 0;
if (unlikely(PyTuple_Check(err)))
return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err);
return __Pyx_PyErr_GivenExceptionMatches(exc_type, err);
}
#endif
/* GetException */
#if CYTHON_FAST_THREAD_STATE
static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb)
#else
static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb)
#endif
{
PyObject *local_type, *local_value, *local_tb;
#if CYTHON_FAST_THREAD_STATE
PyObject *tmp_type, *tmp_value, *tmp_tb;
local_type = tstate->curexc_type;
local_value = tstate->curexc_value;
local_tb = tstate->curexc_traceback;
tstate->curexc_type = 0;
tstate->curexc_value = 0;
tstate->curexc_traceback = 0;
#else
PyErr_Fetch(&local_type, &local_value, &local_tb);
#endif
PyErr_NormalizeException(&local_type, &local_value, &local_tb);
#if CYTHON_FAST_THREAD_STATE
if (unlikely(tstate->curexc_type))
#else
if (unlikely(PyErr_Occurred()))
#endif
goto bad;
#if PY_MAJOR_VERSION >= 3
if (local_tb) {
if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0))
goto bad;
}
#endif
Py_XINCREF(local_tb);
Py_XINCREF(local_type);
Py_XINCREF(local_value);
*type = local_type;
*value = local_value;
*tb = local_tb;
#if CYTHON_FAST_THREAD_STATE
#if CYTHON_USE_EXC_INFO_STACK
{
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = local_type;
exc_info->exc_value = local_value;
exc_info->exc_traceback = local_tb;
}
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = local_type;
tstate->exc_value = local_value;
tstate->exc_traceback = local_tb;
#endif
Py_XDECREF(tmp_type);
Py_XDECREF(tmp_value);
Py_XDECREF(tmp_tb);
#else
PyErr_SetExcInfo(local_type, local_value, local_tb);
#endif
return 0;
bad:
*type = 0;
*value = 0;
*tb = 0;
Py_XDECREF(local_type);
Py_XDECREF(local_value);
Py_XDECREF(local_tb);
return -1;
}
/* ArgTypeTest */
static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact)
{
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
else if (exact) {
#if PY_MAJOR_VERSION == 2
if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1;
#endif
}
else {
if (likely(__Pyx_TypeCheck(obj, type))) return 1;
}
PyErr_Format(PyExc_TypeError,
"Argument '%.200s' has incorrect type (expected %.200s, got %.200s)",
name, type->tp_name, Py_TYPE(obj)->tp_name);
return 0;
}
/* BytesEquals */
static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
if (s1 == s2) {
return (equals == Py_EQ);
} else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) {
const char *ps1, *ps2;
Py_ssize_t length = PyBytes_GET_SIZE(s1);
if (length != PyBytes_GET_SIZE(s2))
return (equals == Py_NE);
ps1 = PyBytes_AS_STRING(s1);
ps2 = PyBytes_AS_STRING(s2);
if (ps1[0] != ps2[0]) {
return (equals == Py_NE);
} else if (length == 1) {
return (equals == Py_EQ);
} else {
int result;
#if CYTHON_USE_UNICODE_INTERNALS
Py_hash_t hash1, hash2;
hash1 = ((PyBytesObject*)s1)->ob_shash;
hash2 = ((PyBytesObject*)s2)->ob_shash;
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
return (equals == Py_NE);
}
#endif
result = memcmp(ps1, ps2, (size_t)length);
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) {
return (equals == Py_NE);
} else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) {
return (equals == Py_NE);
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
#endif
}
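/* __Pyx_PyBytes_Equals above short-circuits from cheapest to most expensive
   test: pointer identity, length, first byte, cached hashes, and only then a
   full memcmp. The same ladder on raw buffers, as a sketch (demo_* names are
   hypothetical, not part of Cython): */
#include <string.h>
static int demo_bytes_eq(const char *p1, size_t n1, const char *p2, size_t n2) {
    if (p1 == p2) return 1;              /* identity: trivially equal */
    if (n1 != n2) return 0;              /* different lengths: unequal */
    if (n1 == 0)  return 1;              /* both empty */
    if (p1[0] != p2[0]) return 0;        /* cheap first-byte filter */
    return memcmp(p1, p2, n1) == 0;      /* full comparison last */
}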
/* UnicodeEquals */
static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) {
#if CYTHON_COMPILING_IN_PYPY
return PyObject_RichCompareBool(s1, s2, equals);
#else
#if PY_MAJOR_VERSION < 3
PyObject* owned_ref = NULL;
#endif
int s1_is_unicode, s2_is_unicode;
if (s1 == s2) {
goto return_eq;
}
s1_is_unicode = PyUnicode_CheckExact(s1);
s2_is_unicode = PyUnicode_CheckExact(s2);
#if PY_MAJOR_VERSION < 3
if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) {
owned_ref = PyUnicode_FromObject(s2);
if (unlikely(!owned_ref))
return -1;
s2 = owned_ref;
s2_is_unicode = 1;
} else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) {
owned_ref = PyUnicode_FromObject(s1);
if (unlikely(!owned_ref))
return -1;
s1 = owned_ref;
s1_is_unicode = 1;
} else if (((!s2_is_unicode) & (!s1_is_unicode))) {
return __Pyx_PyBytes_Equals(s1, s2, equals);
}
#endif
if (s1_is_unicode & s2_is_unicode) {
Py_ssize_t length;
int kind;
void *data1, *data2;
if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0))
return -1;
length = __Pyx_PyUnicode_GET_LENGTH(s1);
if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) {
goto return_ne;
}
#if CYTHON_USE_UNICODE_INTERNALS
{
Py_hash_t hash1, hash2;
#if CYTHON_PEP393_ENABLED
hash1 = ((PyASCIIObject*)s1)->hash;
hash2 = ((PyASCIIObject*)s2)->hash;
#else
hash1 = ((PyUnicodeObject*)s1)->hash;
hash2 = ((PyUnicodeObject*)s2)->hash;
#endif
if (hash1 != hash2 && hash1 != -1 && hash2 != -1) {
goto return_ne;
}
}
#endif
kind = __Pyx_PyUnicode_KIND(s1);
if (kind != __Pyx_PyUnicode_KIND(s2)) {
goto return_ne;
}
data1 = __Pyx_PyUnicode_DATA(s1);
data2 = __Pyx_PyUnicode_DATA(s2);
if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) {
goto return_ne;
} else if (length == 1) {
goto return_eq;
} else {
int result = memcmp(data1, data2, (size_t)(length * kind));
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ) ? (result == 0) : (result != 0);
}
} else if ((s1 == Py_None) & s2_is_unicode) {
goto return_ne;
} else if ((s2 == Py_None) & s1_is_unicode) {
goto return_ne;
} else {
int result;
PyObject* py_result = PyObject_RichCompare(s1, s2, equals);
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
if (!py_result)
return -1;
result = __Pyx_PyObject_IsTrue(py_result);
Py_DECREF(py_result);
return result;
}
return_eq:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_EQ);
return_ne:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(owned_ref);
#endif
return (equals == Py_NE);
#endif
}
/* None (Py_ssize_t floor division) */
static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) {
Py_ssize_t q = a / b;
Py_ssize_t r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
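/* C division truncates toward zero while Python floors: -7 // 2 is -4 in
   Python, but -7 / 2 is -3 in C. The branchless correction above subtracts 1
   exactly when the remainder is nonzero and has the opposite sign of the
   divisor ((r ^ b) < 0). A sketch with hypothetical demo_* names; the same
   correction is applied to long in __Pyx_div_long further below: */
#include <assert.h>
static void demo_floordiv_check(void) {
    long a = -7, b = 2;
    long q = a / b;                     /* -3: truncated toward zero */
    long r = a - q * b;                 /* -1: remainder keeps a's sign */
    q -= ((r != 0) & ((r ^ b) < 0));    /* corrected to -4, Python's floor */
    assert(q == -4 && "floor division correction");
}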
/* GetAttr */
static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) {
#if CYTHON_USE_TYPE_SLOTS
#if PY_MAJOR_VERSION >= 3
if (likely(PyUnicode_Check(n)))
#else
if (likely(PyString_Check(n)))
#endif
return __Pyx_PyObject_GetAttrStr(o, n);
#endif
return PyObject_GetAttr(o, n);
}
/* GetItemInt */
static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) {
PyObject *r;
if (!j) return NULL;
r = PyObject_GetItem(o, j);
Py_DECREF(j);
return r;
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyList_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) {
PyObject *r = PyList_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS
Py_ssize_t wrapped_i = i;
if (wraparound & unlikely(i < 0)) {
wrapped_i += PyTuple_GET_SIZE(o);
}
if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, wrapped_i);
Py_INCREF(r);
return r;
}
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
#else
return PySequence_GetItem(o, i);
#endif
}
static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list,
CYTHON_NCP_UNUSED int wraparound,
CYTHON_NCP_UNUSED int boundscheck) {
#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS
if (is_list || PyList_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o);
if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) {
PyObject *r = PyList_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
}
else if (PyTuple_CheckExact(o)) {
Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o);
if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) {
PyObject *r = PyTuple_GET_ITEM(o, n);
Py_INCREF(r);
return r;
}
} else {
PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence;
if (likely(m && m->sq_item)) {
if (wraparound && unlikely(i < 0) && likely(m->sq_length)) {
Py_ssize_t l = m->sq_length(o);
if (likely(l >= 0)) {
i += l;
} else {
if (!PyErr_ExceptionMatches(PyExc_OverflowError))
return NULL;
PyErr_Clear();
}
}
return m->sq_item(o, i);
}
}
#else
if (is_list || PySequence_Check(o)) {
return PySequence_GetItem(o, i);
}
#endif
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i));
}
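/* The fast paths above first normalize a negative index by adding the
   sequence length (Python's seq[-1] is seq[len-1]) and only then bounds-check
   it, falling back to the generic protocol otherwise. The normalization step
   in isolation, as a sketch (demo_* is hypothetical): */
static long demo_normalize_index(long i, long length) {
    if (i < 0)
        i += length;                        /* wraparound for negative indices */
    return (i >= 0 && i < length) ? i : -1; /* -1 signals out of bounds */
}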
/* ObjectGetItem */
#if CYTHON_USE_TYPE_SLOTS
static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject* index) {
PyObject *runerr;
Py_ssize_t key_value;
PySequenceMethods *m = Py_TYPE(obj)->tp_as_sequence;
if (unlikely(!(m && m->sq_item))) {
PyErr_Format(PyExc_TypeError, "'%.200s' object is not subscriptable", Py_TYPE(obj)->tp_name);
return NULL;
}
key_value = __Pyx_PyIndex_AsSsize_t(index);
if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) {
return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1);
}
if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) {
PyErr_Clear();
PyErr_Format(PyExc_IndexError, "cannot fit '%.200s' into an index-sized integer", Py_TYPE(index)->tp_name);
}
return NULL;
}
static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject* key) {
PyMappingMethods *m = Py_TYPE(obj)->tp_as_mapping;
if (likely(m && m->mp_subscript)) {
return m->mp_subscript(obj, key);
}
return __Pyx_PyObject_GetIndex(obj, key);
}
#endif
/* decode_c_string */
static CYTHON_INLINE PyObject* __Pyx_decode_c_string(
const char* cstring, Py_ssize_t start, Py_ssize_t stop,
const char* encoding, const char* errors,
PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) {
Py_ssize_t length;
if (unlikely((start < 0) | (stop < 0))) {
size_t slen = strlen(cstring);
if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) {
PyErr_SetString(PyExc_OverflowError,
"c-string too long to convert to Python");
return NULL;
}
length = (Py_ssize_t) slen;
if (start < 0) {
start += length;
if (start < 0)
start = 0;
}
if (stop < 0)
stop += length;
}
if (unlikely(stop <= start))
return __Pyx_NewRef(__pyx_empty_unicode);
length = stop - start;
cstring += start;
if (decode_func) {
return decode_func(cstring, length, errors);
} else {
return PyUnicode_Decode(cstring, length, encoding, errors);
}
}
/* GetAttr3 */
static PyObject *__Pyx_GetAttr3Default(PyObject *d) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
return NULL;
__Pyx_PyErr_Clear();
Py_INCREF(d);
return d;
}
static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) {
PyObject *r = __Pyx_GetAttr(o, n);
return (likely(r)) ? r : __Pyx_GetAttr3Default(d);
}
/* RaiseTooManyValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) {
PyErr_Format(PyExc_ValueError,
"too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected);
}
/* RaiseNeedMoreValuesToUnpack */
static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) {
PyErr_Format(PyExc_ValueError,
"need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack",
index, (index == 1) ? "" : "s");
}
/* RaiseNoneIterError */
static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) {
PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable");
}
/* ExtTypeTest */
static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) {
if (unlikely(!type)) {
PyErr_SetString(PyExc_SystemError, "Missing type object");
return 0;
}
if (likely(__Pyx_TypeCheck(obj, type)))
return 1;
PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s",
Py_TYPE(obj)->tp_name, type->tp_name);
return 0;
}
/* SwapException */
#if CYTHON_FAST_THREAD_STATE
static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
#if CYTHON_USE_EXC_INFO_STACK
_PyErr_StackItem *exc_info = tstate->exc_info;
tmp_type = exc_info->exc_type;
tmp_value = exc_info->exc_value;
tmp_tb = exc_info->exc_traceback;
exc_info->exc_type = *type;
exc_info->exc_value = *value;
exc_info->exc_traceback = *tb;
#else
tmp_type = tstate->exc_type;
tmp_value = tstate->exc_value;
tmp_tb = tstate->exc_traceback;
tstate->exc_type = *type;
tstate->exc_value = *value;
tstate->exc_traceback = *tb;
#endif
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#else
static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) {
PyObject *tmp_type, *tmp_value, *tmp_tb;
PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb);
PyErr_SetExcInfo(*type, *value, *tb);
*type = tmp_type;
*value = tmp_value;
*tb = tmp_tb;
}
#endif
/* Import */
static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) {
PyObject *empty_list = 0;
PyObject *module = 0;
PyObject *global_dict = 0;
PyObject *empty_dict = 0;
PyObject *list;
#if PY_MAJOR_VERSION < 3
PyObject *py_import;
py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import);
if (!py_import)
goto bad;
#endif
if (from_list)
list = from_list;
else {
empty_list = PyList_New(0);
if (!empty_list)
goto bad;
list = empty_list;
}
global_dict = PyModule_GetDict(__pyx_m);
if (!global_dict)
goto bad;
empty_dict = PyDict_New();
if (!empty_dict)
goto bad;
{
#if PY_MAJOR_VERSION >= 3
if (level == -1) {
if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) {
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, 1);
if (!module) {
if (!PyErr_ExceptionMatches(PyExc_ImportError))
goto bad;
PyErr_Clear();
}
}
level = 0;
}
#endif
if (!module) {
#if PY_MAJOR_VERSION < 3
PyObject *py_level = PyInt_FromLong(level);
if (!py_level)
goto bad;
module = PyObject_CallFunctionObjArgs(py_import,
name, global_dict, empty_dict, list, py_level, (PyObject *)NULL);
Py_DECREF(py_level);
#else
module = PyImport_ImportModuleLevelObject(
name, global_dict, empty_dict, list, level);
#endif
}
}
bad:
#if PY_MAJOR_VERSION < 3
Py_XDECREF(py_import);
#endif
Py_XDECREF(empty_list);
Py_XDECREF(empty_dict);
return module;
}
/* FastTypeChecks */
#if CYTHON_COMPILING_IN_CPYTHON
static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) {
while (a) {
a = a->tp_base;
if (a == b)
return 1;
}
return b == &PyBaseObject_Type;
}
static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) {
PyObject *mro;
if (a == b) return 1;
mro = a->tp_mro;
if (likely(mro)) {
Py_ssize_t i, n;
n = PyTuple_GET_SIZE(mro);
for (i = 0; i < n; i++) {
if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b)
return 1;
}
return 0;
}
return __Pyx_InBases(a, b);
}
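/* __Pyx_IsSubtype above prefers a linear scan of the precomputed MRO tuple
   and only falls back to walking tp_base links when no MRO is available;
   __Pyx_InBases performs that fallback walk. The base-chain walk reduced to
   plain structs, as a sketch (demo_* names are hypothetical): */
typedef struct demo_type { const struct demo_type *base; } demo_type;
static int demo_in_bases(const demo_type *a, const demo_type *b) {
    while (a) {
        a = a->base;           /* step to the parent type */
        if (a == b) return 1;  /* found b somewhere up the chain */
    }
    return 0;                  /* reached the root without a match */
}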
#if PY_MAJOR_VERSION == 2
static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) {
PyObject *exception, *value, *tb;
int res;
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
__Pyx_ErrFetch(&exception, &value, &tb);
res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0;
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
if (!res) {
res = PyObject_IsSubclass(err, exc_type2);
if (unlikely(res == -1)) {
PyErr_WriteUnraisable(err);
res = 0;
}
}
__Pyx_ErrRestore(exception, value, tb);
return res;
}
#else
static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) {
int res = exc_type1 ? __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
if (!res) {
res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
}
return res;
}
#endif
static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
Py_ssize_t i, n;
assert(PyExceptionClass_Check(exc_type));
n = PyTuple_GET_SIZE(tuple);
#if PY_MAJOR_VERSION >= 3
for (i=0; i<n; i++) {
if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
}
#endif
for (i=0; i<n; i++) {
PyObject *t = PyTuple_GET_ITEM(tuple, i);
#if PY_MAJOR_VERSION < 3
if (likely(exc_type == t)) return 1;
#endif
if (likely(PyExceptionClass_Check(t))) {
if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
} else {
}
}
return 0;
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
if (likely(err == exc_type)) return 1;
if (likely(PyExceptionClass_Check(err))) {
if (likely(PyExceptionClass_Check(exc_type))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
} else if (likely(PyTuple_Check(exc_type))) {
return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
} else {
}
}
return PyErr_GivenExceptionMatches(err, exc_type);
}
static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
assert(PyExceptionClass_Check(exc_type1));
assert(PyExceptionClass_Check(exc_type2));
if (likely(err == exc_type1 || err == exc_type2)) return 1;
if (likely(PyExceptionClass_Check(err))) {
return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
}
return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
}
#endif
/* PyIntBinop */
#if !CYTHON_COMPILING_IN_PYPY
static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, int inplace, int zerodivision_check) {
(void)inplace;
(void)zerodivision_check;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(op1))) {
const long b = intval;
long x;
long a = PyInt_AS_LONG(op1);
x = (long)((unsigned long)a + b);
if (likely((x^a) >= 0 || (x^b) >= 0))
return PyInt_FromLong(x);
return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
#endif
#if CYTHON_USE_PYLONG_INTERNALS
if (likely(PyLong_CheckExact(op1))) {
const long b = intval;
long a, x;
#ifdef HAVE_LONG_LONG
const PY_LONG_LONG llb = intval;
PY_LONG_LONG lla, llx;
#endif
const digit* digits = ((PyLongObject*)op1)->ob_digit;
const Py_ssize_t size = Py_SIZE(op1);
if (likely(__Pyx_sst_abs(size) <= 1)) {
a = likely(size) ? digits[0] : 0;
if (size == -1) a = -a;
} else {
switch (size) {
case -2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 2:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 3:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case -4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
case 4:
if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]));
break;
#ifdef HAVE_LONG_LONG
} else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) {
lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]));
goto long_long;
#endif
}
CYTHON_FALLTHROUGH;
default: return PyLong_Type.tp_as_number->nb_add(op1, op2);
}
}
x = a + b;
return PyLong_FromLong(x);
#ifdef HAVE_LONG_LONG
long_long:
llx = lla + llb;
return PyLong_FromLongLong(llx);
#endif
}
#endif
if (PyFloat_CheckExact(op1)) {
const long b = intval;
double a = PyFloat_AS_DOUBLE(op1);
double result;
PyFPE_START_PROTECT("add", return NULL)
result = ((double)a) + (double)b;
PyFPE_END_PROTECT(result)
return PyFloat_FromDouble(result);
}
return (inplace ? PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2);
}
#endif
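/* The fast integer path above adds in unsigned arithmetic (well-defined on
   wraparound) and then detects signed overflow with (x^a) >= 0 || (x^b) >= 0:
   the sum x can only have overflowed if it disagrees in sign with BOTH
   operands. The predicate isolated as a sketch (demo_* is hypothetical; the
   unsigned-to-signed conversion is implementation-defined, as in the code
   above): */
static int demo_add_overflows(long a, long b) {
    long x = (long)((unsigned long)a + (unsigned long)b);
    return ((x ^ a) < 0) && ((x ^ b) < 0);  /* 1 iff a + b overflowed long */
}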
/* None (RaiseUnboundLocalError) */
static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) {
PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname);
}
/* None (long floor division) */
static CYTHON_INLINE long __Pyx_div_long(long a, long b) {
long q = a / b;
long r = a - q*b;
q -= ((r != 0) & ((r ^ b) < 0));
return q;
}
/* ImportFrom */
static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) {
PyObject* value = __Pyx_PyObject_GetAttrStr(module, name);
if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) {
PyErr_Format(PyExc_ImportError,
#if PY_MAJOR_VERSION < 3
"cannot import name %.230s", PyString_AS_STRING(name));
#else
"cannot import name %S", name);
#endif
}
return value;
}
/* HasAttr */
static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) {
PyObject *r;
if (unlikely(!__Pyx_PyBaseString_Check(n))) {
PyErr_SetString(PyExc_TypeError,
"hasattr(): attribute name must be string");
return -1;
}
r = __Pyx_GetAttr(o, n);
if (unlikely(!r)) {
PyErr_Clear();
return 0;
} else {
Py_DECREF(r);
return 1;
}
}
/* PyObject_GenericGetAttrNoDict */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) {
PyErr_Format(PyExc_AttributeError,
#if PY_MAJOR_VERSION >= 3
"'%.50s' object has no attribute '%U'",
tp->tp_name, attr_name);
#else
"'%.50s' object has no attribute '%.400s'",
tp->tp_name, PyString_AS_STRING(attr_name));
#endif
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) {
PyObject *descr;
PyTypeObject *tp = Py_TYPE(obj);
if (unlikely(!PyString_Check(attr_name))) {
return PyObject_GenericGetAttr(obj, attr_name);
}
assert(!tp->tp_dictoffset);
descr = _PyType_Lookup(tp, attr_name);
if (unlikely(!descr)) {
return __Pyx_RaiseGenericGetAttributeError(tp, attr_name);
}
Py_INCREF(descr);
#if PY_MAJOR_VERSION < 3
if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS)))
#endif
{
descrgetfunc f = Py_TYPE(descr)->tp_descr_get;
if (unlikely(f)) {
PyObject *res = f(descr, obj, (PyObject *)tp);
Py_DECREF(descr);
return res;
}
}
return descr;
}
#endif
/* PyObject_GenericGetAttr */
#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) {
if (unlikely(Py_TYPE(obj)->tp_dictoffset)) {
return PyObject_GenericGetAttr(obj, attr_name);
}
return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name);
}
#endif
/* SetVTable */
static int __Pyx_SetVtable(PyObject *dict, void *vtable) {
#if PY_VERSION_HEX >= 0x02070000
PyObject *ob = PyCapsule_New(vtable, 0, 0);
#else
PyObject *ob = PyCObject_FromVoidPtr(vtable, 0);
#endif
if (!ob)
goto bad;
if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0)
goto bad;
Py_DECREF(ob);
return 0;
bad:
Py_XDECREF(ob);
return -1;
}
/* PyObjectGetAttrStrNoError */
static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) {
__Pyx_PyThreadState_declare
__Pyx_PyThreadState_assign
if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError)))
__Pyx_PyErr_Clear();
}
static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) {
PyObject *result;
#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1
PyTypeObject* tp = Py_TYPE(obj);
if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) {
return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1);
}
#endif
result = __Pyx_PyObject_GetAttrStr(obj, attr_name);
if (unlikely(!result)) {
__Pyx_PyObject_GetAttrStr_ClearAttributeError();
}
return result;
}
/* SetupReduce */
static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) {
int ret;
PyObject *name_attr;
name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2);
if (likely(name_attr)) {
ret = PyObject_RichCompareBool(name_attr, name, Py_EQ);
} else {
ret = -1;
}
if (unlikely(ret < 0)) {
PyErr_Clear();
ret = 0;
}
Py_XDECREF(name_attr);
return ret;
}
static int __Pyx_setup_reduce(PyObject* type_obj) {
int ret = 0;
PyObject *object_reduce = NULL;
PyObject *object_reduce_ex = NULL;
PyObject *reduce = NULL;
PyObject *reduce_ex = NULL;
PyObject *reduce_cython = NULL;
PyObject *setstate = NULL;
PyObject *setstate_cython = NULL;
#if CYTHON_USE_PYTYPE_LOOKUP
if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#else
if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto __PYX_GOOD;
#endif
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#else
object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD;
#endif
reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD;
if (reduce_ex == object_reduce_ex) {
#if CYTHON_USE_PYTYPE_LOOKUP
object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#else
object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD;
#endif
reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD;
if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) {
reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython);
if (likely(reduce_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (reduce == object_reduce || PyErr_Occurred()) {
goto __PYX_BAD;
}
setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate);
if (!setstate) PyErr_Clear();
if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) {
setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython);
if (likely(setstate_cython)) {
ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD;
} else if (!setstate || PyErr_Occurred()) {
goto __PYX_BAD;
}
}
PyType_Modified((PyTypeObject*)type_obj);
}
}
goto __PYX_GOOD;
__PYX_BAD:
if (!PyErr_Occurred())
PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name);
ret = -1;
__PYX_GOOD:
#if !CYTHON_USE_PYTYPE_LOOKUP
Py_XDECREF(object_reduce);
Py_XDECREF(object_reduce_ex);
#endif
Py_XDECREF(reduce);
Py_XDECREF(reduce_ex);
Py_XDECREF(reduce_cython);
Py_XDECREF(setstate);
Py_XDECREF(setstate_cython);
return ret;
}
/* TypeImport */
#ifndef __PYX_HAVE_RT_ImportType
#define __PYX_HAVE_RT_ImportType
static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name,
size_t size, enum __Pyx_ImportType_CheckSize check_size)
{
PyObject *result = 0;
char warning[200];
Py_ssize_t basicsize;
#ifdef Py_LIMITED_API
PyObject *py_basicsize;
#endif
result = PyObject_GetAttrString(module, class_name);
if (!result)
goto bad;
if (!PyType_Check(result)) {
PyErr_Format(PyExc_TypeError,
"%.200s.%.200s is not a type object",
module_name, class_name);
goto bad;
}
#ifndef Py_LIMITED_API
basicsize = ((PyTypeObject *)result)->tp_basicsize;
#else
py_basicsize = PyObject_GetAttrString(result, "__basicsize__");
if (!py_basicsize)
goto bad;
basicsize = PyLong_AsSsize_t(py_basicsize);
Py_DECREF(py_basicsize);
py_basicsize = 0;
if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred())
goto bad;
#endif
if ((size_t)basicsize < size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) {
PyErr_Format(PyExc_ValueError,
"%.200s.%.200s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
goto bad;
}
else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) {
PyOS_snprintf(warning, sizeof(warning),
"%s.%s size changed, may indicate binary incompatibility. "
"Expected %zd from C header, got %zd from PyObject",
module_name, class_name, size, basicsize);
if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad;
}
return (PyTypeObject *)result;
bad:
Py_XDECREF(result);
return NULL;
}
#endif
/* CLineInTraceback */
#ifndef CYTHON_CLINE_IN_TRACEBACK
static int __Pyx_CLineForTraceback(CYTHON_NCP_UNUSED PyThreadState *tstate, int c_line) {
PyObject *use_cline;
PyObject *ptype, *pvalue, *ptraceback;
#if CYTHON_COMPILING_IN_CPYTHON
PyObject **cython_runtime_dict;
#endif
if (unlikely(!__pyx_cython_runtime)) {
return c_line;
}
__Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback);
#if CYTHON_COMPILING_IN_CPYTHON
cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime);
if (likely(cython_runtime_dict)) {
__PYX_PY_DICT_LOOKUP_IF_MODIFIED(
use_cline, *cython_runtime_dict,
__Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback))
} else
#endif
{
PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback);
if (use_cline_obj) {
use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True;
Py_DECREF(use_cline_obj);
} else {
PyErr_Clear();
use_cline = NULL;
}
}
if (!use_cline) {
c_line = 0;
PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False);
}
else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) {
c_line = 0;
}
__Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback);
return c_line;
}
#endif
/* CodeObjectCache */
static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) {
int start = 0, mid = 0, end = count - 1;
if (end >= 0 && code_line > entries[end].code_line) {
return count;
}
while (start < end) {
mid = start + (end - start) / 2;
if (code_line < entries[mid].code_line) {
end = mid;
} else if (code_line > entries[mid].code_line) {
start = mid + 1;
} else {
return mid;
}
}
if (code_line <= entries[mid].code_line) {
return mid;
} else {
return mid + 1;
}
}
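/* __pyx_bisect_code_objects is a binary search over the sorted cache that
   returns either the matching slot or the insertion point that keeps
   entries[] sorted (used by __pyx_insert_code_object below). The standard
   lower-bound formulation of the same idea, as a sketch over a plain int
   array (demo_* is hypothetical): */
static int demo_lower_bound(const int *xs, int n, int key) {
    int lo = 0, hi = n;                    /* search the half-open range [lo, hi) */
    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;      /* overflow-safe midpoint */
        if (xs[mid] < key) lo = mid + 1;
        else               hi = mid;
    }
    return lo;                             /* first index with xs[i] >= key */
}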
static PyCodeObject *__pyx_find_code_object(int code_line) {
PyCodeObject* code_object;
int pos;
if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) {
return NULL;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) {
return NULL;
}
code_object = __pyx_code_cache.entries[pos].code_object;
Py_INCREF(code_object);
return code_object;
}
static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) {
int pos, i;
__Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries;
if (unlikely(!code_line)) {
return;
}
if (unlikely(!entries)) {
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry));
if (likely(entries)) {
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = 64;
__pyx_code_cache.count = 1;
entries[0].code_line = code_line;
entries[0].code_object = code_object;
Py_INCREF(code_object);
}
return;
}
pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line);
if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) {
PyCodeObject* tmp = entries[pos].code_object;
entries[pos].code_object = code_object;
Py_DECREF(tmp);
return;
}
if (__pyx_code_cache.count == __pyx_code_cache.max_count) {
int new_max = __pyx_code_cache.max_count + 64;
entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc(
__pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry));
if (unlikely(!entries)) {
return;
}
__pyx_code_cache.entries = entries;
__pyx_code_cache.max_count = new_max;
}
for (i=__pyx_code_cache.count; i>pos; i--) {
entries[i] = entries[i-1];
}
entries[pos].code_line = code_line;
entries[pos].code_object = code_object;
__pyx_code_cache.count++;
Py_INCREF(code_object);
}
/* AddTraceback */
#include "compile.h"
#include "frameobject.h"
#include "traceback.h"
static PyCodeObject* __Pyx_CreateCodeObjectForTraceback(
const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyObject *py_srcfile = 0;
PyObject *py_funcname = 0;
#if PY_MAJOR_VERSION < 3
py_srcfile = PyString_FromString(filename);
#else
py_srcfile = PyUnicode_FromString(filename);
#endif
if (!py_srcfile) goto bad;
if (c_line) {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#else
py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line);
#endif
}
else {
#if PY_MAJOR_VERSION < 3
py_funcname = PyString_FromString(funcname);
#else
py_funcname = PyUnicode_FromString(funcname);
#endif
}
if (!py_funcname) goto bad;
py_code = __Pyx_PyCode_New(
0,
0,
0,
0,
0,
__pyx_empty_bytes, /*PyObject *code,*/
__pyx_empty_tuple, /*PyObject *consts,*/
__pyx_empty_tuple, /*PyObject *names,*/
__pyx_empty_tuple, /*PyObject *varnames,*/
__pyx_empty_tuple, /*PyObject *freevars,*/
__pyx_empty_tuple, /*PyObject *cellvars,*/
py_srcfile, /*PyObject *filename,*/
py_funcname, /*PyObject *name,*/
py_line,
__pyx_empty_bytes /*PyObject *lnotab*/
);
Py_DECREF(py_srcfile);
Py_DECREF(py_funcname);
return py_code;
bad:
Py_XDECREF(py_srcfile);
Py_XDECREF(py_funcname);
return NULL;
}
static void __Pyx_AddTraceback(const char *funcname, int c_line,
int py_line, const char *filename) {
PyCodeObject *py_code = 0;
PyFrameObject *py_frame = 0;
PyThreadState *tstate = __Pyx_PyThreadState_Current;
if (c_line) {
c_line = __Pyx_CLineForTraceback(tstate, c_line);
}
py_code = __pyx_find_code_object(c_line ? -c_line : py_line);
if (!py_code) {
py_code = __Pyx_CreateCodeObjectForTraceback(
funcname, c_line, py_line, filename);
if (!py_code) goto bad;
__pyx_insert_code_object(c_line ? -c_line : py_line, py_code);
}
py_frame = PyFrame_New(
tstate, /*PyThreadState *tstate,*/
py_code, /*PyCodeObject *code,*/
__pyx_d, /*PyObject *globals,*/
0 /*PyObject *locals*/
);
if (!py_frame) goto bad;
__Pyx_PyFrame_SetLineNumber(py_frame, py_line);
PyTraceBack_Here(py_frame);
bad:
Py_XDECREF(py_code);
Py_XDECREF(py_frame);
}
#if PY_MAJOR_VERSION < 3
static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) {
if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags);
if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags);
PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name);
return -1;
}
static void __Pyx_ReleaseBuffer(Py_buffer *view) {
PyObject *obj = view->obj;
if (!obj) return;
if (PyObject_CheckBuffer(obj)) {
PyBuffer_Release(view);
return;
}
if ((0)) {}
view->obj = NULL;
Py_DECREF(obj);
}
#endif
/* MemviewSliceIsContig */
static int
__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim)
{
int i, index, step, start;
Py_ssize_t itemsize = mvs.memview->view.itemsize;
if (order == 'F') {
step = 1;
start = 0;
} else {
step = -1;
start = ndim - 1;
}
for (i = 0; i < ndim; i++) {
index = start + step * i;
if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize)
return 0;
itemsize *= mvs.shape[index];
}
return 1;
}
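/* The contiguity test above walks the dimensions from the fastest-varying
   one outward ('F' order starts at dim 0, 'C' order at the last dim) and
   requires each stride to equal the running product of the itemsize and the
   extents seen so far. The C-order case in isolation, as a sketch (demo_*
   is hypothetical; extent-1 dimensions are skipped since their stride is
   irrelevant): */
#include <stddef.h>
static int demo_is_c_contig(const ptrdiff_t *shape, const ptrdiff_t *strides,
                            int ndim, ptrdiff_t itemsize) {
    ptrdiff_t expected = itemsize;
    int i;
    for (i = ndim - 1; i >= 0; i--) {      /* last dimension varies fastest */
        if (shape[i] > 1 && strides[i] != expected) return 0;
        expected *= shape[i];
    }
    return 1;
}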
/* OverlappingSlices */
static void
__pyx_get_array_memory_extents(__Pyx_memviewslice *slice,
void **out_start, void **out_end,
int ndim, size_t itemsize)
{
char *start, *end;
int i;
start = end = slice->data;
for (i = 0; i < ndim; i++) {
Py_ssize_t stride = slice->strides[i];
Py_ssize_t extent = slice->shape[i];
if (extent == 0) {
*out_start = *out_end = start;
return;
} else {
if (stride > 0)
end += stride * (extent - 1);
else
start += stride * (extent - 1);
}
}
*out_start = start;
*out_end = end + itemsize;
}
static int
__pyx_slices_overlap(__Pyx_memviewslice *slice1,
__Pyx_memviewslice *slice2,
int ndim, size_t itemsize)
{
void *start1, *end1, *start2, *end2;
__pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize);
__pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize);
return (start1 < end2) && (start2 < end1);
}
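/* __pyx_get_array_memory_extents reduces each slice to a half-open byte
   range [start, end) by pushing end along positive strides and start along
   negative ones; two slices can then only alias if those ranges intersect,
   which is the classic interval test used above. Restated standalone as a
   sketch (demo_* is hypothetical): */
static int demo_intervals_overlap(const void *s1, const void *e1,
                                  const void *s2, const void *e2) {
    /* [s1, e1) and [s2, e2) intersect iff each starts before the other ends. */
    return ((const char *)s1 < (const char *)e2) &&
           ((const char *)s2 < (const char *)e1);
}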
/* Capsule */
static CYTHON_INLINE PyObject *
__pyx_capsule_create(void *p, CYTHON_UNUSED const char *sig)
{
PyObject *cobj;
#if PY_VERSION_HEX >= 0x02070000
cobj = PyCapsule_New(p, sig, NULL);
#else
cobj = PyCObject_FromVoidPtr(p, NULL);
#endif
return cobj;
}
/* IsLittleEndian */
static CYTHON_INLINE int __Pyx_Is_Little_Endian(void)
{
union {
uint32_t u32;
uint8_t u8[4];
} S;
S.u32 = 0x01020304;
return S.u8[0] == 4;
}
/* BufferFormatCheck */
static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx,
__Pyx_BufFmt_StackElem* stack,
__Pyx_TypeInfo* type) {
stack[0].field = &ctx->root;
stack[0].parent_offset = 0;
ctx->root.type = type;
ctx->root.name = "buffer dtype";
ctx->root.offset = 0;
ctx->head = stack;
ctx->head->field = &ctx->root;
ctx->fmt_offset = 0;
ctx->head->parent_offset = 0;
ctx->new_packmode = '@';
ctx->enc_packmode = '@';
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->is_complex = 0;
ctx->is_valid_array = 0;
ctx->struct_alignment = 0;
while (type->typegroup == 'S') {
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = 0;
type = type->fields->type;
}
}
static int __Pyx_BufFmt_ParseNumber(const char** ts) {
int count;
const char* t = *ts;
if (*t < '0' || *t > '9') {
return -1;
} else {
count = *t++ - '0';
while (*t >= '0' && *t <= '9') {
count *= 10;
count += *t++ - '0';
}
}
*ts = t;
return count;
}
static int __Pyx_BufFmt_ExpectNumber(const char **ts) {
int number = __Pyx_BufFmt_ParseNumber(ts);
if (number == -1)
PyErr_Format(PyExc_ValueError,
"Does not understand character buffer dtype format string ('%c')", **ts);
return number;
}
static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) {
PyErr_Format(PyExc_ValueError,
"Unexpected format string character: '%c'", ch);
}
static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) {
switch (ch) {
case '?': return "'bool'";
case 'c': return "'char'";
case 'b': return "'signed char'";
case 'B': return "'unsigned char'";
case 'h': return "'short'";
case 'H': return "'unsigned short'";
case 'i': return "'int'";
case 'I': return "'unsigned int'";
case 'l': return "'long'";
case 'L': return "'unsigned long'";
case 'q': return "'long long'";
case 'Q': return "'unsigned long long'";
case 'f': return (is_complex ? "'complex float'" : "'float'");
case 'd': return (is_complex ? "'complex double'" : "'double'");
case 'g': return (is_complex ? "'complex long double'" : "'long double'");
case 'T': return "a struct";
case 'O': return "Python object";
case 'P': return "a pointer";
case 's': case 'p': return "a string";
case 0: return "end";
default: return "unparseable format string";
}
}
static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return 2;
case 'i': case 'I': case 'l': case 'L': return 4;
case 'q': case 'Q': return 8;
case 'f': return (is_complex ? 8 : 4);
case 'd': return (is_complex ? 16 : 8);
case 'g': {
PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').");
return 0;
}
case 'O': case 'P': return sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(short);
case 'i': case 'I': return sizeof(int);
case 'l': case 'L': return sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(float) * (is_complex ? 2 : 1);
case 'd': return sizeof(double) * (is_complex ? 2 : 1);
case 'g': return sizeof(long double) * (is_complex ? 2 : 1);
case 'O': case 'P': return sizeof(void*);
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
typedef struct { char c; short x; } __Pyx_st_short;
typedef struct { char c; int x; } __Pyx_st_int;
typedef struct { char c; long x; } __Pyx_st_long;
typedef struct { char c; float x; } __Pyx_st_float;
typedef struct { char c; double x; } __Pyx_st_double;
typedef struct { char c; long double x; } __Pyx_st_longdouble;
typedef struct { char c; void *x; } __Pyx_st_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_st_float) - sizeof(float);
case 'd': return sizeof(__Pyx_st_double) - sizeof(double);
case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
/* These are for computing the padding at the end of the struct to align
   on the first member of the struct. This will probably be the same as above,
   but we don't have any guarantees.
*/
typedef struct { short x; char c; } __Pyx_pad_short;
typedef struct { int x; char c; } __Pyx_pad_int;
typedef struct { long x; char c; } __Pyx_pad_long;
typedef struct { float x; char c; } __Pyx_pad_float;
typedef struct { double x; char c; } __Pyx_pad_double;
typedef struct { long double x; char c; } __Pyx_pad_longdouble;
typedef struct { void *x; char c; } __Pyx_pad_void_p;
#ifdef HAVE_LONG_LONG
typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong;
#endif
static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) {
switch (ch) {
case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1;
case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short);
case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int);
case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long);
#ifdef HAVE_LONG_LONG
case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG);
#endif
case 'f': return sizeof(__Pyx_pad_float) - sizeof(float);
case 'd': return sizeof(__Pyx_pad_double) - sizeof(double);
case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double);
case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*);
default:
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
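/* The __Pyx_st_* and __Pyx_pad_* structs implement the classic pre-C11
   alignment probe: in struct { char c; T x; } the compiler inserts padding
   after c so that x is properly aligned, hence sizeof(struct) - sizeof(T)
   recovers T's alignment without _Alignof. A sanity check for double, as a
   sketch (demo_* is hypothetical; the power-of-two property holds on common
   ABIs): */
#include <stddef.h>
#include <assert.h>
typedef struct { char c; double x; } demo_align_double;
static void demo_alignment_check(void) {
    size_t a = sizeof(demo_align_double) - sizeof(double);
    assert(a >= 1 && (a & (a - 1)) == 0);  /* alignment is a power of two */
}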
static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) {
switch (ch) {
case 'c':
return 'H';
case 'b': case 'h': case 'i':
case 'l': case 'q': case 's': case 'p':
return 'I';
case '?': case 'B': case 'H': case 'I': case 'L': case 'Q':
return 'U';
case 'f': case 'd': case 'g':
return (is_complex ? 'C' : 'R');
case 'O':
return 'O';
case 'P':
return 'P';
default: {
__Pyx_BufFmt_RaiseUnexpectedChar(ch);
return 0;
}
}
}
static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) {
if (ctx->head == NULL || ctx->head->field == &ctx->root) {
const char* expected;
const char* quote;
if (ctx->head == NULL) {
expected = "end";
quote = "";
} else {
expected = ctx->head->field->type->name;
quote = "'";
}
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected %s%s%s but got %s",
quote, expected, quote,
__Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex));
} else {
__Pyx_StructField* field = ctx->head->field;
__Pyx_StructField* parent = (ctx->head - 1)->field;
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'",
field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex),
parent->type->name, field->name);
}
}
static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) {
char group;
size_t size, offset, arraysize = 1;
if (ctx->enc_type == 0) return 0;
if (ctx->head->field->type->arraysize[0]) {
int i, ndim = 0;
if (ctx->enc_type == 's' || ctx->enc_type == 'p') {
ctx->is_valid_array = ctx->head->field->type->ndim == 1;
ndim = 1;
if (ctx->enc_count != ctx->head->field->type->arraysize[0]) {
PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %zu",
ctx->head->field->type->arraysize[0], ctx->enc_count);
return -1;
}
}
if (!ctx->is_valid_array) {
PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d",
ctx->head->field->type->ndim, ndim);
return -1;
}
for (i = 0; i < ctx->head->field->type->ndim; i++) {
arraysize *= ctx->head->field->type->arraysize[i];
}
ctx->is_valid_array = 0;
ctx->enc_count = 1;
}
group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex);
do {
__Pyx_StructField* field = ctx->head->field;
__Pyx_TypeInfo* type = field->type;
if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') {
size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex);
} else {
size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex);
}
if (ctx->enc_packmode == '@') {
size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex);
size_t align_mod_offset;
if (align_at == 0) return -1;
align_mod_offset = ctx->fmt_offset % align_at;
if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset;
if (ctx->struct_alignment == 0)
ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type,
ctx->is_complex);
}
if (type->size != size || type->typegroup != group) {
if (type->typegroup == 'C' && type->fields != NULL) {
size_t parent_offset = ctx->head->parent_offset + field->offset;
++ctx->head;
ctx->head->field = type->fields;
ctx->head->parent_offset = parent_offset;
continue;
}
if ((type->typegroup == 'H' || group == 'H') && type->size == size) {
} else {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
}
offset = ctx->head->parent_offset + field->offset;
if (ctx->fmt_offset != offset) {
PyErr_Format(PyExc_ValueError,
"Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected",
(Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset);
return -1;
}
ctx->fmt_offset += size;
if (arraysize)
ctx->fmt_offset += (arraysize - 1) * size;
--ctx->enc_count;
while (1) {
if (field == &ctx->root) {
ctx->head = NULL;
if (ctx->enc_count != 0) {
__Pyx_BufFmt_RaiseExpected(ctx);
return -1;
}
break;
}
ctx->head->field = ++field;
if (field->type == NULL) {
--ctx->head;
field = ctx->head->field;
continue;
} else if (field->type->typegroup == 'S') {
size_t parent_offset = ctx->head->parent_offset + field->offset;
if (field->type->fields->type == NULL) continue;
field = field->type->fields;
++ctx->head;
ctx->head->field = field;
ctx->head->parent_offset = parent_offset;
break;
} else {
break;
}
}
} while (ctx->enc_count);
ctx->enc_type = 0;
ctx->is_complex = 0;
return 0;
}
static PyObject *
__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp)
{
const char *ts = *tsp;
int i = 0, number, ndim;
++ts;
if (ctx->new_count != 1) {
PyErr_SetString(PyExc_ValueError,
"Cannot handle repeated arrays in format string");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ndim = ctx->head->field->type->ndim;
while (*ts && *ts != ')') {
switch (*ts) {
case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': ++ts; continue; /* skip whitespace; ts must advance or the loop never terminates */
default: break;
}
number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i])
return PyErr_Format(PyExc_ValueError,
"Expected a dimension of size %zu, got %d",
ctx->head->field->type->arraysize[i], number);
if (*ts != ',' && *ts != ')')
return PyErr_Format(PyExc_ValueError,
"Expected a comma in format string, got '%c'", *ts);
if (*ts == ',') ts++;
i++;
}
if (i != ndim)
return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d",
ctx->head->field->type->ndim, i);
if (!*ts) {
PyErr_SetString(PyExc_ValueError,
"Unexpected end of format string, expected ')'");
return NULL;
}
ctx->is_valid_array = 1;
ctx->new_count = 1;
*tsp = ++ts;
return Py_None;
}
static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) {
int got_Z = 0;
while (1) {
switch(*ts) {
case 0:
if (ctx->enc_type != 0 && ctx->head == NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
if (ctx->head != NULL) {
__Pyx_BufFmt_RaiseExpected(ctx);
return NULL;
}
return ts;
case ' ':
case '\r':
case '\n':
++ts;
break;
case '<':
if (!__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '>':
case '!':
if (__Pyx_Is_Little_Endian()) {
PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler");
return NULL;
}
ctx->new_packmode = '=';
++ts;
break;
case '=':
case '@':
case '^':
ctx->new_packmode = *ts++;
break;
case 'T':
{
const char* ts_after_sub;
size_t i, struct_count = ctx->new_count;
size_t struct_alignment = ctx->struct_alignment;
ctx->new_count = 1;
++ts;
if (*ts != '{') {
PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'");
return NULL;
}
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
ctx->enc_count = 0;
ctx->struct_alignment = 0;
++ts;
ts_after_sub = ts;
for (i = 0; i != struct_count; ++i) {
ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts);
if (!ts_after_sub) return NULL;
}
ts = ts_after_sub;
if (struct_alignment) ctx->struct_alignment = struct_alignment;
}
break;
case '}':
{
size_t alignment = ctx->struct_alignment;
++ts;
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_type = 0;
if (alignment && ctx->fmt_offset % alignment) {
ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment);
}
}
return ts;
case 'x':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->fmt_offset += ctx->new_count;
ctx->new_count = 1;
ctx->enc_count = 0;
ctx->enc_type = 0;
ctx->enc_packmode = ctx->new_packmode;
++ts;
break;
case 'Z':
got_Z = 1;
++ts;
if (*ts != 'f' && *ts != 'd' && *ts != 'g') {
__Pyx_BufFmt_RaiseUnexpectedChar('Z');
return NULL;
}
CYTHON_FALLTHROUGH;
case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I':
case 'l': case 'L': case 'q': case 'Q':
case 'f': case 'd': case 'g':
case 'O': case 'p':
if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) &&
(ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) {
ctx->enc_count += ctx->new_count;
ctx->new_count = 1;
got_Z = 0;
++ts;
break;
}
CYTHON_FALLTHROUGH;
case 's':
if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL;
ctx->enc_count = ctx->new_count;
ctx->enc_packmode = ctx->new_packmode;
ctx->enc_type = *ts;
ctx->is_complex = got_Z;
++ts;
ctx->new_count = 1;
got_Z = 0;
break;
case ':':
++ts;
while(*ts != ':') ++ts;
++ts;
break;
case '(':
if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL;
break;
default:
{
int number = __Pyx_BufFmt_ExpectNumber(&ts);
if (number == -1) return NULL;
ctx->new_count = (size_t)number;
}
}
}
}
/* TypeInfoCompare */
static int
__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b)
{
int i;
if (!a || !b)
return 0;
if (a == b)
return 1;
if (a->size != b->size || a->typegroup != b->typegroup ||
a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) {
if (a->typegroup == 'H' || b->typegroup == 'H') {
return a->size == b->size;
} else {
return 0;
}
}
if (a->ndim) {
for (i = 0; i < a->ndim; i++)
if (a->arraysize[i] != b->arraysize[i])
return 0;
}
if (a->typegroup == 'S') {
if (a->flags != b->flags)
return 0;
if (a->fields || b->fields) {
if (!(a->fields && b->fields))
return 0;
for (i = 0; a->fields[i].type && b->fields[i].type; i++) {
__Pyx_StructField *field_a = a->fields + i;
__Pyx_StructField *field_b = b->fields + i;
if (field_a->offset != field_b->offset ||
!__pyx_typeinfo_cmp(field_a->type, field_b->type))
return 0;
}
return !a->fields[i].type && !b->fields[i].type;
}
}
return 1;
}
/* MemviewSliceValidateAndInit */
static int
__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec)
{
if (buf->shape[dim] <= 1)
return 1;
if (buf->strides) {
if (spec & __Pyx_MEMVIEW_CONTIG) {
if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) {
if (unlikely(buf->strides[dim] != sizeof(void *))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly contiguous "
"in dimension %d.", dim);
goto fail;
}
} else if (unlikely(buf->strides[dim] != buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_FOLLOW) {
Py_ssize_t stride = buf->strides[dim];
if (stride < 0)
stride = -stride;
if (unlikely(stride < buf->itemsize)) {
PyErr_SetString(PyExc_ValueError,
"Buffer and memoryview are not contiguous "
"in the same dimension.");
goto fail;
}
}
} else {
if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not contiguous in "
"dimension %d", dim);
goto fail;
} else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) {
PyErr_Format(PyExc_ValueError,
"C-contiguous buffer is not indirect in "
"dimension %d", dim);
goto fail;
} else if (unlikely(buf->suboffsets)) {
PyErr_SetString(PyExc_ValueError,
"Buffer exposes suboffsets but no strides");
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec)
{
if (spec & __Pyx_MEMVIEW_DIRECT) {
if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) {
PyErr_Format(PyExc_ValueError,
"Buffer not compatible with direct access "
"in dimension %d.", dim);
goto fail;
}
}
if (spec & __Pyx_MEMVIEW_PTR) {
if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) {
PyErr_Format(PyExc_ValueError,
"Buffer is not indirectly accessible "
"in dimension %d.", dim);
goto fail;
}
}
return 1;
fail:
return 0;
}
static int
__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag)
{
int i;
if (c_or_f_flag & __Pyx_IS_F_CONTIG) {
Py_ssize_t stride = 1;
for (i = 0; i < ndim; i++) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not fortran contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
} else if (c_or_f_flag & __Pyx_IS_C_CONTIG) {
Py_ssize_t stride = 1;
for (i = ndim - 1; i >= 0; i--) {
if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) {
PyErr_SetString(PyExc_ValueError,
"Buffer not C contiguous.");
goto fail;
}
stride = stride * buf->shape[i];
}
}
return 1;
fail:
return 0;
}
static int __Pyx_ValidateAndInit_memviewslice(
int *axes_specs,
int c_or_f_flag,
int buf_flags,
int ndim,
__Pyx_TypeInfo *dtype,
__Pyx_BufFmt_StackElem stack[],
__Pyx_memviewslice *memviewslice,
PyObject *original_obj)
{
struct __pyx_memoryview_obj *memview, *new_memview;
__Pyx_RefNannyDeclarations
Py_buffer *buf;
int i, spec = 0, retval = -1;
__Pyx_BufFmt_Context ctx;
int from_memoryview = __pyx_memoryview_check(original_obj);
__Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0);
if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *)
original_obj)->typeinfo)) {
memview = (struct __pyx_memoryview_obj *) original_obj;
new_memview = NULL;
} else {
memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
original_obj, buf_flags, 0, dtype);
new_memview = memview;
if (unlikely(!memview))
goto fail;
}
buf = &memview->view;
if (unlikely(buf->ndim != ndim)) {
PyErr_Format(PyExc_ValueError,
"Buffer has wrong number of dimensions (expected %d, got %d)",
ndim, buf->ndim);
goto fail;
}
if (new_memview) {
__Pyx_BufFmt_Init(&ctx, stack, dtype);
if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail;
}
if (unlikely((unsigned) buf->itemsize != dtype->size)) {
PyErr_Format(PyExc_ValueError,
"Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) "
"does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)",
buf->itemsize,
(buf->itemsize > 1) ? "s" : "",
dtype->name,
dtype->size,
(dtype->size > 1) ? "s" : "");
goto fail;
}
if (buf->len > 0) {
for (i = 0; i < ndim; i++) {
spec = axes_specs[i];
if (unlikely(!__pyx_check_strides(buf, i, ndim, spec)))
goto fail;
if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec)))
goto fail;
}
if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)))
goto fail;
}
if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice,
new_memview != NULL) == -1)) {
goto fail;
}
retval = 0;
goto no_fail;
fail:
Py_XDECREF(new_memview);
retval = -1;
no_fail:
__Pyx_RefNannyFinishContext();
return retval;
}
/* ObjectToMemviewSlice */
static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_float(PyObject *obj, int writable_flag) {
__Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } };
__Pyx_BufFmt_StackElem stack[1];
int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) };
int retcode;
if (obj == Py_None) {
result.memview = (struct __pyx_memoryview_obj *) Py_None;
return result;
}
retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0,
PyBUF_RECORDS_RO | writable_flag, 1,
&__Pyx_TypeInfo_float, stack,
&result, obj);
if (unlikely(retcode == -1))
goto __pyx_fail;
return result;
__pyx_fail:
result.memview = NULL;
result.data = NULL;
return result;
}
/* MemviewDtypeToObject */
static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp) {
return (PyObject *) PyFloat_FromDouble(*(float *) itemp);
}
static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj) {
float value = __pyx_PyFloat_AsFloat(obj);
if ((value == (float)-1) && PyErr_Occurred())
return 0;
*(float *) itemp = value;
return 1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(int) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(int) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(int) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
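/* Fallback: detect endianness at run time (little is 1 on little-endian
machines) and let CPython assemble the integer from its raw bytes. */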
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(int),
little, !is_unsigned);
}
}
/* CIntFromPyVerify */
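/* These macros run a wider CPython conversion (func_value of func_type),
check that the result survives a round trip through the narrower
target_type, and jump to the overflow labels otherwise; the _EXC variant
additionally propagates a pending exception signalled by a -1 return. */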
#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0)
#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\
__PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1)
#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\
{\
func_type value = func_value;\
if (sizeof(target_type) < sizeof(func_type)) {\
if (unlikely(value != (func_type) (target_type) value)) {\
func_type zero = 0;\
if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\
return (target_type) -1;\
if (is_unsigned && unlikely(value < zero))\
goto raise_neg_overflow;\
else\
goto raise_overflow;\
}\
}\
return (target_type) value;\
}
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return ::std::complex< float >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
return x + y*(__pyx_t_float_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) {
__pyx_t_float_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
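/* Robust complex division: scale by the component of b with the larger
magnitude (Smith-style) so intermediate products neither overflow nor
underflow. */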
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabsf(b.real) >= fabsf(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
float r = b.imag / b.real;
float s = (float)(1.0) / (b.real + b.imag * r);
return __pyx_t_float_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
float r = b.real / b.imag;
float s = (float)(1.0) / (b.imag + b.real * r);
return __pyx_t_float_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
if (b.imag == 0) {
return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
float denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_float_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) {
__pyx_t_float_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrtf(z.real*z.real + z.imag*z.imag);
#else
return hypotf(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) {
__pyx_t_float_complex z;
float r, lnr, theta, z_r, z_theta;
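/* Small integer exponents (0..4 after sign handling) are computed by
repeated multiplication; a negative integer exponent first replaces a
with 1/a and negates the exponent. */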
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
float denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_float(a, a);
case 3:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, a);
case 4:
z = __Pyx_c_prod_float(a, a);
return __Pyx_c_prod_float(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = powf(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2f(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_float(a);
theta = atan2f(a.imag, a.real);
}
lnr = logf(r);
z_r = expf(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cosf(z_theta);
z.imag = z_r * sinf(z_theta);
return z;
}
#endif
#endif
/* Declarations */
#if CYTHON_CCOMPLEX
#ifdef __cplusplus
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return ::std::complex< double >(x, y);
}
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
return x + y*(__pyx_t_double_complex)_Complex_I;
}
#endif
#else
static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) {
__pyx_t_double_complex z;
z.real = x;
z.imag = y;
return z;
}
#endif
/* Arithmetic */
#if CYTHON_CCOMPLEX
#else
static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
return (a.real == b.real) && (a.imag == b.imag);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real + b.real;
z.imag = a.imag + b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real - b.real;
z.imag = a.imag - b.imag;
return z;
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
z.real = a.real * b.real - a.imag * b.imag;
z.imag = a.real * b.imag + a.imag * b.real;
return z;
}
#if 1
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else if (fabs(b.real) >= fabs(b.imag)) {
if (b.real == 0 && b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag);
} else {
double r = b.imag / b.real;
double s = (double)(1.0) / (b.real + b.imag * r);
return __pyx_t_double_complex_from_parts(
(a.real + a.imag * r) * s, (a.imag - a.real * r) * s);
}
} else {
double r = b.real / b.imag;
double s = (double)(1.0) / (b.imag + b.real * r);
return __pyx_t_double_complex_from_parts(
(a.real * r + a.imag) * s, (a.imag * r - a.real) * s);
}
}
#else
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
if (b.imag == 0) {
return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real);
} else {
double denom = b.real * b.real + b.imag * b.imag;
return __pyx_t_double_complex_from_parts(
(a.real * b.real + a.imag * b.imag) / denom,
(a.imag * b.real - a.real * b.imag) / denom);
}
}
#endif
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = -a.real;
z.imag = -a.imag;
return z;
}
static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) {
return (a.real == 0) && (a.imag == 0);
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) {
__pyx_t_double_complex z;
z.real = a.real;
z.imag = -a.imag;
return z;
}
#if 1
static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) {
#if !defined(HAVE_HYPOT) || defined(_MSC_VER)
return sqrt(z.real*z.real + z.imag*z.imag);
#else
return hypot(z.real, z.imag);
#endif
}
static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) {
__pyx_t_double_complex z;
double r, lnr, theta, z_r, z_theta;
if (b.imag == 0 && b.real == (int)b.real) {
if (b.real < 0) {
double denom = a.real * a.real + a.imag * a.imag;
a.real = a.real / denom;
a.imag = -a.imag / denom;
b.real = -b.real;
}
switch ((int)b.real) {
case 0:
z.real = 1;
z.imag = 0;
return z;
case 1:
return a;
case 2:
return __Pyx_c_prod_double(a, a);
case 3:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, a);
case 4:
z = __Pyx_c_prod_double(a, a);
return __Pyx_c_prod_double(z, z);
}
}
if (a.imag == 0) {
if (a.real == 0) {
return a;
} else if (b.imag == 0) {
z.real = pow(a.real, b.real);
z.imag = 0;
return z;
} else if (a.real > 0) {
r = a.real;
theta = 0;
} else {
r = -a.real;
theta = atan2(0.0, -1.0);
}
} else {
r = __Pyx_c_abs_double(a);
theta = atan2(a.imag, a.real);
}
lnr = log(r);
z_r = exp(lnr * b.real - theta * b.imag);
z_theta = theta * b.real + lnr * b.imag;
z.real = z_r * cos(z_theta);
z.imag = z_r * sin(z_theta);
return z;
}
#endif
#endif
/* MemviewSliceCopyTemplate */
static __Pyx_memviewslice
__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs,
const char *mode, int ndim,
size_t sizeof_dtype, int contig_flag,
int dtype_is_object)
{
__Pyx_RefNannyDeclarations
int i;
__Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } };
struct __pyx_memoryview_obj *from_memview = from_mvs->memview;
Py_buffer *buf = &from_memview->view;
PyObject *shape_tuple = NULL;
PyObject *temp_int = NULL;
struct __pyx_array_obj *array_obj = NULL;
struct __pyx_memoryview_obj *memview_obj = NULL;
__Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0);
for (i = 0; i < ndim; i++) {
if (unlikely(from_mvs->suboffsets[i] >= 0)) {
PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with "
"indirect dimensions (axis %d)", i);
goto fail;
}
}
shape_tuple = PyTuple_New(ndim);
if (unlikely(!shape_tuple)) {
goto fail;
}
__Pyx_GOTREF(shape_tuple);
for(i = 0; i < ndim; i++) {
temp_int = PyInt_FromSsize_t(from_mvs->shape[i]);
if(unlikely(!temp_int)) {
goto fail;
} else {
PyTuple_SET_ITEM(shape_tuple, i, temp_int);
temp_int = NULL;
}
}
array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL);
if (unlikely(!array_obj)) {
goto fail;
}
__Pyx_GOTREF(array_obj);
memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new(
(PyObject *) array_obj, contig_flag,
dtype_is_object,
from_mvs->memview->typeinfo);
if (unlikely(!memview_obj))
goto fail;
if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0))
goto fail;
if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim,
dtype_is_object) < 0))
goto fail;
goto no_fail;
fail:
__Pyx_XDECREF(new_mvs.memview);
new_mvs.memview = NULL;
new_mvs.data = NULL;
no_fail:
__Pyx_XDECREF(shape_tuple);
__Pyx_XDECREF(temp_int);
__Pyx_XDECREF(array_obj);
__Pyx_RefNannyFinishContext();
return new_mvs;
}
/* CIntFromPy */
static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) {
const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(int) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (int) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
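/* Fast path: read the CPython long's digit array directly; each digit
carries PyLong_SHIFT bits and Py_SIZE(x) gives the signed digit count. */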
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0])
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) {
return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) {
return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) {
return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (int) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(int) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (int) 0;
case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0])
case -2:
if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(int) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(int) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(int) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) {
return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])));
}
}
break;
}
#endif
if (sizeof(int) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(int) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
int val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (int) -1;
}
} else {
int val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (int) -1;
val = __Pyx_PyInt_As_int(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to int");
return (int) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to int");
return (int) -1;
}
/* CIntFromPy */
static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *x) {
const size_t neg_one = (size_t) ((size_t) 0 - (size_t) 1), const_zero = (size_t) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(size_t) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(size_t, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (size_t) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (size_t) 0;
case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, digits[0])
case 2:
if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(size_t) >= 2 * PyLong_SHIFT) {
return (size_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(size_t) >= 3 * PyLong_SHIFT) {
return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(size_t) >= 4 * PyLong_SHIFT) {
return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (size_t) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(size_t) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (size_t) 0;
case -1: __PYX_VERIFY_RETURN_INT(size_t, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, +digits[0])
case -2:
if (8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {
return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {
return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {
return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {
return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) {
return (size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) {
return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])));
}
}
break;
}
#endif
if (sizeof(size_t) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(size_t) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
size_t val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (size_t) -1;
}
} else {
size_t val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (size_t) -1;
val = __Pyx_PyInt_As_size_t(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to size_t");
return (size_t) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to size_t");
return (size_t) -1;
}
/* CIntFromPy */
static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(long) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (long) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0])
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) {
return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) {
return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) {
return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (long) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(long) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (long) 0;
case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0])
case -2:
if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(long) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(long) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(long) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) {
return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])));
}
}
break;
}
#endif
if (sizeof(long) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
long val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (long) -1;
}
} else {
long val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (long) -1;
val = __Pyx_PyInt_As_long(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to long");
return (long) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to long");
return (long) -1;
}
/* CIntToPy */
static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) {
const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0;
const int is_unsigned = neg_one > const_zero;
if (is_unsigned) {
if (sizeof(long) < sizeof(long)) {
return PyInt_FromLong((long) value);
} else if (sizeof(long) <= sizeof(unsigned long)) {
return PyLong_FromUnsignedLong((unsigned long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) {
return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value);
#endif
}
} else {
if (sizeof(long) <= sizeof(long)) {
return PyInt_FromLong((long) value);
#ifdef HAVE_LONG_LONG
} else if (sizeof(long) <= sizeof(PY_LONG_LONG)) {
return PyLong_FromLongLong((PY_LONG_LONG) value);
#endif
}
}
{
int one = 1; int little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&value;
return _PyLong_FromByteArray(bytes, sizeof(long),
little, !is_unsigned);
}
}
/* CIntFromPy */
static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) {
const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0;
const int is_unsigned = neg_one > const_zero;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x))) {
if (sizeof(char) < sizeof(long)) {
__PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x))
} else {
long val = PyInt_AS_LONG(x);
if (is_unsigned && unlikely(val < 0)) {
goto raise_neg_overflow;
}
return (char) val;
}
} else
#endif
if (likely(PyLong_Check(x))) {
if (is_unsigned) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0])
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) {
return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) {
return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) {
return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]));
}
}
break;
}
#endif
#if CYTHON_COMPILING_IN_CPYTHON
if (unlikely(Py_SIZE(x) < 0)) {
goto raise_neg_overflow;
}
#else
{
int result = PyObject_RichCompareBool(x, Py_False, Py_LT);
if (unlikely(result < 0))
return (char) -1;
if (unlikely(result == 1))
goto raise_neg_overflow;
}
#endif
if (sizeof(char) <= sizeof(unsigned long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x))
#endif
}
} else {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)x)->ob_digit;
switch (Py_SIZE(x)) {
case 0: return (char) 0;
case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0]))
case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0])
case -2:
if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 2:
if (8 * sizeof(char) > 1 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -3:
if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 3:
if (8 * sizeof(char) > 2 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case -4:
if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
case 4:
if (8 * sizeof(char) > 3 * PyLong_SHIFT) {
if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) {
__PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])))
} else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) {
return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])));
}
}
break;
}
#endif
if (sizeof(char) <= sizeof(long)) {
__PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x))
#ifdef HAVE_LONG_LONG
} else if (sizeof(char) <= sizeof(PY_LONG_LONG)) {
__PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x))
#endif
}
}
{
#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray)
PyErr_SetString(PyExc_RuntimeError,
"_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers");
#else
char val;
PyObject *v = __Pyx_PyNumber_IntOrLong(x);
#if PY_MAJOR_VERSION < 3
if (likely(v) && !PyLong_Check(v)) {
PyObject *tmp = v;
v = PyNumber_Long(tmp);
Py_DECREF(tmp);
}
#endif
if (likely(v)) {
int one = 1; int is_little = (int)*(unsigned char *)&one;
unsigned char *bytes = (unsigned char *)&val;
int ret = _PyLong_AsByteArray((PyLongObject *)v,
bytes, sizeof(val),
is_little, !is_unsigned);
Py_DECREF(v);
if (likely(!ret))
return val;
}
#endif
return (char) -1;
}
} else {
char val;
PyObject *tmp = __Pyx_PyNumber_IntOrLong(x);
if (!tmp) return (char) -1;
val = __Pyx_PyInt_As_char(tmp);
Py_DECREF(tmp);
return val;
}
raise_overflow:
PyErr_SetString(PyExc_OverflowError,
"value too large to convert to char");
return (char) -1;
raise_neg_overflow:
PyErr_SetString(PyExc_OverflowError,
"can't convert negative value to char");
return (char) -1;
}
/* CheckBinaryVersion */
static int __Pyx_check_binary_version(void) {
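/* Compare the "X.Y" major/minor digits of the compile-time and run-time
Python version strings; a mismatch raises a warning, not a hard error. */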
char ctversion[4], rtversion[4];
PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
char message[200];
PyOS_snprintf(message, sizeof(message),
"compiletime version %s of module '%.100s' "
"does not match runtime version %s",
ctversion, __Pyx_MODULE_NAME, rtversion);
return PyErr_WarnEx(NULL, message, 1);
}
return 0;
}
/* InitStrings */
static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
while (t->p) {
#if PY_MAJOR_VERSION < 3
if (t->is_unicode) {
*t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
} else if (t->intern) {
*t->p = PyString_InternFromString(t->s);
} else {
*t->p = PyString_FromStringAndSize(t->s, t->n - 1);
}
#else
if (t->is_unicode | t->is_str) {
if (t->intern) {
*t->p = PyUnicode_InternFromString(t->s);
} else if (t->encoding) {
*t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
} else {
*t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
}
} else {
*t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
}
#endif
if (!*t->p)
return -1;
if (PyObject_Hash(*t->p) == -1)
return -1;
++t;
}
return 0;
}
static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
}
static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
Py_ssize_t ignore;
return __Pyx_PyObject_AsStringAndSize(o, &ignore);
}
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
#if !CYTHON_PEP393_ENABLED
static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
char* defenc_c;
PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
if (!defenc) return NULL;
defenc_c = PyBytes_AS_STRING(defenc);
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
{
char* end = defenc_c + PyBytes_GET_SIZE(defenc);
char* c;
for (c = defenc_c; c < end; c++) {
if ((unsigned char) (*c) >= 128) {
PyUnicode_AsASCIIString(o);
return NULL;
}
}
}
#endif
*length = PyBytes_GET_SIZE(defenc);
return defenc_c;
}
#else
static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
if (likely(PyUnicode_IS_ASCII(o))) {
*length = PyUnicode_GET_LENGTH(o);
return PyUnicode_AsUTF8(o);
} else {
PyUnicode_AsASCIIString(o);
return NULL;
}
#else
return PyUnicode_AsUTF8AndSize(o, length);
#endif
}
#endif
#endif
static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
if (
#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
__Pyx_sys_getdefaultencoding_not_ascii &&
#endif
PyUnicode_Check(o)) {
return __Pyx_PyUnicode_AsStringAndSize(o, length);
} else
#endif
#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
if (PyByteArray_Check(o)) {
*length = PyByteArray_GET_SIZE(o);
return PyByteArray_AS_STRING(o);
} else
#endif
{
char* result;
int r = PyBytes_AsStringAndSize(o, &result, length);
if (unlikely(r < 0)) {
return NULL;
} else {
return result;
}
}
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
int is_true = x == Py_True;
if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
else return PyObject_IsTrue(x);
}
static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) {
int retval;
if (unlikely(!x)) return -1;
retval = __Pyx_PyObject_IsTrue(x);
Py_DECREF(x);
return retval;
}
static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) {
#if PY_MAJOR_VERSION >= 3
if (PyLong_Check(result)) {
if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1,
"__int__ returned non-int (type %.200s). "
"The ability to return an instance of a strict subclass of int "
"is deprecated, and may be removed in a future version of Python.",
Py_TYPE(result)->tp_name)) {
Py_DECREF(result);
return NULL;
}
return result;
}
#endif
PyErr_Format(PyExc_TypeError,
"__%.4s__ returned non-%.4s (type %.200s)",
type_name, type_name, Py_TYPE(result)->tp_name);
Py_DECREF(result);
return NULL;
}
static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) {
#if CYTHON_USE_TYPE_SLOTS
PyNumberMethods *m;
#endif
const char *name = NULL;
PyObject *res = NULL;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_Check(x) || PyLong_Check(x)))
#else
if (likely(PyLong_Check(x)))
#endif
return __Pyx_NewRef(x);
#if CYTHON_USE_TYPE_SLOTS
m = Py_TYPE(x)->tp_as_number;
#if PY_MAJOR_VERSION < 3
if (m && m->nb_int) {
name = "int";
res = m->nb_int(x);
}
else if (m && m->nb_long) {
name = "long";
res = m->nb_long(x);
}
#else
if (likely(m && m->nb_int)) {
name = "int";
res = m->nb_int(x);
}
#endif
#else
if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) {
res = PyNumber_Int(x);
}
#endif
if (likely(res)) {
#if PY_MAJOR_VERSION < 3
if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) {
#else
if (unlikely(!PyLong_CheckExact(res))) {
#endif
return __Pyx_PyNumber_IntOrLongWrongResultType(res, name);
}
}
else if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_TypeError,
"an integer is required");
}
return res;
}
static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) {
Py_ssize_t ival;
PyObject *x;
#if PY_MAJOR_VERSION < 3
if (likely(PyInt_CheckExact(b))) {
if (sizeof(Py_ssize_t) >= sizeof(long))
return PyInt_AS_LONG(b);
else
return PyInt_AsSsize_t(b);
}
#endif
if (likely(PyLong_CheckExact(b))) {
#if CYTHON_USE_PYLONG_INTERNALS
const digit* digits = ((PyLongObject*)b)->ob_digit;
const Py_ssize_t size = Py_SIZE(b);
if (likely(__Pyx_sst_abs(size) <= 1)) {
ival = likely(size) ? digits[0] : 0;
if (size == -1) ival = -ival;
return ival;
} else {
switch (size) {
case 2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -2:
if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -3:
if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case 4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
case -4:
if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
}
break;
}
}
#endif
return PyLong_AsSsize_t(b);
}
x = PyNumber_Index(b);
if (!x) return -1;
ival = PyInt_AsSsize_t(x);
Py_DECREF(x);
return ival;
}
static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
}
static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
return PyInt_FromSize_t(ival);
}
#endif /* Py_PYTHON_H */
|
row_wise_v2.c | #include<stdio.h>
#include<string.h>
#include <stdlib.h>
#include<mpi.h>
#include<omp.h>
#include<time.h>
//macros
#define ALPHABET_LENGTH 4
#define max(x,y) ((x)>(y)?(x):(y))
//global variables
char *string_A;
char *string_B;
char *unique_chars_C; //unique alphabets
int c_len;
int *P_Matrix;
int *DP_Results; //to store the DP values
int *dp_prev_row;
//function prototypes
int get_index_of_character(char *str,char x, int len);
void print_matrix(int **x, int row, int col);
void calc_P_matrix_v2(int *P, char *b, int len_b, char *c, int len_c, int myrank, int chunk_size);
int lcs_yang_v2(int *DP,int *prev_row, int *P, char *A, char *B, char *C, int m, int n, int u, int myrank, int chunk_size);
int lcs(int **DP, char *A, char *B, int m, int n);
int get_index_of_character(char *str,char x, int len)
{
for(int i=0;i<len;i++)
{
if(str[i]== x)
{
return i;
}
}
return -1; //character x not found in str
}
void print_matrix(int **x, int row, int col)
{
for(int i=0;i<row;i++)
{
for(int j=0;j<col;j++)
{
printf("%d ",x[i][j]);
}
printf("\n");
}
}
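/* P[i][j] records the largest 1-based position p <= j with b[p-1] == c[i],
or 0 if c[i] does not occur in b[0..j-1]; each row is filled left to right,
carrying the previous value on a mismatch. */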
void calc_P_matrix_v2(int *P, char *b, int len_b, char *c, int len_c, int myrank, int chunk_size)
{
char receive_array_for_scatter_c[chunk_size];
int receive_array_for_scatter_p[chunk_size*(len_b+1)];
//Scatter the char array chunks by sending each process a particular chunk
MPI_Scatter(c, chunk_size, MPI_CHAR, receive_array_for_scatter_c, chunk_size, MPI_CHAR, 0, MPI_COMM_WORLD);
//Scatter the P matrix rows by sending each process its chunk of rows
MPI_Scatter(P, chunk_size*(len_b+1), MPI_INT, receive_array_for_scatter_p, chunk_size*(len_b+1), MPI_INT, 0, MPI_COMM_WORLD);
// Broadcast the whole b array to everybody
MPI_Bcast(b, len_b, MPI_CHAR, 0, MPI_COMM_WORLD);
#pragma omp parallel for
for(int i=0;i<chunk_size;i++)
{
for(int j=1;j<len_b+1;j++)
{
if(b[j-1]==receive_array_for_scatter_c[i])
{
receive_array_for_scatter_p[(i*(len_b+1))+j] = j;
}
else
{
receive_array_for_scatter_p[(i*(len_b+1))+j] = receive_array_for_scatter_p[(i*(len_b+1))+j-1];
}
}
}
//now gather all the calculated values of P matrix in process 0
MPI_Gather(receive_array_for_scatter_p, chunk_size*(len_b+1),MPI_INT , P, chunk_size*(len_b+1), MPI_INT, 0, MPI_COMM_WORLD);
}
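/* lcs_yang_v2 parallelizes each DP row: the row buffer is scattered across
ranks, every rank fills its chunk of columns from prev_row and the P matrix,
and MPI_Allgather reassembles the complete row on all ranks before it
becomes prev_row for the next iteration. */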
int lcs_yang_v2(int *DP, int *prev_row, int *P, char *A, char *B, char *C, int m, int n, int u, int myrank, int chunk_size)
{
MPI_Bcast(P, (u*(n+1)),MPI_INT , 0, MPI_COMM_WORLD);
for(int i=1;i<m+1;i++)
{
int c_i = get_index_of_character(C,A[i-1],u);
int dp_i_receive[chunk_size];
MPI_Scatter(DP, chunk_size, MPI_INT, dp_i_receive, chunk_size, MPI_INT, 0, MPI_COMM_WORLD);
int start_id = (myrank * chunk_size);
int end_id = (myrank * chunk_size) + chunk_size;
// printf("rank %d, start: %d, end: %d\n",myrank, start_id, end_id);
int t,s;
#pragma omp parallel for private(t,s) schedule(static)
for(int j=start_id;j<end_id;j++)
{
if(j==start_id && myrank==0) continue; //skip j==0 (DP[0] stays 0) without modifying the OpenMP loop variable
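/* Branchless update (reading of the bit tricks below): t is 1 iff
P[c_i][j] > 0, i.e. A[i-1] occurs in B at or before column j; s is 0 iff
prev_row[j] == t*prev_row[P[c_i][j]-1]. The selector ((t^1)||(s^0)) keeps
prev_row[j] unless t==1 and s==0, in which case prev_row[P[c_i][j]-1]+1
is taken instead. */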
t= (0-P[(c_i*(n+1))+j])<0;
s= (0 - (prev_row[j] - (t*prev_row[P[(c_i*(n+1))+j]-1]) ));
dp_i_receive[j-start_id] = ((t^1)||(s^0))*(prev_row[j]) + (!((t^1)||(s^0)))*(prev_row[P[(c_i*(n+1))+j]-1] + 1);
}
//now gather all the calculated values of P matrix in process 0
MPI_Allgather(dp_i_receive, chunk_size,MPI_INT ,DP, chunk_size, MPI_INT, MPI_COMM_WORLD);
#pragma omp parallel for schedule(static)
for(int j=0;j<(n+1);j++){
prev_row[j] = DP[j];
}
}
return DP[n];
}
int lcs(int **DP, char *A, char *B, int m, int n)
{
// printf("%s %d \n%s %d\n",A,m,B,n );
//print_matrix(DP,m+1,n+1);
for(int i=1;i<(m+1);i++)
{
for(int j=1;j<(n+1);j++)
{
// if(i==0 || j==0)
// {
// DP[i][j]=0;
// }
if(A[i-1] == B[j-1])
{
DP[i][j] = DP[i-1][j-1] + 1;
}
else
{
DP[i][j] = max(DP[i-1][j],DP[i][j-1]);
}
}
}
return DP[m][n];
}
int main(int argc, char *argv[])
{
if(argc <= 1){
printf("Error: No input file specified! Please specify the input file and run again.\n");
return 1;
}
int my_rank;
int num_procs;
int chunk_size_p,chunk_size_dp;//chunk_size for P matrix and DP matrix
int res;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); //grab this process's rank
MPI_Comm_size(MPI_COMM_WORLD, &num_procs); //grab the total num of processes
FILE *fp;
int len_a,len_b;
double start_time,stop_time,start_time_yang,stop_time_yang;
if(my_rank == 0)printf("\nYour input file: %s \n",argv[1]);
fp = fopen(argv[1], "r");
if(fp == NULL){
printf("Error: could not open input file %s\n",argv[1]);
MPI_Finalize();
return 1;
}
fscanf(fp, "%d %d %d", &len_a, &len_b, &c_len);
string_A = (char *)malloc((len_a+1) * sizeof(char));
string_B = (char *)malloc((len_b+1) * sizeof(char));
unique_chars_C = (char *)malloc((c_len+1) * sizeof(char));
fscanf(fp, "%s %s %s", string_A,string_B,unique_chars_C);
fclose(fp);
chunk_size_p = (c_len/num_procs);
chunk_size_dp = ((len_b+1)/num_procs);
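/* Note: these integer divisions assume c_len and (len_b+1) are exact
multiples of num_procs; any remainder elements are silently dropped. */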
if(my_rank==0)
{
printf("chunk_p: %d chunk_dp: %d procs: %d\n",chunk_size_p,chunk_size_dp,num_procs);
}
DP_Results = (int *)calloc((len_b+1), sizeof(int)); //zero-init: row 0 of the DP table is all zeros
dp_prev_row = (int *)calloc((len_b+1), sizeof(int));
P_Matrix = (int *)calloc(c_len*(len_b+1), sizeof(int)); //zero-init: column 0 of P is read before being written
if(my_rank ==0)
{
// start_time = MPI_Wtime();
// printf("lcs is: %d\n",lcs(DP_Results,string_A,string_B,len_a,len_b));
// stop_time = MPI_Wtime();
// printf("time taken by normal algorithm is: %lf\n",stop_time-start_time);
}
/*
for(int k=0;k<len_a+1;k++)
{
for(int l=0;l<len_b+1;l++)
{
DP_Results[k][l]=0;
}
}
*/
start_time_yang = MPI_Wtime();
calc_P_matrix_v2(P_Matrix,string_B,len_b,unique_chars_C,c_len, my_rank, chunk_size_p);
res = lcs_yang_v2(DP_Results, dp_prev_row, P_Matrix,string_A,string_B,unique_chars_C,len_a,len_b,c_len,my_rank, chunk_size_dp);
stop_time_yang = MPI_Wtime();
if(my_rank == 0)
{
printf("lcs_yang_v2 is: %d\n",res);
printf("time taken for lcs_yang_v2 is: %lf\n",stop_time_yang-start_time_yang);
}
//deallocate pointers
free(P_Matrix);
free(DP_Results);
free(dp_prev_row);
free(string_A); free(string_B); free(unique_chars_C);
// Shutdown MPI (important - don't forget!)
MPI_Finalize();
return 0;
} |
ccl_fftlog.c | #include <stdlib.h>
#include <math.h>
#include <complex.h>
#include <fftw3.h>
#include <gsl/gsl_sf_result.h>
#include <gsl/gsl_sf_gamma.h>
#include "ccl.h"
/****************************************************************
This is the famous FFTLog.
First implemented by the living legend Andrew Hamilton:
http://casa.colorado.edu/~ajsh/FFTLog/
This version is a C adaptation of the C++ version found in Copter,
written by JWG Carlson, another big loss for the cosmology community.
https://github.com/jwgcarlson/Copter
I've transformed this from C++ to C99 as the lowest common denominator
and provided bindings for C++ and python.
These are the C++ bindings
*****************************************************************/
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
#ifndef M_LN2
#define M_LN2 0.69314718055994530942
#endif
/* This code is FFTLog, which is described in arXiv:astro-ph/9905191 */
static double complex lngamma_fftlog(double complex z)
{
gsl_sf_result lnr, phi;
gsl_sf_lngamma_complex_e(creal(z), cimag(z), &lnr, &phi);
return lnr.val + I*phi.val;
}
static double complex gamma_fftlog(double complex z)
{
return cexp(lngamma_fftlog(z));
}
static double complex polar (double r, double phi)
{
return (r*cos(phi) +I*(r*sin(phi)));
}
static void lngamma_4(double x, double y, double* lnr, double* arg)
{
double complex w = lngamma_fftlog(x+y*I);
if(lnr) *lnr = creal(w);
if(arg) *arg = cimag(w);
}
static double goodkr(int N, double mu, double q, double L, double kr)
{
double xp = (mu+1+q)/2;
double xm = (mu+1-q)/2;
double y = M_PI*N/(2*L);
double lnr, argm, argp;
lngamma_4(xp, y, &lnr, &argp);
lngamma_4(xm, y, &lnr, &argm);
double arg = log(2/kr) * N/L + (argp + argm)/M_PI;
double iarg = round(arg);
if(arg != iarg)
kr *= exp((arg - iarg)*L/N);
return kr;
}
/* Pre-compute the coefficients that appear in the FFTLog implementation of
* the discrete Hankel transform. The parameters N, mu, and q here are the
* same as for the function fht(). The parameter L is defined (for whatever
* reason) to be N times the logarithmic spacing of the input array, i.e.
* L = N * log(r[N-1]/r[0])/(N-1) */
static void compute_u_coefficients(int N, double mu, double q, double L, double kcrc, double complex *u)
{
double y = M_PI/L;
double k0r0 = kcrc * exp(-L);
double t = -2*y*log(k0r0/2);
if(q == 0) {
double x = (mu+1)/2;
double lnr, phi;
for(int m = 0; m <= N/2; m++) {
lngamma_4(x, m*y, &lnr, &phi);
u[m] = polar(1.0,m*t + 2*phi);
}
}
else {
double xp = (mu+1+q)/2;
double xm = (mu+1-q)/2;
double lnrp, phip, lnrm, phim;
for(int m = 0; m <= N/2; m++) {
lngamma_4(xp, m*y, &lnrp, &phip);
lngamma_4(xm,-m*y, &lnrm, &phim);
u[m] = polar(exp(q*M_LN2 + lnrp - lnrm), m*t + phip - phim);
}
}
for(int m = N/2+1; m < N; m++)
u[m] = conj(u[N-m]);
if((N % 2) == 0)
u[N/2] = (creal(u[N/2]) + I*0.0);
}
/* Compute the discrete Hankel transform of the function a(r). See the FFTLog
* documentation (or the Fortran routine of the same name in the FFTLog
* sources) for a description of exactly what this function computes.
* If u is NULL, the transform coefficients will be computed anew and discarded
* afterwards. If you plan on performing many consecutive transforms, it is
* more efficient to pre-compute the u coefficients. */
static void fht(int npk, int N,
double *k, double **pk,
double *r, double **xi,
double dim, double mu, double q, double kcrc,
int noring, double complex* u, int *status)
{
fftw_plan forward_plan, reverse_plan;
double L = log(k[N-1]/k[0]) * N/(N-1.);
double complex* ulocal = NULL;
if(u == NULL) {
if(noring)
kcrc = goodkr(N, mu, q, L, kcrc);
ulocal = malloc (sizeof(complex double)*N);
if(ulocal==NULL)
*status=CCL_ERROR_MEMORY;
if(*status == 0) {
compute_u_coefficients(N, mu, q, L, kcrc, ulocal);
u = ulocal;
}
}
fftw_complex* a_tmp = NULL;
fftw_complex* b_tmp = NULL;
if(*status == 0) {
a_tmp = fftw_alloc_complex(N);
if(a_tmp==NULL)
*status=CCL_ERROR_MEMORY;
}
if(*status == 0) {
b_tmp = fftw_alloc_complex(N);
if(b_tmp==NULL)
*status=CCL_ERROR_MEMORY;
}
if(*status == 0) {
/* Compute the convolution b = a*u using FFTs */
forward_plan = fftw_plan_dft_1d(N,
(fftw_complex*) a_tmp,
(fftw_complex*) b_tmp,
-1, FFTW_ESTIMATE);
reverse_plan = fftw_plan_dft_1d(N,
(fftw_complex*) b_tmp,
(fftw_complex*) b_tmp,
+1, FFTW_ESTIMATE);
}
if(*status == 0) {
#pragma omp parallel default(none) \
shared(npk, N, k, pk, r, xi, \
dim, mu, q, kcrc, u, status, \
forward_plan, reverse_plan, \
L, ulocal)
{
int local_status = 0;
double *prefac_pk=NULL;
if(local_status == 0) {
prefac_pk = malloc(N*sizeof(double));
if(prefac_pk==NULL)
local_status=CCL_ERROR_MEMORY;
}
double *prefac_xi=NULL;
if(local_status == 0) {
prefac_xi = malloc(N*sizeof(double));
if(prefac_xi==NULL)
local_status=CCL_ERROR_MEMORY;
}
fftw_complex* a=NULL;
fftw_complex* b=NULL;
if(local_status == 0) {
a = fftw_alloc_complex(N);
if(a==NULL)
local_status=CCL_ERROR_MEMORY;
}
if(local_status == 0) {
b = fftw_alloc_complex(N);
if(b==NULL)
local_status=CCL_ERROR_MEMORY;
}
if(local_status == 0) {
for(int i = 0; i < N; i++)
prefac_pk[i] = pow(k[i], dim/2-q);
/* Compute k's corresponding to input r's */
double k0r0 = kcrc * exp(-L);
r[0] = k0r0/k[0];
for(int n = 1; n < N; n++)
r[n] = r[0] * exp(n*L/N);
double one_over_2pi_dhalf = pow(2*M_PI,-dim/2);
for(int i = 0; i < N; i++)
prefac_xi[i] = one_over_2pi_dhalf * pow(r[i], -dim/2-q);
#pragma omp for
for(int j = 0; j < npk; j++) {
for(int i = 0; i < N; i++)
a[i] = prefac_pk[i] * pk[j][i];
fftw_execute_dft(forward_plan,a,b);
for(int m = 0; m < N; m++)
b[m] *= u[m] / (double)(N); // divide by N since FFTW doesn't normalize the inverse FFT
fftw_execute_dft(reverse_plan,b,b);
/* Reverse b array */
double complex tmp;
for(int n = 0; n < N/2; n++) {
tmp = b[n];
b[n] = b[N-n-1];
b[N-n-1] = tmp;
}
for(int i = 0; i < N; i++)
xi[j][i] = prefac_xi[i] * creal(b[i]);
}
}
free(prefac_pk);
free(prefac_xi);
fftw_free(a);
fftw_free(b);
if (local_status) {
#pragma omp atomic write
*status = local_status;
}
} //end omp parallel
}
if(*status == 0) {
fftw_destroy_plan(forward_plan);
fftw_destroy_plan(reverse_plan);
}
free(ulocal);
if(a_tmp) fftw_free(a_tmp);
if(b_tmp) fftw_free(b_tmp);
}
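/* Usage sketch: as the comment above fht() notes, the u coefficients can be
 * precomputed once and reused across many transforms. The helper below is
 * hypothetical (not part of CCL's API) and assumes every call shares the
 * same N, mu, q = 0 and k sampling: */
static void fht_reuse_u_sketch(int npk, int N, double *k,
                               double **pk1, double **pk2,
                               double *r, double **xi1, double **xi2,
                               double mu, int *status)
{
double L = log(k[N-1]/k[0]) * N/(N-1.);
double complex *u = malloc(N * sizeof(double complex));
if(u == NULL) {
*status = CCL_ERROR_MEMORY;
return;
}
compute_u_coefficients(N, mu, 0., L, 1., u);
/* both transforms reuse u instead of recomputing it each time */
fht(npk, N, k, pk1, r, xi1, 3., mu, 0., 1., 0, u, status);
if(*status == 0)
fht(npk, N, k, pk2, r, xi2, 3., mu, 0., 1., 0, u, status);
free(u);
}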
void ccl_fftlog_ComputeXi2D(double mu, double epsilon,
int npk, int N, double *l,double **cl,
double *th, double **xi, int *status)
{
fht(npk, N, l, cl, th, xi, 2., mu, epsilon, 1, 1, NULL, status);
}
void ccl_fftlog_ComputeXi3D(double l, double epsilon,
int npk, int N, double *k, double **pk,
double *r, double **xi, int *status)
{
fht(npk, N, k, pk, r, xi, 3., l+0.5, epsilon, 1, 1, NULL, status);
}
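/* Usage sketch for the wrapper above (illustrative only: the toy power
 * spectrum and the sampling are assumptions; status follows the CCL
 * convention of 0 == OK): */
static void xi3d_demo_sketch(void)
{
enum { NDEMO = 64 };
double k[NDEMO], r[NDEMO], pk_row[NDEMO], xi_row[NDEMO];
double *pk[1] = { pk_row };
double *xi[1] = { xi_row };
int status = 0;
for(int i = 0; i < NDEMO; i++) {
k[i] = 1e-3 * exp(i * log(1e5) / (NDEMO-1)); /* log-spaced from 1e-3 to 1e2 */
pk_row[i] = 1.0 / (1.0 + k[i]*k[i]);         /* toy P(k) */
}
ccl_fftlog_ComputeXi3D(0., 0., 1, NDEMO, k, pk, r, xi, &status);
/* on success (status == 0), xi[0][i] holds xi(r[i]) */
}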
|
mainapp.c | #include <stdio.h>
#include <stdlib.h>
#include <omp.h>
#include <windows.h>
//This block of codes was made by M. Raihan Azhari//
//------------------------------------------------
struct amalan{
//daily worship ("amalan") counters
int tahajud;
int dhuha;
int wajib;
int tilawah;
int tahfidz;
struct amalan *next;
};
typedef struct User{
//data for the application's user (single user only)
char nama[30];
int target_status;
int target_tahajud, target_dhuha, target_wajib;
int target_tilawah, target_tahfidz;
int day;
struct amalan *data;
}User;
typedef struct amalan Amalan;
typedef Amalan *Amalanptr;
//end of codes block
//----------------------------------------------------
//This block of codes was made by Fikri Afif Musyaffa//
//------------------------------------------------
struct keluarga{ //struct for the family members entitled to inherit
//family member counts
int suami;   //husband
int istri;   //wives
int anaklk;  //sons
int anakpr;  //daughters
int sdrlk;   //brothers
int sdrpr;   //sisters
int bapak;   //father
int ibu;     //mother
int cuculk;  //grandsons
int cucupr;  //granddaughters
};
//end of codes block
//----------------------------------------------------
void input_data(Amalanptr *sptr);
void input_menu();
void printAmalan(Amalanptr current, int day_removed[50], User *userptr);
void print_evaluasi(Amalanptr current, User *userptr , int day_removed[50]);
void removeptr (Amalanptr *startPtr, int day, int day_removed[50]);
int file_user_read (User *userptr, int day_removed[50]);
int file_user_write (User *userptr, int day_removed[50]);
int file_amalan_write (Amalanptr current, int day_removed[50]);
int file_amalan_read (Amalanptr *sptr, int day_removed[50], int i, int *posisi);
void welcome(User *userptr);
void help_mutabaah();
int file_removed_write(int day[50]);
int file_removed_read(int day[50]);
int menuZakat();
void zakatPenghasilan();
void zakatMaal();
void list_zakatMaal();
void petunjuk_zakatPenghasilan();
void petunjuk_zakatMaal();
int jumlah(int x[], int n, int i);
int zakat(int * x, int y);
int menuWaris();
void perhitungan(int harta, int warisan[10], struct keluarga keluarga, int keturunan, int saudara, int orangtua); //inheritance division algorithm
void list(struct keluarga keluarga); //list the family members who receive a share
void display(int warisan[10]); //print the computed inheritance shares
void information();
int main(){
FILE *fptr;
int menu, day, i, j, id_input,id, mutabaah, status, login, login_status, file_status, posisi;
int day_removed[50] = {0}; //array marking which days have been removed
char temp; //used to consume whitespace from stdin
//zero the variables to avoid garbage values
menu = 0;
mutabaah = 0;
id = 0;
//the user struct is accessed in this function through the pointer *userptr
User *userptr;
userptr = (User*) calloc(2, sizeof(User)); //room for two User slots, indexed via (userptr + id)
/*We originally wanted to support multiple users keyed by id, but because of
difficulties with the file handling we settled on a single user, while still
using a struct pointer*/
//the start pointer is initialised to NULL
Amalanptr startptr = NULL;
//read the data files
file_status = file_user_read(userptr, day_removed); // file_user_read reads the user data file
file_removed_read(day_removed); //file_removed_read reads the list of days that were removed
//the file read position is set to 0 to avoid a garbage value
posisi = 0;
//if the amalan file has not been created yet
if((userptr + id)->day == 0){
file_amalan_read(&startptr, day_removed, 0, &posisi);
}
//if the amalan file already exists
else{
//run the loop once for every day of data the user has entered
for(i = 0; i < (userptr + id)->day - 1; i++){
file_amalan_read(&startptr, day_removed, i, &posisi);
}
//the last call passes -1 so the file gets closed
file_amalan_read(&startptr, day_removed, -1, &posisi);
}
//link userptr to the head of the amalan linked list (startptr)
(userptr + id)->data = startptr;
//the loop keeps running until the user enters -1
while (menu != -1){
welcome(userptr);
login_status = 1;
printf("\nMasukan angka: ");
scanf("%d", &menu);
system("cls");
//mutabaah is reset to 0 to reset the switch-case state
mutabaah = 0;
switch (menu){
case 1:
//---------------------------------------------------
//case 1 (Mutaba'ah Yaumiah) was made by M. Raihan Azhari
//the loop keeps running until the user enters -1
while(mutabaah != -1){
help_mutabaah();
printf("\nMasukan pilihan metode mutabaah: ");
scanf("%d", &mutabaah);
system("cls");
switch (mutabaah){
case 1:
// the user is asked to enter their identity and worship targets
//target_status == 1 marks that the user has entered identity and target data
(userptr + id)->target_status = 1;
printf("\nMasukan Nama : ");
scanf("%c", &temp);
scanf("%[^\n]", &(userptr + id)->nama);
printf("Masukan Target Rakaat Tahajud: ");
scanf("%d", &(userptr + id)->target_tahajud);
printf("Masukan Target Rakaat Dhuha: ");
scanf("%d", &(userptr + id)->target_dhuha);
(userptr + id)->target_wajib = 5; //the obligatory-prayer target is fixed at 5, since those prayers are mandatory
printf("Masukan Target Halaman Tilawah: ");
scanf("%d", &(userptr + id)->target_tilawah);
printf("Masukan Target Ayat Tahfidz: ");
scanf("%d", &(userptr + id)->target_tahfidz);
printf("\n\n");
system("pause");
system("cls");
break;
case 2:
//case 2: the user enters worship deeds for as many days as desired
//if target_status != 1 the error handling runs and the user is asked to complete case 1 first
if((userptr + id)->target_status != 1){
printf("\nHarap masukan target terlebih dahulu\n\n");
system("pause");
system("cls");
break;
}
printf("\nAssalamualaikum %s", (userptr + id)->nama);
printf("\nMasukan jumlah hari yang akan diinput: ");
scanf("%d", &day);
//ask the user to enter deeds for the requested number of days
for(i = 0; i < day; i++){
printf("\n\nAmalan hari ke-%d", (userptr->day) + i + 1);
//input_data receives the address of the start pointer, the head of the linked list
input_data(&startptr);
}
//link the userptr struct to the head of the daily amalan linked list
(userptr + id)->data = startptr;
system("cls");
printf("\n\ninput berhasil !\n");
printAmalan((userptr + id)->data, day_removed, userptr);
printf("\n\n");
system("pause");
system("cls");
break;
case 3:
//case 3: show the user the evaluation of the daily worship entered so far
//error handling in case the user has not entered identity and targets yet
if((userptr + id)->target_status != 1){
printf("\nHarap masukan target terlebih dahulu\n\n");
system("pause");
system("cls");
break;
}
printf("\nAssalamualaikum %s", (userptr + id)->nama);
//print the details of the user's daily deeds
printAmalan((userptr + id)->data, day_removed, userptr);
printf("\n\n");
system("pause");
system("cls");
//print the recap of the user's daily deeds together with its evaluation
print_evaluasi((userptr + id)->data, userptr, day_removed);
printf("\n\n");
system("pause");
system("cls");
break;
case 4:
//case 4: the user picks the day to be removed
if((userptr + id)->target_status != 1){
printf("\nHarap masukan target terlebih dahulu\n\n");
system("pause");
system("cls");
break;
}
printf("Pilih hari yang akan dihapus: ");
scanf("%d", &day);
day--;
if(day >= (userptr + id)->day){
printf("\n\nAmalan hari ke %d belum pernah diinput\n\n", day+1);
system("pause");
system("cls");
break;
}
removeptr (&startptr, day, day_removed);
day_removed[day] = 1;
printf("\nData hari ke-%d berhasil dihapus\n\n", day+1);
system("pause");
system("cls");
break;
}
}
break;
//end of codes block
//---------------------------------------------------------
case 2:
menuZakat();
break;
case 3:
//---------------------------------------------------
//case 3 (Harta Waris) was made by Fikri Afif Musyaffa
menuWaris();
break;
case 4:
information();
break;
}
}
file_user_write(userptr, day_removed);
file_amalan_write((userptr + id)->data, day_removed);
file_removed_write(day_removed);
return 0;
}
//--------------------------------------------------------
//This function was made by M. Raihan Azhari
//function for entering the daily worship data
void input_data(Amalanptr *sptr){
Amalanptr currentptr; //pointer to the current node
Amalanptr newptr; //new node to be inserted
Amalanptr prevptr; //pointer to the node just before currentptr
newptr = malloc(sizeof(Amalan));
//the user enters the daily worship values
printf("\nMasukan amalan");
printf("\nRakaat Tahajud: ");
scanf("%d", &newptr->tahajud);
printf("Rakaat Dhuha: ");
scanf("%d", &newptr->dhuha);
printf("Banyak Sholat Wajib yang Dikerjakan: ");
scanf("%d", &newptr->wajib);
printf("Jumlah Halaman Tilawah: ");
scanf("%d", &newptr->tilawah);
printf("Jumlah Ayat Tahfidz: ");
scanf("%d", &newptr->tahfidz );
//the new node's next pointer is set to NULL
newptr->next = NULL;
//the previous pointer starts as NULL
prevptr = NULL;
//currentptr starts at the same address as the start pointer
currentptr = *sptr;
//walk until currentptr reaches NULL (the end of the list);
//this loop finds the right insertion position
while(currentptr != NULL){
prevptr = currentptr;
currentptr = currentptr->next;
}
//if the list is still empty, insert the node at the head
if (prevptr == NULL){
newptr->next = *sptr;
*sptr = newptr;
}
//if the list is non-empty, append the new node at the tail
else{
prevptr->next = newptr;
newptr->next = currentptr;
}
}
//end of codes block
//----------------------------------------------------
//---------------------------------------------------
//This function was made by M. Raihan Azhari
void printAmalan(Amalanptr current, int day_removed[50], User *userptr){
int counter;
counter = 0; //counter set to 0 to avoid a garbage value
//while the current node is not empty
while(current != NULL){
//if day n is flagged as removed, printing for that day is skipped
if(day_removed[counter] == 1){
printf("\n\nRekap ibadah hari ke-%d telah dihapus \n", counter + 1);
}
else{
//parallel programming
#pragma omp parallel
{
int tid;
tid = omp_get_thread_num();
#pragma omp single //only one thread prints the header (single carries an implied barrier)
{
printf("\n\nRekap ibadah hari ke-%d:", counter + 1);
}
if(tid == 0){ //work split across different threads
printf("\nTahajud : %d Rakaat",current->tahajud);
printf("\nDhuha : %d Rakaat",current->dhuha);
printf("\nWajib : %d Waktu",current->wajib);
}
if (tid == 1){ //work split across different threads
printf("\nTilawah : %d Halaman",current->tilawah);
printf("\nTahfidz : %d Ayat",current->tahfidz);
}
}
}
current = current->next;
counter++;
}
userptr->day = counter;
}
//end of codes block
//------------------------------------------------------------
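/* Alternative sketch: OpenMP "sections" expresses the fixed two-way work
 * split used in printAmalan() directly. Hypothetical helper, not called by
 * the program: */
void printAmalanSections_sketch(Amalanptr node){
#pragma omp parallel sections
{
#pragma omp section
{
printf("\nTahajud : %d Rakaat", node->tahajud);
printf("\nDhuha : %d Rakaat", node->dhuha);
printf("\nWajib : %d Waktu", node->wajib);
}
#pragma omp section
{
printf("\nTilawah : %d Halaman", node->tilawah);
printf("\nTahfidz : %d Ayat", node->tahfidz);
}
}
}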
//-------------------------------------------------------------
//this function was made by M. Raihan Azhari
void print_evaluasi(Amalanptr current, User *userptr , int day_removed[50]){
int counter, i;
int jumlah_tahajud, jumlah_dhuha, jumlah_wajib, jumlah_tilawah, jumlah_tahfidz, tugas, step;
//zero the accumulators
jumlah_tahajud = 0;
jumlah_dhuha = 0;
jumlah_wajib = 0;
jumlah_tilawah = 0;
jumlah_tahfidz = 0;
counter = 0;
//keep accumulating for as long as current is not empty
while (current != NULL){
jumlah_tahajud += current->tahajud;
jumlah_dhuha += current->dhuha;
jumlah_wajib += current->wajib;
jumlah_tilawah += current->tilawah;
jumlah_tahfidz += current->tahfidz;
counter ++;
current = current->next;
}
userptr->day = counter;
if(counter == 0){ //check whether any data exists
printf("Data masih kosong");
}
else{ //data has been entered
printf("\n Evaluasi ibadah harian selama %d hari: ", counter);
tugas = 0;
step = 0;
#pragma omp parallel firstprivate(tugas, step) //firstprivate so each thread starts from the initialised values
{
#pragma omp master //only the master thread creates the tasks
{
for(i = step; i < 5; i++){ //five tasks, one per worship category
tugas = i;
step = i;
#pragma omp task //hand the work item to a thread
{
if (tugas == 0 && userptr->target_tahajud != 0){
float rata_tahajud = jumlah_tahajud / (float)counter;
float result_tahajud = rata_tahajud /(float) userptr->target_tahajud;
#pragma omp critical //keep the output of one category together
{
printf("\n\n\t ~~Tahajud~~ ");
printf("\nRata-rata Rakaat setiap harinya : %.2f", rata_tahajud);
printf("\nPersen ketercapaian target: %.2f %%", result_tahajud * 100);
}
}
if(tugas == 1 && userptr->target_dhuha != 0){
float rata_dhuha = jumlah_dhuha / (float)counter;
float result_dhuha = rata_dhuha /(float)userptr->target_dhuha;
#pragma omp critical //keep the output of one category together
{
printf("\n\n\t ~~Dhuha~~ ");
printf("\nRata-rata Rakaat setiap harinya : %.2f", rata_dhuha);
printf("\nPersen ketercapaian target: %.2f %%", result_dhuha * 100);
}
}
if(tugas == 2 && userptr->target_wajib != 0){
float rata_wajib = jumlah_wajib / (float)counter;
float result_wajib = rata_wajib /(float)userptr->target_wajib;
#pragma omp critical //keep the output of one category together
{
printf("\n\n ~~Sholat wajib 5 waktu~~ ");
printf("\nRata-rata Rakaat setiap harinya : %.2f", rata_wajib);
printf("\nPersen ketercapaian target: %.2f %%", result_wajib * 100);
}
}
if(tugas == 3 && userptr->target_tilawah != 0){
float rata_tilawah = jumlah_tilawah / (float)counter;
float result_tilawah = rata_tilawah /(float)userptr->target_tilawah;
#pragma omp critical //keep the output of one category together
{
printf("\n\n\t ~~Tilawah~~ ");
printf("\nRata-rata Halaman setiap harinya : %.2f", rata_tilawah);
printf("\nPersen ketercapaian target: %.2f %%", result_tilawah * 100);
}
}
if(tugas == 4 && userptr->target_tahfidz != 0){
float rata_tahfidz = jumlah_tahfidz / (float)counter;
float result_tahfidz = rata_tahfidz /(float)userptr->target_tahfidz;
#pragma omp critical //keep the output of one category together
{
printf("\n\n\t ~~Tahfidz~~ ");
printf("\nRata-rata Ayat setiap harinya : %.2f", rata_tahfidz);
printf("\nPersen ketercapaian target: %.2f %%", result_tahfidz * 100);
}
}
}
}
}
#pragma omp taskwait //wait for the remaining tasks to finish
}
}
}
//-----------------------------------------------------------------------------------
//-----------------------------------------------
//Muhammad Raihan Azhari
void removeptr (Amalanptr *startPtr, int day, int day_removed[50]){
//the three pointers below play the same roles as their counterparts in input_data
Amalanptr prevPtr;
Amalanptr tempPtr;
Amalanptr currentPtr;
int i;
day++;
//if the first day is given, the head node is removed
if ( day == 0) {
tempPtr = *startPtr;
*startPtr = ( *startPtr )->next;
free( tempPtr );
}
else {
prevPtr = *startPtr;
currentPtr = ( *startPtr )->next;
//search for the day to remove using a for loop
for(i = 1; i < day; i++){
//skip days that were already removed
if(day_removed[i]== 1){
continue;
}
//if the iteration reaches the requested day, unlink it
if (i == day) {
tempPtr = currentPtr;
prevPtr->next = currentPtr->next;
free( tempPtr );
}
//advance to the next node
prevPtr = currentPtr;
currentPtr = currentPtr->next;
//currentPtr == NULL means the end of the list was reached; stop the loop
if(currentPtr == NULL) {
break;
}
}
}
}
//------------------------------------------------------
//---------------------------------------------------
//Fikri Afif Musyaffa
int file_user_read (User *userptr, int day_removed[50]){
FILE *fptr;
//read the file "userMuslim.txt"
fptr = fopen("userMuslim.txt", "r");
char temp;
//if it does not exist, create a new file named "userMuslim.txt"
if(fptr == NULL){
fptr = fopen("userMuslim.txt", "w");
if(fptr) fclose(fptr); //close the freshly created file
return 0;
}
else{
fseek(fptr, 0, SEEK_SET);
fscanf(fptr, "%c", &temp);
fscanf(fptr, "%[^\n]", userptr->nama); //nama is already a char array
fscanf(fptr, "\n%d", &userptr->target_status);
fscanf(fptr, "\n%d", &userptr->target_tahajud);
fscanf(fptr, "\n%d", &userptr->target_dhuha);
fscanf(fptr, "\n%d", &userptr->target_wajib);
fscanf(fptr, "\n%d", &userptr->target_tilawah);
fscanf(fptr, "\n%d", &userptr->target_tahfidz);
fscanf(fptr, "\n%d", &userptr->day);
}
fclose(fptr); //close "userMuslim.txt"
return 1;
}
//--------------------------------------------------
//---------------------------------------------------
//Fikri Afif Musyaffa
int file_user_write (User *userptr, int day_removed[50]){
FILE *fptr;
//write the user data to "userMuslim.txt"
fptr = fopen("userMuslim.txt", "w");
fprintf(fptr, "\n%s", userptr->nama);
fprintf(fptr, "\n%d", userptr->target_status);
fprintf(fptr, "\n%d", userptr->target_tahajud);
fprintf(fptr, "\n%d", userptr->target_dhuha);
fprintf(fptr, "\n%d", userptr->target_wajib);
fprintf(fptr, "\n%d", userptr->target_tilawah);
fprintf(fptr, "\n%d", userptr->target_tahfidz);
fprintf(fptr, "\n%d", userptr->day);
fclose(fptr); //close "userMuslim.txt"
return 1;
}
//---------------------------------------------------
//---------------------------------------------------
//M. Raihan Azhari
int file_amalan_read (Amalanptr *sptr, int day_removed[50], int i, int *posisi){
//the file handle is static so it stays open across successive calls
static FILE *fptr = NULL;
Amalanptr currentptr; //points at the current node of the linked list
Amalanptr newptr; //new node to be inserted
Amalanptr prevptr; //node just before currentptr
newptr = malloc(sizeof(Amalan));
//open the file on the first call of a run (the handle is kept open between calls)
if(fptr == NULL){
fptr = fopen("amalan.txt", "r");
}
//if the file does not exist yet
if(fptr == NULL){
//create an empty "amalan.txt" for the next run
fptr = fopen("amalan.txt", "w");
if(fptr) fclose(fptr);
fptr = NULL;
free(newptr);
return 0;
}
//if the file exists
else{
//on the first iteration rewind to the start of the file
if(i == 0){
fseek(fptr, 0, SEEK_SET);
}
//on later iterations keep reading from the current position
else{
fseek(fptr, 0 , SEEK_CUR);
}
//read the values for one day
fscanf(fptr,"\n%d",&newptr->tahajud);
fscanf(fptr,"\n%d",&newptr->dhuha);
fscanf(fptr,"\n%d",&newptr->wajib);
fscanf(fptr,"\n%d",&newptr->tilawah);
fscanf(fptr,"\n%d",&newptr->tahfidz);
newptr->next = NULL;
prevptr = NULL;
currentptr = *sptr;
while(currentptr != NULL){
prevptr = currentptr;
currentptr = currentptr->next;
}
if (prevptr == NULL){
newptr->next = *sptr;
*sptr = newptr;
}
else{
prevptr->next = newptr;
newptr->next = currentptr;
}
if(i == -1){
fclose(fptr);
fptr = NULL;
}
return 1;
}
}
//--------------------------------------------------
//---------------------------------------------------
//M. Raihan Azhari
int file_amalan_write (Amalanptr current, int day_removed[50]){
FILE *fptr;
//open "amalan.txt" for writing
fptr = fopen("amalan.txt", "w");
while (current != NULL){
//write one day's record to "amalan.txt"
fprintf(fptr,"\n%d",current->tahajud);
fprintf(fptr,"\n%d",current->dhuha);
fprintf(fptr,"\n%d",current->wajib);
fprintf(fptr,"\n%d",current->tilawah);
fprintf(fptr,"\n%d",current->tahfidz);
current = current->next;
}
//close "amalan.txt"
fclose(fptr);
return 1;
}
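/* Example of the resulting "amalan.txt" layout: one value per line, five
 * lines per recorded day, in the order tahajud, dhuha, wajib, tilawah,
 * tahfidz. Illustrative values:
 *
 * 8
 * 4
 * 5
 * 10
 * 20
 */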
//---------------------------------------------------
//---------------------------------------------------
//Fikri Afif Musyaffa
void welcome(User *userptr){ //opening menu of the program
int i;
#pragma omp parallel for //print the divider in parallel; the order of identical dashes does not matter
for(i = 0; i < 60; i++){
printf("-");
}
printf("\n\t\t\t Mu'min Pro\n");
#pragma omp parallel for //same parallel divider as above
for(i = 0; i < 60 ;i++){
printf("-");
}
//the opening menu lists the main features
printf("\nAssalamu'alaikum %s", (userptr)->nama);
printf("\n\nMode Menu Mutabaah: ");
printf("\n1. Mutaba'ah Yaumiah (Evaluasi Ibadah Harian)");
printf("\n2. untuk Kalkulator Perhitungan Zakat");
printf("\n3. untuk Kalkulator Perhitungan Waris" );
printf("\n4. Untuk aturan penggunaan program");
printf("\n-1 Untuk keluar program\n\n");
}
//---------------------------------------------------
//---------------------------------------------------
//Fikri Afif Musyaffa
void help_mutabaah(){
//the mutaba'ah yaumiyah menu
printf("\n1. Input / Ubah Target Mutabaah");
printf("\n2. Input Ibadah Harian");
printf("\n3. Lihat Evaluasi Ibadah Harian");
printf("\n4. Menghapus Ibadah Harian");
printf("\n-1 Untuk keluar program");
}
//---------------------------
//-----------------------
//M. Taqiy Nur Furqon
int file_removed_write(int day[50]){ //write the removed-day flags to disk
FILE *fptr;
//open "removeday.txt" for writing
fptr = fopen("removeday.txt", "w");
int i;
//write one flag per day
for(i = 0; i < 50; i++){
fprintf(fptr, "\n%d", day[i]);
}
//close "removeday.txt"
fclose(fptr);
return 1;
}
//---------------------------------------------------
//---------------------------------------------------
//M. Taqiy Nur Furqon
int file_removed_read(int day[50]){
FILE *fptr;
//open "removeday.txt" for reading
fptr = fopen("removeday.txt", "r");
if (fptr == NULL){ //if the file does not exist yet, create it
fptr = fopen("removeday.txt", "w");
if(fptr) fclose (fptr);
return 0;
}
else{ //the file already exists
int i;
//read one flag per day
for(i = 0 ; i < 50; i++){
fscanf(fptr, "\n%d", &day[i]);
}
fclose(fptr);
return 1;
}
}
//---------------------------------------------------
//This block of codes made by Muhammad Taqiy Nur Furqon//
//----------------------------------------------------
//function that displays the zakat calculation menu
int menuZakat() {
int pilihan = 0;//variable for the choice in the zakat menu
int pilihan2;//variable for the choice in the zakat help menu
//the zakat program keeps running until pilihan is -1
while(pilihan != -1) {
system("cls");
printf("\t\t\t\tKalkulator Zakat\n");
printf("\n1. Zakat Penghasilan\n");
printf("2. Zakat Maal\n");
printf("3. Petunjuk Penggunaan\n");
printf("-1. Kembali ke menu utama\n");
printf("\n>>>");
scanf("%d", &pilihan);
switch (pilihan) {
case 1 : system("cls");
zakatPenghasilan();
system("pause");
break;
case 2 : system("cls");
zakatMaal();
system("pause");
break;
case 3 : pilihan2 = 0; //reset so the help loop always starts fresh
while (pilihan2 != -1) {
system("cls");
printf("\t\t\t\tPetunjuk Kalkulator Zakat\n");
printf("\n1. Zakat Penghasilan\n");
printf("2. Zakat Maal\n");
printf("-1. Kembali ke menu zakat\n");
printf(">>>");
scanf("%d", &pilihan2);
switch (pilihan2) {
case 1 : system ("cls");
petunjuk_zakatPenghasilan();
system("pause");
break;
case 2 : system ("cls");
petunjuk_zakatMaal();
system("pause");
break;
}
}
}
}
system("cls");
}
//function that computes the income zakat
void zakatPenghasilan() {
//variables used in the income-zakat calculation
int penghasilan[2];
int pengeluaran;
int penghasilanBersih = 0;
int hargaBeras;
int nisabZakat;
int jumlahZakat;
printf("\t\t\tZakat Penghasilan\n\n");
printf("Penghasilan/gaji per bulan:\nRp ");
scanf("%d", &penghasilan[0]);
printf("Penghasilan lain-lain per bulan:\nRp ");
scanf("%d", &penghasilan[1]);
printf("Hutang/Cicilan untuk Kebutuhan Pokok*\n ");
printf("Kebutuhan pokok termasuk kebutuhan sandang, pangan, papan, pendidikan, kesehatan dan alat transportasi primer.\nRp ");
scanf("%d", &pengeluaran);
//compute the net income used for the zakat calculation
penghasilanBersih = jumlah(penghasilan, 2, 0) - pengeluaran;
//display the net income
printf("\nJumlah Penghasilan per bulan:\n");
printf("Rp %d", penghasilanBersih);
//compute the nisab (minimum threshold)
printf("\n\t\t\tNisab Zakat Penghasilan\n");
printf("Nisab adalah syarat jumlah minimum (ambang batas) harta yang dapat dikategorikan sebagai harta wajib zakat.\n");
printf("Untuk penghasilan yang diwajibkan zakat adalah penghasilan yang berada diatas nisab.\n");
printf("Nisab Zakat Penghasilan adalah setara 522 kg beras normal.\n");
printf("\n Masukkan harga beras saat ini (per kg)\nRp ");
scanf("%d", &hargaBeras);
nisabZakat = hargaBeras * 522;
printf("\nBesarnya Nisab Zakat Penghasilan per Bulan\n");
printf("Rp %d", nisabZakat);
//compare the net income against the nisab to decide whether zakat is due
if (penghasilanBersih >= nisabZakat) {
printf("\n\nAnda diwajibkan membayar zakat\n");
printf("Jumlah yang harus dibayarkan adalah:\n");
zakat(&jumlahZakat, penghasilanBersih);//memanggil fungsi zakat untuk menampilkan jumlah zakat yang dibayarkan
printf("Rp %d\n\n", jumlahZakat);
}
else
printf("\nAnda tidak diwajibkan membayar zakat\n");
}
//function that computes the wealth (maal) zakat
void zakatMaal() {
//variables used in the maal-zakat calculation
int hartaTersimpan[9];
int harta = 0;
int hutangTempo;
int hartaTerhitung;
int hargaEmas;
int nisabZakat;
int jumlahZakat;
int i;//loop counter
list_zakatMaal();//show the asset categories
printf("\nMasukkan jumlah harta berikut secara berurutan:\n");
for(i = 0; i < 9; i++) {
printf("%d. Rp ", i + 1);
scanf("%d", &hartaTersimpan[i]);
}
printf("\nHutang jatuh tempo saat membayar kewajiban zakat:\nRp ");
scanf("%d", &hutangTempo);
//compute the total assets counted toward the zakat
hartaTerhitung = jumlah(hartaTersimpan, 9, 0) - hutangTempo;
system("cls");
//display the entered hartaTersimpan values
list_zakatMaal();
//printed sequentially so the numbered list stays in order
for(i = 0; i < 9; i++) {
printf("%d. Rp %d\n", i + 1, hartaTersimpan[i]);
}
//display the total assets counted toward the zakat
printf("Jumlah harta yang dihitung zakatnya:\n");
printf("Rp %d\n", hartaTerhitung);
printf("\n Masukkan harga emas saat ini (dalam gram)\nRp ");
scanf("%d", &hargaEmas);
//compute and display the nisab (85 grams of gold)
nisabZakat = hargaEmas * 85;
printf("\nBesarnya Nisab Zakat Maal\n");
printf("Rp %d\n", nisabZakat);
//compare the counted assets against the nisab to decide whether zakat is due
if (hartaTerhitung >= nisabZakat) {
printf("\nAnda diwajibkan membayar zakat\n");
printf("Jumlah yang harus dibayarkan adalah:\n");
zakat(&jumlahZakat, hartaTerhitung);//memanggil fungsi zakat untuk menampilkan jumlah zakat yang dibayarkan
printf("Rp %d\n\n", jumlahZakat);
}
else
printf("\nAnda tidak diwajibkan membayar zakat\n");
}
//function that sums the first n elements of an array
int jumlah(int x[], int n, int i) {
int sum = 0; //the accumulator must start at 0
#pragma omp parallel for reduction(+:sum)
for(i = 0; i < n; i++) {
sum = sum + x[i];
}
return sum;
}
//basic function that computes the zakat amount (2.5% of the base)
int zakat(int * x, int y) {
* x = y * 0.025;
return * x;
}
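/* Note: y * 0.025 goes through double arithmetic and truncates the result
 * toward zero. A sketch of an integer-only alternative (hypothetical helper,
 * not called by the program); 2.5% == 25/1000, computed in 64 bits so large
 * amounts do not overflow: */
long long zakat_int_sketch(long long y) {
return (y * 25LL) / 1000LL; /* truncating division, no floating point */
}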
//function that lists the asset types entered for the maal zakat
void list_zakatMaal() {
printf("\t\t\tZakat Maal\n");
printf("1. Harta dalam bentuk Tabungan/Giro/Deposito\n");
printf("2. Harta dalam bentuk logam mulia\n");
printf("3. Harta dalam bentuk surat berharga\n");
printf("4. Harta dalam bentuk Properti\n");
printf("5. Harta dalam bentuk kendaraan\n");
printf("6. Harta dalam bentuk koleksi seni & barang antik\n");
printf("7. Harta dalam bentuk stok barang dagangan\n");
printf("8. Harta dalam bentuk lainnya\n");
printf("9. Harta dalam bentuk Piutang Lancar\n\n");
}
//function that shows the income-zakat instructions
void petunjuk_zakatPenghasilan() {
printf("\t\t\tPetunjuk Zakat Penghasilan\n\n");
printf("1. Program ini akan menghitung jumlah zakat penghasilan.\n");
printf("2. Untuk memulai program ini, masukkan angka 1 pada menu zakat.\n");
printf("3. Anda akan diminta untuk memasukkan penghasilan bulanan Anda.\n");
printf("4. Anda juga perlu memasukkan penghasilan bulanan lainnya.\n");
printf("5. Masukkan pengeluaran Anda per bulan dari kebutuhan pokok.\n");
printf("6. Yang dimaksud Kebutuhan Pokok adalah kebutuhan sandang, pangan, papan,\n");
printf(" pendidikan, kesehatan dan alat transportasi primer.\n");
printf("7. Program akan menampilkan penghasilan bersih Anda.\n");
printf("8. Masukkan harga beras sebagai nisab zakat penghasilan.\n");
printf("9. Program akan menununjukkan apakah Anda perlu membayar zakat.\n");
printf(" Jika iya, maka program akan menampilkan jumlah zakat yang harus dibayarkan.\n\n");
}
//function that shows the maal (wealth) zakat instructions
void petunjuk_zakatMaal() {
printf("\t\t\tPetunjuk Zakat Maal\n\n");
printf("1. Program ini akan menghitung jumlah zakat maal.\n");
printf("2. Untuk memulai program ini, masukkan angka 2 pada menu zakat.\n");
printf("3. Anda akan diminta untuk memasukkan harta yang Anda miliki sebagai berikut:\n");
printf("\ta. Harta dalam bentuk Tabungan/Giro/Deposito\n");
printf("\tb. Harta dalam bentuk logam mulia\n");
printf("\tc. Harta dalam bentuk surat berharga\n");
printf("\t Surat Berharga antara lain nilai tunai dari Reksadana, Saham, Obligasi, Unit Link, dll.\n");
printf("\td. Harta dalam bentuk Properti\n");
printf("\t Rumah (properti) yang digunakan sehari-hari, TIDAK DIKENAKAN ZAKAT.\n");
printf("\te. Harta dalam bentuk kendaraan\n");
printf("\t Kendaraan yang digunakan sehari-hari, TIDAK DIKENAKAN ZAKAT.\n");
printf("\tf. Harta dalam bentuk koleksi seni & barang antik\n");
printf("\t Nilai Koleksi dapat ditaksir sendiri, bila dimungkinkan dapat dibantu kurator seni.\n");
printf("\tg. Harta dalam bentuk stok barang dagangan\n");
printf("\th. Harta dalam bentuk lainnya\n");
printf("\ti. Harta dalam bentuk Piutang Lancar\n\n");
printf("4. Anda akan diminta untuk memasukkan hutang yang jatuh temponya saat kewajiban membayar zakat\n");
printf(" Contoh bagi pedagang yang harus melunasi cicilan hutang atas barang yang diperdagangkan.\n");
printf("5. Masukkan harga emas sebagai nisab zakat maal.\n");
printf("6. Program akan menununjukkan apakah Anda perlu membayar zakat.\n");
printf(" Jika iya, maka program akan menampilkan jumlah zakat yang harus dibayarkan.\n\n");
}
//end of codes block
//--------------------------------------------------------
//--------------------------------------------------------
//This function was made by Fikri Afif Musyaffa
//inheritance menu function
int menuWaris(){
struct keluarga keluarga; //declare the keluarga struct
int warisan[10]; //share of the estate received by each family category
int harta, hutang; //total estate to be inherited
int pewaris, keturunan, saudara, orangtua, pilih; //condition-check variables
printf("Perhitungan Waris\n");
printf("\nMasukkan total harta yang ditinggalkan Pewaris : ");
scanf("%d", &harta); //the deceased's gross estate
printf("Masukkan total hutang yang ditinggalkan Pewaris : ");
scanf("%d", &hutang); //any debt the deceased left unpaid
harta = harta - hutang; //net estate after the debt is settled
printf("\nTotal harta : Rp. %d\n\n", harta);
system("pause");
system("cls");
printf("Jika pewaris laki-laki, masukkan angka 1");
printf("\nJika pewaris perempuan, masukkan angka 2");
printf("\nMasukkan angka : ");
scanf("%d", &pewaris);
while(pewaris != 1 || pewaris != 2){ //spouse input validation (the condition is always true; the loop exits via break)
if(pewaris == 1){ //the deceased is male
keluarga.suami = 0; //the deceased husband himself receives no share
printf("\n\nApakah pewaris memiliki istri yang masih hidup?");
printf("\nJika iya, masukkan angka 1");
printf("\nJika tidak, masukkan angka 0");
printf("\n\nMasukkan angka : ");
scanf("%d", &keluarga.istri);
if(keluarga.istri == 1){
system("cls");
printf("Masukkan jumlah istri yang masih hidup : "); //there may be more than one wife
scanf("%d", &keluarga.istri);
break;
}
else if(keluarga.istri == 0){ //the wife passed away earlier
break;
}
else{
printf("\nMasukkan angka dengan benar !\n\n"); //input validation for the male-deceased branch
}
}
else if(pewaris == 2){ //the deceased is female
keluarga.istri = 0; //the deceased wife herself receives no share
printf("Apakah pewaris memiliki suami yang masih hidup?");
printf("\nJika iya, masukkan angka 1");
printf("\nJika tidak, masukkan angka 0");
printf("\n\nMasukkan angka : ");
scanf("%d", &keluarga.suami);
if(keluarga.suami == 1 || keluarga.suami == 0){ //record whether the deceased's husband is still alive
break;
}
else{
printf("\nMasukkan angka dengan benar !\n\n"); //input validation for the female-deceased branch
}
}
else{
printf("Masukkan angka dengan benar ! : "); //neither 1 nor 2 was entered
scanf("%d", &pewaris);
}
}
system("cls");
printf("Apakah pewaris memiliki keturunan yang masih hidup?");
printf("\nJika iya, masukkan angka 1");
printf("\nJika tidak, masukkan angka 0");
printf("\n\nMasukkan angka : ");
scanf("%d", &keturunan); //check whether the deceased has living descendants
while(keturunan != 1 || keturunan != 0){ //descendant input validation (always-true condition; exits via break)
if(keturunan == 1){ //ask for the numbers of children and grandchildren
system("cls");
printf("Masukkan jumlah anak laki-laki : ");
scanf("%d", &keluarga.anaklk);
printf("Masukkan jumlah anak perempuan : ");
scanf("%d", &keluarga.anakpr);
printf("Masukkan jumlah cucu laki-laki : ");
scanf("%d", &keluarga.cuculk);
printf("Masukkan jumlah cucu perempuan : ");
scanf("%d", &keluarga.cucupr);
break;
}
else if(keturunan == 0){ //zero all child and grandchild counts
keluarga.anaklk = 0;
keluarga.anakpr = 0;
keluarga.cuculk = 0;
keluarga.cucupr = 0;
break;
}
else{
printf("Masukkan angka dengan benar ! : "); //descendant input validation
scanf("%d", &keturunan);
}
}
system("cls");
printf("Apakah pewaris memiliki saudara/i yang masih hidup?");
printf("\nJika iya, masukkan angka 1");
printf("\nJika tidak, masukkan angka 0");
printf("\n\nMasukkan angka : ");
scanf("%d", &saudara); //check whether the deceased has living siblings
while(saudara != 1 || saudara != 0){ //sibling input validation (always-true condition; exits via break)
if(saudara == 1){ //ask for the numbers of siblings
system("cls");
printf("Masukkan jumlah saudara laki-laki : ");
scanf("%d", &keluarga.sdrlk);
printf("Masukkan jumlah saudara perempuan : ");
scanf("%d", &keluarga.sdrpr);
break;
}
else if(saudara == 0){ //zero all sibling counts
keluarga.sdrlk = 0;
keluarga.sdrpr = 0;
break;
}
else{
printf("Masukkan angka dengan benar ! : "); //sibling input validation
scanf("%d", &saudara);
}
}
system("cls");
printf("Apakah pewaris memiliki orang tua yang masih hidup?");
printf("\nJika iya, masukkan angka 1");
printf("\nJika tidak, masukkan angka 0");
printf("\n\nMasukkan angka : ");
scanf("%d", &orangtua); //check whether the deceased's parents are still alive
while(orangtua != 0 || orangtua != 1){ //parent input validation (always-true condition; exits via break)
if(orangtua == 1){ //ask which parents are alive
printf("\n1. Bapak");
printf("\n2. Ibu");
printf("\n3. Keduanya");
printf("\n\nMasukkan angka : ");
scanf("%d", &pilih);
if(pilih == 1){ //the father is alive
keluarga.bapak = 1;
keluarga.ibu = 0;
break;
}
else if(pilih == 2){ //the mother is alive
keluarga.bapak = 0;
keluarga.ibu = 1;
break;
}
else if(pilih == 3){ //both parents are alive
keluarga.bapak = 1;
keluarga.ibu = 1;
break;
}
else{
printf("\nMasukkan angka dengan benar !\n\n"); //parent input validation
}
}
else if(orangtua == 0){ //both have passed away
keluarga.bapak = 0;
keluarga.ibu = 0;
break;
}
else{
printf("Masukkan angka dengan benar ! : "); //parent input validation
scanf("%d", &orangtua);
}
}
perhitungan(harta, warisan, keluarga, keturunan, saudara, orangtua); //inheritance division algorithm
list(keluarga); //list the family members who receive a share
display(warisan); //print the computed inheritance shares
printf("\n\n\n");
system("pause");
system("cls");
return 0;
//end of codes block
//---------------------------------------------------------
}
//--------------------------------------------------------
//This function was made by Fikri Afif Musyaffa
void perhitungan(int harta, int warisan[10], struct keluarga keluarga, int keturunan, int saudara, int orangtua){
int i; //loop variable
int sisa = 0; //remaining estate
int temp = 0; //temporary denominator for the male/female split
int sisa_bapak = 0; //flag: the father takes any remainder if he is alive
#pragma omp parallel for //zero the shares array in parallel
for(i = 0; i < 10; i++){
warisan[i] = 0; //clear the array
}
if(keluarga.suami == 1){ //determine the husband's share
if(keluarga.anaklk >= 1){
if(keluarga.anakpr >= 1){
if(keluarga.cuculk >= 1){
if(keluarga.cucupr >= 1){
if(keluarga.sdrlk || keluarga.sdrpr >= 1){
warisan[0] = harta / 2; //share when every condition above holds
}else warisan[0] = harta / 4;
}else warisan[0] = harta / 4;
}else warisan[0] = harta / 4;
}else warisan[0] = harta / 4;
}else warisan[0] = harta / 4;
}
sisa = harta - warisan[0]; //estate remaining after the husband's share
if(keluarga.istri >= 1){ //determine the wives' shares
if(keluarga.anaklk >= 1){
if(keluarga.anakpr >= 1){
if(keluarga.cuculk >= 1){
if(keluarga.cucupr >= 1){
if(keluarga.sdrlk || keluarga.sdrpr >= 1){
warisan[1] = (harta / 4) / keluarga.istri; //the wives split this share equally
}else warisan[1] = (harta / 8) / keluarga.istri;
}else warisan[1] = (harta / 8) / keluarga.istri;
}else warisan[1] = (harta / 8) / keluarga.istri;
}else warisan[1] = (harta / 8) / keluarga.istri;
}else warisan[1] = (harta / 8) / keluarga.istri;
}
sisa = harta - (warisan[1] * keluarga.istri); //estate remaining after the wives' shares
int tugas = 0; //task index for the loop below
#pragma omp parallel private (tugas) //parallel region; each thread gets a private tugas
{
#pragma omp master //only the master thread creates the tasks
{
for(i = 0; i < 3; i++){
tugas = i; //so the tasks are created in order
#pragma omp task //spawn a task
{
if(tugas == 1){ //handled on the iteration with tugas == 1
if(keluarga.ibu == 1){
if(keluarga.anaklk >= 1 || keluarga.anakpr >= 1 || keluarga.cuculk >= 1 || keluarga.cucupr >= 1){ //conditions for the mother's smaller share
#pragma omp critical //avoid a race condition on sisa
{
warisan[7] = harta / 6; //the mother's share
sisa = sisa - warisan[7]; //remainder after the mother's share
}
}
else{
if(keluarga.ibu == 1){
if((keluarga.suami == 1) || (keluarga.istri >= 1) || keluarga.bapak == 1){ //conditions for the mother's larger share
#pragma omp critical //avoid a race condition on sisa
{
warisan[7] = harta / 3; //the mother's share
sisa = sisa - warisan[7]; //remainder after the mother's share
}
}
}
}
}
}
if(tugas == 2){ //handled on the iteration with tugas == 2
if(keluarga.bapak == 1 ){
if(keluarga.anaklk >= 1 || keluarga.anakpr >= 1 ||keluarga.cuculk >= 1 || keluarga.cucupr >= 1){ //conditions for the father's share
#pragma omp critical //avoid a race condition on sisa
{
warisan[6] = harta / 6; //the father's share
sisa = sisa - warisan[6]; //remainder after the father's share
}
}
else{
#pragma omp critical //avoid a race condition on sisa
{
warisan[6] = harta / 6; //the father's share
sisa = sisa - warisan[6]; //remainder after the father's share
sisa_bapak = 1;
}
}
}
}
}
}
}
#pragma omp taskwait //wait for the parent-share tasks before using sisa below
}
if(keluarga.anaklk >= 1){ //sons exist
if(keluarga.anakpr >= 1){
temp = (2 * keluarga.anaklk) + keluarga.anakpr; //a son receives twice a daughter's share
warisan[2] = (2 * sisa) / temp; //each son's share
warisan[3] = sisa / temp; //each daughter's share
sisa = sisa - (keluarga.anaklk * warisan[2] + keluarga.anakpr * warisan[3]);
}
else{ //no daughters
temp = (2 * keluarga.anaklk) + keluarga.anakpr; //a son receives twice a daughter's share
warisan[2] = (2 * sisa) / temp;
sisa = sisa - (keluarga.anaklk * warisan[2] + keluarga.anakpr * warisan[3]); //remaining estate
}
}
else if(keluarga.anaklk == 0){ //no sons
if(keluarga.anakpr == 1){ //an only daughter
warisan[3] = harta / 2; //the daughter's share
sisa = sisa - warisan[3];
if(keluarga.cuculk >= 1){
warisan[8] = sisa / keluarga.cuculk; //each grandson's share
sisa = sisa - (warisan[8] * keluarga.cuculk);
}
}
else if(keluarga.anakpr >= 2){ //two or more daughters
warisan[3] = ((2 * harta) / 3) / keluarga.anakpr; //each daughter's share
sisa = sisa - (warisan[3] * keluarga.anakpr);
if(keluarga.cuculk >= 1){
warisan[8] = sisa / keluarga.cuculk; //each grandson's share
sisa = sisa - (warisan[8] * keluarga.cuculk);
}
}
else if(keluarga.anakpr == 0){ //no daughters either
if(keluarga.cuculk >= 1){ //if there are grandsons, split as follows
if(keluarga.cucupr >= 1){
temp = (2 * keluarga.cuculk) + keluarga.cucupr;
warisan[8] = (2 * sisa) / temp; //a grandson receives twice a granddaughter's share
warisan[9] = sisa / temp;
sisa = sisa - (keluarga.cuculk * warisan[8] + keluarga.cucupr * warisan[9]);
}
else if(keluarga.cuculk == 0){ //no grandsons: split as follows
if(keluarga.anakpr >= 1){ //daughters exist
warisan[9] = (harta / 6) / keluarga.cucupr; //each granddaughter's share
sisa = sisa - (warisan[9] * keluarga.cucupr);
}
else if(keluarga.cucupr == 1){ //only one granddaughter
warisan[9] = harta / 2; //the granddaughter's share
sisa = sisa - warisan[9];
}
else if(keluarga.cucupr >= 2){ //two or more granddaughters
warisan[9] = ((2 * harta) / 3) / keluarga.cucupr; //each granddaughter's share
sisa = sisa - (warisan[9] * keluarga.cucupr);
}
}
}
else if(keluarga.cuculk == 0){ //no grandsons
if(keluarga.sdrlk >= 1){ //brothers exist
if(keluarga.sdrpr >= 1){ //sisters exist
temp = (2 * keluarga.sdrlk) + keluarga.sdrpr;
warisan[4] = (2 * sisa) / temp; //a brother receives twice a sister's share
warisan[5] = sisa / temp;
sisa = sisa - (keluarga.sdrlk * warisan[4] + keluarga.sdrpr * warisan[5]);
}
}
else if(keluarga.sdrlk == 0){ //no brothers
if(keluarga.sdrpr == 1){
warisan[5] = harta / 2; //the sister's share
sisa = sisa - warisan[5];
}
else if(keluarga.sdrpr >= 2){ //two or more sisters
warisan[5] = ((2 * harta) / 3) / keluarga.sdrpr; //each sister's share
sisa = sisa - (warisan[5] * keluarga.sdrpr);
}
}
}
}
}
if(sisa_bapak == 1 && sisa > 0){ //any remainder goes to the deceased's father
warisan[6] += sisa; //the father's final share
}
#pragma omp parallel for //clamp the shares in parallel
for(i = 0; i < 10; i++){ //guard against negative shares
if(warisan[i] < 0){
warisan[i] = 0;
}
}
printf("\nSedang menghitung pembagian harta warisan . . . \n\n");
Sleep(3000); //3-second pause (Sleep() comes from <windows.h>)
system("cls");
printf("SISA Harta Pewaris : %d\n\n", sisa); //final remainder of the calculation
}
//end of codes block
//--------------------------------------------------------
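/* Worked example (illustrative numbers), traced through perhitungan() above:
 * harta = 120, one wife, one son, no other heirs.
 * The wife gets harta/8 = 15 (descendants exist), leaving sisa = 105;
 * the son, as the only child, gets (2*sisa)/((2*1)+0) = 105; sisa ends at 0,
 * so no remainder is left over for anyone else. */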
//--------------------------------------------------------
//This function was made by Fikri Afif Musyaffa
void list(struct keluarga keluarga){ //display the number of family members in each category
printf("Key list anggota keluarga\n\n");
printf("1. Suami : %d\n", keluarga.suami);
printf("2. Istri : %d\n", keluarga.istri);
printf("3. Anak Laki-laki : %d\n", keluarga.anaklk);
printf("4. Anak Perempuan : %d\n", keluarga.anakpr);
printf("5. Saudara Laki-laki : %d\n", keluarga.sdrlk);
printf("6. Saudara Perempuan : %d\n", keluarga.sdrpr);
printf("7. Bapak : %d\n", keluarga.bapak);
printf("8. Ibu : %d\n", keluarga.ibu);
printf("9. Cucu Laki-laki : %d\n", keluarga.cuculk);
printf("10. Cucu Perempuan : %d\n", keluarga.cucupr);
}
//end of codes block
//--------------------------------------------------------
//--------------------------------------------------------
//This function was made by Fikri Afif Musyaffa
void display(int warisan[10]){ //display the estate shares divided according to the inheritance rules
int i;
//printed sequentially so the categories stay in numbered order
for(i = 0; i < 10; i++){
printf("\nKategori anggota keluarga ke-%d mendapatkan masing-masing : Rp. %d", i+1, warisan[i]);
}
}
//end of codes block
//--------------------------------------------------------
//usage instructions for the program
void information (){
system("cls");
printf("Petunjuk penggunaan program: \n");
#pragma omp parallel
{
int tid;
tid = omp_get_thread_num();
if(tid == 0){ //work assigned to thread 0
printf("\n~~ Harap masukan menu 1 (target) terlebih dahulu sebelum masuk ke menu selanjutnya");
printf("\n~~ Jika anda ingin kembali ke menu utama atau mengakhiri program harap tekan - 1");
printf("\n~~ Jangan pernah memasukan huruf !, Jika terjadi maka bisa restart program");
}
if(tid == 1){ //work assigned to thread 1
printf("\n~~ Anda dapat mengubah target amalan atau identitas dengan menu 1");
printf("\n~~ Pastikan anda selalu menekan -1 untuk mengakhiri program supaya data anda tersimpan");
}
}
printf("\n\n");
system("pause");
system("cls");
}
|
smith.c | /*!
* \file smith.c
* \author Jun Yoshida
* \copyright (c) Jun Yoshida 2019
* The project is released under BSD3 License.
 * \details
 * Compute the Smith normal forms by recursive application of hermiteNF_LLL.
*/
#include "common.h"
#include "elementary.h"
#include "hermite_lll.h"
#include "smith.h"
#include <stdlib.h>
/* / Debug
#include <stdio.h>
#define DEBUG_MESSAGE fprintf(stderr, "%s:%d\n", __func__, __LINE__)
// */
typedef struct mat_index_t_ {
size_t i,j;
} mat_index_t;
/*!
* Find the size of the maximal diagonal matrix D such that
* > D | O
* > mat = --+--
* > O | *
*/
static
size_t max_diagonal(const matrix_type * mat)
{
size_t result = mat->r < mat->c ? mat->r : mat->c;
#pragma omp parallel for reduction (min:result)
for (size_t i = 0; i < mat->r; ++i) {
for (size_t j = 0; j < mat->c; ++j) {
if ( (i!=j) && (MATRIX_AT(*mat,i,j) != 0) ) {
result = (i>j) ? j : i;
break;
}
}
}
return result;
}
/*!
* Find the last unit in the diagonal entries.
*/
static inline
size_t find_last_unit_diag(const matrix_type *mat)
{
size_t bound = mat->r < mat->c ? mat->r : mat->c;
size_t i;
for (i = 0; i < bound; ++i) {
if (MATRIX_AT(*mat, i, i) != 1)
break;
}
return i;
}
/*!
* Find the first zero in the diagonal entries.
*/
static inline
size_t find_first_zero_diag(const matrix_type *mat)
{
size_t bound = mat->r < mat->c ? mat->r : mat->c;
size_t i;
for (i = 0; i < bound; ++i) {
if (MATRIX_AT(*mat, i, i) == 0)
break;
}
return i;
}
/*!
* Eliminate all the off-diagonal entries by applying LLL-based algorithm recursively.
* \return the number of non-zero diagonals in the resulting matrix.
*/
static
size_t elim_offdiag(matrix_type * restrict u, matrix_type * restrict uinv, matrix_type * restrict m, matrix_type * restrict v, matrix_type * restrict vinv)
{
matrix_type u_iter = *u, uinv_iter = *uinv;
matrix_type m_iter = *m;
/* Iterator referring to the transposed v */
matrix_type vt_iter = {
.p = v->p,
.r = v->c,
.c = v->r,
.Xr = v->Xc,
.Xc = v->Xr
};
matrix_type vinvt_iter = {
.p = vinv->p,
.r = vinv->c,
.c = vinv->r,
.Xr = vinv->Xc,
.Xc = vinv->Xr
};
/* Counter for non-zero diagonals. */
size_t ndiag = 0;
while(m_iter.r > 0 && m_iter.c > 0) {
hermiteNF_LLL(
1, (matrix_type*[]){&u_iter},
1, (matrix_type*[]){&uinv_iter},
&m_iter );
transpose(&m_iter);
hermiteNF_LLL(
1, (matrix_type*[]){&vt_iter},
1, (matrix_type*[]){&vinvt_iter},
&m_iter );
transpose(&m_iter);
size_t k = max_diagonal(&m_iter);
/* update iterators */
m_iter.r -= k;
m_iter.c -= k;
m_iter.p += k * (m_iter.Xr + m_iter.Xc);
u_iter.c -= k;
u_iter.p += k * u_iter.Xc;
uinv_iter.r -= k;
uinv_iter.p += k * uinv_iter.Xr;
vt_iter.c -= k;
vt_iter.p += k * vt_iter.Xc;
vinvt_iter.r -= k;
vinvt_iter.p += k * vinvt_iter.Xr;
/* update the counter of non-zero diagonals. */
ndiag += k;
}
return ndiag;
}
/*!
* Compute the Smith normal form of a given matrix.
* \return the number of non-zero diagonals in the resulting matrix.
*/
size_t smithNF(matrix_type * restrict u, matrix_type * restrict uinv, matrix_type * restrict m, matrix_type * restrict v, matrix_type * restrict vinv)
{
matrix_type u_iter = u ? *u : MATRIX_ZEROROW(m->r);
matrix_type uinv_iter = uinv ? *uinv : MATRIX_ZEROCOL(m->r);
matrix_type m_iter = *m;
matrix_type v_iter = v ? *v : MATRIX_ZEROCOL(m->c);
matrix_type vinv_iter = vinv ? *vinv : MATRIX_ZEROROW(m->c);
size_t ndiag
= elim_offdiag(&u_iter, &uinv_iter, &m_iter, &v_iter, &vinv_iter);
// Trim the matrix into square
if (m_iter.r > m_iter.c) {
m_iter.r = m_iter.c;
u_iter.c = m_iter.c;
uinv_iter.r = m_iter.c;
}
else {
m_iter.c = m_iter.r;
v_iter.r = m_iter.r;
vinv_iter.c = m_iter.r;
}
// Ignore all the diagonal entries == 1
size_t k = find_last_unit_diag(&m_iter);
/* / Debug
fprintf( stderr, "1!=@%zu\n", k);
// */
m_iter.r -= k;
m_iter.c -= k;
m_iter.p += k * (m_iter.Xr + m_iter.Xc);
u_iter.p += k * u_iter.Xc;
uinv_iter.p += k * uinv_iter.Xr;
v_iter.p += k * v_iter.Xr;
vinv_iter.p += k * vinv_iter.Xc;
// Ignore the null space
k = find_first_zero_diag(&m_iter);
/* / Debug
fprintf( stderr, "0==@%zu\n", k);
// */
m_iter.r = k;
m_iter.c = k;
u_iter.c = k;
uinv_iter.r = k;
v_iter.r = k;
vinv_iter.c = k;
/* / Debug
fprintf( stderr, "%zu >< %zu, %"PRId64"\n", m_iter.r, m_iter.c, *(m_iter.p));
// */
while (m_iter.r > 0) {
#pragma omp parallel for
for (size_t i = 1; i < m_iter.r; ++i) {
MATRIX_AT(m_iter, i, 0) = MATRIX_AT(m_iter, i, i);
axpy_rows(-1, 0, i, &v_iter);
axpy_columns(1, i, 0, &vinv_iter);
}
elim_offdiag(&u_iter, &uinv_iter, &m_iter, &v_iter, &vinv_iter);
// update iterators
--m_iter.r;
--m_iter.c;
m_iter.p += (m_iter.Xr + m_iter.Xc);
--u_iter.c;
u_iter.p += u_iter.Xc;
--uinv_iter.r;
uinv_iter.p += uinv_iter.Xr;
--v_iter.r;
v_iter.p += v_iter.Xr;
--vinv_iter.c;
vinv_iter.p += vinv_iter.Xc;
}
return ndiag;
}
/*!
* Compute a representation of a linear map by a smith normal form.
* More precisely, for a linear map f:Z^r->Z^s, this function computes a commutative diagram
* f
* Z^r → Z^s
* V ↑ ↑ U
* Z^r → Z^s
* S
* where
* - U and V are unimodular;
* - S is in a Smith normal form.
* \param a transformed into the matrix product a <> U.
* \param m A representation matrix for f; transformed into S.
* \param b transformed into the matrix product b <> V.
* \pre Be sure that both a <> U and b <> V make sense.
*/
size_t smithRep(matrix_type * restrict a, matrix_type * restrict m, matrix_type * restrict b)
{
/* auxiliary matrix: column major */
matrix_type aux = {
.p = calloc(a->r * a->r, sizeof(target_type)),
.r = a->r,
.c = a->r,
.Xr = 1,
.Xc = a->r,
};
/* Initialize aux into the identity matrix. */
for(size_t i = 0; i < a->r; ++i)
MATRIX_AT(aux,i,i) = 1;
/*
* Compute the Smith normal form of m and save the number of non-zero diagonals.
 * For cleaner image vectors, it is better to compute the Smith normal form of the transpose of m instead of m itself.
*/
transpose(m);
transpose(b);
size_t ndiag = smithNF(NULL, b, m, &aux, NULL);
transpose(m);
transpose(b);
transpose(&aux);
/* Multiply a by a U that is as simple as possible. */
aux.c = ndiag;
hermiteNF_LLL(1, (matrix_type*[]){a}, 0, (matrix_type*[]){}, &aux);
/* We will not use aux any more. */
free(aux.p);
/* Make the kernel vectors cleaner. */
if (ndiag < m->r) {
matrix_type bker = {
.p = b->p + ndiag * b->Xc,
.r = b->c - ndiag,
.c = b->r,
.Xr = b->Xc,
.Xc = b->Xr
};
hermiteNF_LLL(
0, (matrix_type*[]){},
0, (matrix_type*[]){},
&bker );
}
return ndiag;
}
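/*!
 * Usage sketch (hedged): this follows the allocation conventions visible in
 * smithRep() above; target_type and MATRIX_AT come from common.h, and the
 * Xr/Xc strides are assumed to mean "step per row" / "step per column" as in
 * the aux matrix there. Hypothetical helper, not part of the library.
 */
void smithNF_demo_sketch(void)
{
target_type data[4] = {2, 4, 6, 8}; /* row-major 2x2 matrix */
matrix_type m = { .p = data, .r = 2, .c = 2, .Xr = 2, .Xc = 1 };
/* NULL transforms are allowed; only the Smith form of m is computed. */
size_t ndiag = smithNF(NULL, NULL, &m, NULL, NULL);
/* afterwards the diagonal of m holds the invariant factors and
 * ndiag counts the non-zero ones. */
(void)ndiag;
}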
|
ep.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB EP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
//--------------------------------------------------------------------
// program EMBAR
//--------------------------------------------------------------------
// This is the serial version of the APP Benchmark 1,
// the "embarassingly parallel" benchmark.
//
//
// M is the Log_2 of the number of complex pairs of uniform (0, 1) random
// numbers. MK is the Log_2 of the size of each batch of uniform random
// numbers. MK can be set for convenience on a given system, since it does
// not affect the results.
//--------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "type.h"
#include "npbparams.h"
//#include "randdp.h"
#include "timers.h"
#include "print_results.h"
//#include "accelmath.h"
#ifdef SPEC_NO_INLINE
#define INLINE
#else
#ifdef SPEC_NO_STATIC_INLINE
#define INLINE inline
#else
#define INLINE static inline
#endif
#endif
#define MAX(X,Y) (((X) > (Y)) ? (X) : (Y))
//#define MK 16
//#define MM (M - MK)
//#define NN (1 << MM)
//#define NK (1 << MK)
//#define NQ 10
#define EPSILON 1.0e-8
#define A 1220703125.0
#define S 271828183.0
#ifndef SPEC_BLOCK_SIZE
#define BLKSIZE 1792
#else
#define BLKSIZE SPEC_BLOCK_SIZE
#endif
#define r23 1.1920928955078125e-07
#define r46 r23 * r23
#define t23 8.388608e+06
#define t46 t23 * t23
#pragma omp declare target
INLINE double randlc_ep( double *x, double a )
{
//--------------------------------------------------------------------
//
// This routine returns a uniform pseudorandom double precision number in the
// range (0, 1) by using the linear congruential generator
//
// x_{k+1} = a x_k (mod 2^46)
//
// where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers
// before repeating. The argument A is the same as 'a' in the above formula,
// and X is the same as x_0. A and X must be odd double precision integers
// in the range (1, 2^46). The returned value randlc_ep is normalized to be
// between 0 and 1, i.e. randlc_ep = 2^(-46) * x_1. X is updated to contain
// the new seed x_1, so that subsequent calls to randlc_ep using the same
// arguments will generate a continuous sequence.
//
// This routine should produce the same results on any computer with at least
// 48 mantissa bits in double precision floating point data. On 64 bit
// systems, double precision should be disabled.
//
// David H. Bailey October 26, 1990
//
//--------------------------------------------------------------------
// r23 = pow(0.5, 23.0);
//// pow(0.5, 23.0) = 1.1920928955078125e-07
// r46 = r23 * r23;
// t23 = pow(2.0, 23.0);
//// pow(2.0, 23.0) = 8.388608e+06
// t46 = t23 * t23;
/*
const double r23 = 1.1920928955078125e-07;
const double r46 = r23 * r23;
const double t23 = 8.388608e+06;
const double t46 = t23 * t23;
*/
double t1, t2, t3, t4, a1, a2, x1, x2, z;
double r;
//--------------------------------------------------------------------
// Break A into two parts such that A = 2^23 * A1 + A2.
//--------------------------------------------------------------------
t1 = r23 * a;
a1 = (int) t1;
a2 = a - t23 * a1;
//--------------------------------------------------------------------
// Break X into two parts such that X = 2^23 * X1 + X2, compute
// Z = A1 * X2 + A2 * X1 (mod 2^23), and then
// X = 2^23 * Z + A2 * X2 (mod 2^46).
//--------------------------------------------------------------------
t1 = r23 * (*x);
x1 = (int) t1;
x2 = *x - t23 * x1;
t1 = a1 * x2 + a2 * x1;
t2 = (int) (r23 * t1);
z = t1 - t23 * t2;
t3 = t23 * z + a2 * x2;
t4 = (int) (r46 * t3);
*x = t3 - t46 * t4;
r = r46 * (*x);
return r;
}
#pragma omp end declare target
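/*
 * A minimal sketch (not part of the benchmark) of how randlc_ep is used:
 * seed with the odd integer S, then each call returns a uniform deviate in
 * (0, 1) and advances the seed in place. randlc_ep_demo is illustrative only.
 */
#if 0 /* illustration only */
static void randlc_ep_demo(void)
{
  double seed = S;  /* odd double-precision integer in (1, 2^46) */
  for (int i = 0; i < 4; i++) {
    double u = randlc_ep(&seed, A);
    printf("u[%d] = %.15f\n", i, u);
  }
}
#endif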
int main()
{
double Mops, t1, t2, t3, t4, x1, x2;
double sx, sy, tm, an, tt, gc;
double sx_verify_value, sy_verify_value, sx_err, sy_err;
int np;
int i, ik, kk, l, k, nit;
int k_offset, j;
int verified, timers_enabled;
double q0, q1, q2, q3, q4, q5, q6, q7, q8, q9;
double *x;
double *q;
double *xx, *qq;
/*variables for inlining vranlc()*/
double in_t1, in_t2, in_t3, in_t4;
double in_a1, in_a2, in_x1, in_x2, in_z;
double tmp_sx, tmp_sy;
double dum[3] = {1.0, 1.0, 1.0};
char size[16];
int blksize = BLKSIZE;
int blk, koff, numblks;
int m, mk, mm, nn, nk, nq;
char xclass;
FILE *fp;
if ((fp = fopen("timer.flag", "r")) == NULL) {
timers_enabled = 0;
} else {
timers_enabled = 1;
fclose(fp);
}
if ((fp = fopen("ep.input", "r")) != NULL) {
int result;
printf(" Reading from input file ep.input\n");
result = fscanf(fp, "%d", &m);
while (fgetc(fp) != '\n');
result = fscanf(fp, "%c", &xclass);
while (fgetc(fp) != '\n');
fclose(fp);
} else {
printf(" No input file. Using compiled defaults \n");
m = M;
xclass = CLASS;
}
mk = 16;
mm = m - mk;
nk = (1 << mk);
np = (1 << mm);
nq = 10;
if (np < blksize) {
blksize = np;
}
numblks = ceil( (double)np / (double) blksize);
x = (double*)malloc(2*nk*sizeof(double));
xx = (double*)malloc(blksize*2*nk*sizeof(double));
q = (double*)malloc(nq*sizeof(double));
qq = (double*)malloc(blksize*nq*sizeof(double));
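  // The np batches are processed in numblks chunks of at most blksize each;
  // xx and qq hold per-batch scratch for one chunk at a time, which bounds
  // the device-side memory footprint.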
//--------------------------------------------------------------------
// Because the size of the problem is too large to store in a 32-bit
// integer for some classes, we put it into a string (for printing).
// Have to strip off the decimal point put in there by the floating
// point print statement (internal file)
//--------------------------------------------------------------------
  sprintf(size, "%15.0lf", pow(2.0, m+1));
j = 14;
if (size[j] == '.') j--;
size[j+1] = '\0';
printf("\n\n NAS Parallel Benchmarks (NPB3.3-OPENMP-C) - EP Benchmark\n");
printf("\n Number of random numbers generated: %15s\n", size);
verified = 0;
//--------------------------------------------------------------------
// Compute the number of "batches" of random number pairs generated
// per processor. Adjust if the number of processors does not evenly
// divide the total number
//--------------------------------------------------------------------
//--------------------------------------------------------------------
// Call the random number generator functions and initialize
// the x-array to reduce the effects of paging on the timings.
// Also, call all mathematical functions that are used. Make
// sure these initializations cannot be eliminated as dead code.
//--------------------------------------------------------------------
#pragma omp target data map(alloc:x[0:2*nk],xx[0:blksize*2*nk],qq[0:blksize*nq]) map(from:q[0:nq])
{
dum[0] = randlc_ep(&dum[1], dum[2]);
#pragma omp target teams distribute parallel for simd map(x[:0])
for (i = 0; i < 2 * nk; i++) {
x[i] = -1.0e99;
}
#pragma omp target teams distribute parallel for simd map(q[:0])
for (i = 0; i < nq; i++) {
q[i] = 0.0;
}
Mops = log(sqrt(fabs(MAX(1.0, 1.0))));
timer_clear(0);
timer_clear(1);
timer_clear(2);
timer_start(0);
  /* this function actually does nothing, so it is commented out */
//vranlc(0, &t1, A, x);
//#pragma omp target update to(x[0:2*NK])
//--------------------------------------------------------------------
// Compute AN = A ^ (2 * NK) (mod 2^46).
//--------------------------------------------------------------------
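  // randlc_ep(&t1, t1) squares the seed (t1 <- t1^2 mod 2^46), so mk+1
  // squarings starting from A leave t1 = A^(2^(mk+1)) = A^(2*nk) mod 2^46.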
t1 = A;
for (i = 0; i < mk + 1; i++) {
t2 = randlc_ep(&t1, t1);
}
an = t1;
tt = S;
gc = 0.0;
sx = 0.0;
sy = 0.0;
k_offset = -1;
for (blk=0; blk < numblks; ++blk) {
koff = blk*blksize;
if (koff + blksize > np) {
blksize = np - (blk*blksize);
}
#pragma omp target teams distribute parallel for map(x[:0],xx[:0],qq[:0])
for(k=0; k<blksize; k++)
{
#pragma omp simd
for(i=0; i<nq; i++)
qq[k*nq + i] = 0.0;
#pragma omp simd
for(i=0; i<2*nk; i++)
xx[k*2*nk + i] = x[i];
}
//--------------------------------------------------------------------
// Each instance of this loop may be performed independently. We compute
// the k offsets separately to take into account the fact that some nodes
// have more numbers to generate than others
//--------------------------------------------------------------------
#pragma omp target teams distribute parallel for map(tofrom: sx,sy) private(i,t1,t2,t3,l,kk,ik,in_t1,in_t2,in_t3,in_t4,in_a1,in_a2,in_x1,in_x2,x1,x2,t4,in_z,tmp_sx,tmp_sy) shared(k_offset,koff,an,xx,nk,blksize,qq,nq) default(none) reduction(+:sx,sy) map(xx[:0],qq[:0])
for (k = 1; k <= blksize; k++) {
kk = k_offset + k + koff;
t1 = S;
t2 = an;
// Find starting seed t1 for this kk.
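      // Binary exponentiation over the bits of kk: on exit t1 = S * an^kk
      // (mod 2^46), i.e. the seed advanced past the 2*nk random numbers
      // consumed by each of the preceding kk batches.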
for (i = 1; i <= 100; i++) {
ik = kk / 2;
if ((2 * ik) != kk) t3 = randlc_ep(&t1, t2);
if (ik == 0) break;
t3 = randlc_ep(&t2, t2);
kk = ik;
}
//--------------------------------------------------------------------
// Compute uniform pseudorandom numbers.
//--------------------------------------------------------------------
//vranlc(2 * NK, &t1, A, x);
/*inline vranlc function*/
in_t1 = r23 * A;
in_a1 = (int)in_t1;
in_a2 = A - t23 * in_a1;
for(i=0; i<2*nk; i++)
{
in_t1 = r23 * t1;
in_x1 = (int)in_t1;
in_x2 = t1 - t23 * in_x1;
in_t1 = in_a1 * in_x2 + in_a2 * in_x1;
in_t2 = (int)(r23 * in_t1);
in_z = in_t1 - t23 * in_t2;
in_t3 = t23*in_z + in_a2 *in_x2;
in_t4 = (int)(r46 * in_t3);
t1 = in_t3 - t46 * in_t4;
xx[(k-1)*2*nk + i] = r46 * t1;
}
      //--------------------------------------------------------------------
      // Compute Gaussian deviates by acceptance-rejection method and
      // tally counts in concentric square annuli. This loop is not
      // vectorizable.
      //--------------------------------------------------------------------
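      // (This acceptance-rejection scheme is the Marsaglia polar variant of
      //  the Box-Muller transform: points drawn uniformly in the square are
      //  accepted if they fall inside the unit disc, then mapped to a pair
      //  of independent Gaussian deviates t3, t4.)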
//if (timers_enabled) timer_start(1);
tmp_sx = 0.0;
tmp_sy = 0.0;
for (i = 0; i < nk; i++) {
x1 = 2.0 * xx[(k-1)*2*nk + 2*i] - 1.0;
x2 = 2.0 * xx[(k-1)*2*nk + (2*i+1)] - 1.0;
t1 = x1 * x1 + x2 * x2;
if (t1 <= 1.0) {
t2 = sqrt(-2.0 * log(t1) / t1);
t3 = (x1 * t2);
t4 = (x2 * t2);
l = MAX(fabs(t3), fabs(t4));
qq[(k-1)*nq + l] += 1.0;
tmp_sx = tmp_sx + t3;
tmp_sy = tmp_sy + t4;
}
}
sx += tmp_sx;
sy += tmp_sy;
}
#pragma omp target teams distribute map(tofrom: gc) reduction(+:gc) map(qq[:0],q[:0])
for(i=0; i<nq; i++)
{
double sum_qi = 0.0;
#pragma omp parallel for simd reduction(+:sum_qi)
for(k=0; k<blksize; k++)
sum_qi = sum_qi + qq[k*nq + i];
        /* sum of column i of qq into q[i] */
q[i] += sum_qi;
        /* gc accumulates the total count over all annuli */
gc += sum_qi;
}
}//end for
}/*end omp data*/
timer_stop(0);
tm = timer_read(0);
nit = 0;
verified = 1;
if (m == 24) {
sx_verify_value = -3.247834652034740e+3;
sy_verify_value = -6.958407078382297e+3;
} else if (m == 25) {
sx_verify_value = -2.863319731645753e+3;
sy_verify_value = -6.320053679109499e+3;
} else if (m == 28) {
sx_verify_value = -4.295875165629892e+3;
sy_verify_value = -1.580732573678431e+4;
} else if (m == 30) {
sx_verify_value = 4.033815542441498e+4;
sy_verify_value = -2.660669192809235e+4;
} else if (m == 32) {
sx_verify_value = 4.764367927995374e+4;
sy_verify_value = -8.084072988043731e+4;
} else if (m == 36) {
sx_verify_value = 1.982481200946593e+5;
sy_verify_value = -1.020596636361769e+5;
} else if (m == 40) {
sx_verify_value = -5.319717441530e+05;
sy_verify_value = -3.688834557731e+05;
} else {
verified = 0;
}
if (verified) {
sx_err = fabs((sx - sx_verify_value) / sx_verify_value);
sy_err = fabs((sy - sy_verify_value) / sy_verify_value);
verified = ((sx_err <= EPSILON) && (sy_err <= EPSILON));
}
Mops = pow(2.0, m+1) / tm / 1000000.0;
printf("\nEP Benchmark Results:\n\n");
#ifndef SPEC
printf("CPU Time =%10.4lf\n", tm);
#endif
printf("N = 2^%5d\n", M);
printf("No. Gaussian Pairs = %15.0lf\n", gc);
printf("Sums = %25.15lE %25.15lE\n", sx, sy);
printf("Counts: \n");
for (i = 0; i < nq; i++) {
printf("%3d%15.0lf\n", i, q[i]);
}
print_results("EP", xclass, m+1, 0, 0, nit,
tm, Mops,
"Random numbers generated",
verified, NPBVERSION, COMPILETIME, CS1,
CS2, CS3, CS4, CS5, CS6, CS7);
if (timers_enabled) {
if (tm <= 0.0) tm = 1.0;
tt = timer_read(0);
printf("\nTotal time: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm);
tt = timer_read(1);
printf("Gaussian pairs: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm);
tt = timer_read(2);
printf("Random numbers: %9.3lf (%6.2lf)\n", tt, tt*100.0/tm);
}
free(x);
free(q);
free(xx);
free(qq);
return 0;
}
|